mirror of https://github.com/rd-stuffs/msm-4.14.git
synced 2025-02-20 11:45:48 +08:00

Merge "Merge remote-tracking branch 'remotes/origin/tmp-4576e0e' into msm-4.14" into msm-4.14

This commit is contained in commit 780ad12511.
@@ -508,7 +508,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
 	min: Minimal size of receive buffer used by TCP sockets.
 	It is guaranteed to each TCP socket, even under moderate memory
 	pressure.
-	Default: 1 page
+	Default: 4K

 	default: initial size of receive buffer used by TCP sockets.
 	This value overrides net.core.rmem_default used by other protocols.
@@ -676,7 +676,7 @@ tcp_window_scaling - BOOLEAN
 tcp_wmem - vector of 3 INTEGERs: min, default, max
 	min: Amount of memory reserved for send buffers for TCP sockets.
 	Each TCP socket has rights to use it due to fact of its birth.
-	Default: 1 page
+	Default: 4K

 	default: initial size of send buffer used by TCP sockets. This
 	value overrides net.core.wmem_default used by other protocols.
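For reference, these three values surface through procfs; a minimal user-space sketch (assuming a Linux host with /proc mounted — only the sysctl path is standard, the program itself is illustrative) that reads them back:

#include <stdio.h>

/* Read the three tcp_rmem values (min, default, max) from procfs. */
int main(void)
{
	long rmin, rdef, rmax;
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_rmem", "r");

	if (!f || fscanf(f, "%ld %ld %ld", &rmin, &rdef, &rmax) != 3) {
		perror("tcp_rmem");
		return 1;
	}
	fclose(f);
	printf("min=%ld default=%ld max=%ld\n", rmin, rdef, rmax);
	return 0;
}

With the change above, the first value reads as a fixed 4096 bytes, making the documented default independent of the machine's page size.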
@@ -54,6 +54,10 @@ KVM_FEATURE_PV_UNHALT              ||     7 || guest checks this feature bit
                                    ||       || before enabling paravirtualized
                                    ||       || spinlock support.
 ------------------------------------------------------------------------------
+KVM_FEATURE_ASYNC_PF_VMEXIT        ||    10 || paravirtualized async PF VM exit
+                                   ||       || can be enabled by setting bit 2
+                                   ||       || when writing to msr 0x4b564d02
+------------------------------------------------------------------------------
 KVM_FEATURE_CLOCKSOURCE_STABLE_BIT ||    24 || host will warn if no guest-side
                                    ||       || per-cpu warps are expected in
                                    ||       || kvmclock.
@@ -170,7 +170,8 @@ MSR_KVM_ASYNC_PF_EN: 0x4b564d02
 	when asynchronous page faults are enabled on the vcpu 0 when
 	disabled. Bit 1 is 1 if asynchronous page faults can be injected
 	when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults
-	are delivered to L1 as #PF vmexits.
+	are delivered to L1 as #PF vmexits.  Bit 2 can be set only if
+	KVM_FEATURE_ASYNC_PF_VMEXIT is present in CPUID.

 	First 4 byte of 64 byte memory location will be written to by
 	the hypervisor at the time of asynchronous page fault (APF)
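The guest-side handshake this documents — probe the CPUID feature bit, then request #PF-vmexit delivery only when it is present — is what the arch/x86/kernel/kvm.c hunk further below implements; condensed into a sketch (the wrapper function is illustrative, the constants and helpers are the kernel's own):

#include <asm/kvm_para.h>	/* kvm_para_has_feature(), KVM_FEATURE_*, KVM_ASYNC_PF_* */
#include <asm/msr.h>		/* wrmsrl(), MSR_KVM_ASYNC_PF_EN */

/* Illustrative wrapper: enable async page faults, opting into
 * #PF-vmexit delivery (bit 2) only if the host advertises it. */
static void enable_async_pf(u64 pa)
{
	pa |= KVM_ASYNC_PF_ENABLED;
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
		pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
	wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
}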
Makefile (2 changed lines)
@ -1,7 +1,7 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
VERSION = 4
|
||||
PATCHLEVEL = 14
|
||||
SUBLEVEL = 24
|
||||
SUBLEVEL = 26
|
||||
EXTRAVERSION =
|
||||
NAME = Petit Gorille
|
||||
|
||||
|
@@ -97,6 +97,8 @@
 };

 &i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins>;
 	clock-frequency = <2600000>;

 	twl: twl@48 {
@@ -215,7 +217,12 @@
 		>;
 	};

+	i2c1_pins: pinmux_i2c1_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+			OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+		>;
+	};
 };

 &omap3_pmx_wkup {
@@ -104,6 +104,8 @@
 };

 &i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins>;
 	clock-frequency = <2600000>;

 	twl: twl@48 {
@@ -211,6 +213,12 @@
 			OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0) /* hsusb0_data7.hsusb0_data7 */
 		>;
 	};
+	i2c1_pins: pinmux_i2c1_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0) /* i2c1_scl.i2c1_scl */
+			OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+		>;
+	};
 };

 &uart2 {
@@ -110,26 +110,6 @@
 	};
 };

-&cpu0 {
-	cpu0-supply = <&vdd_cpu>;
-	operating-points = <
-		/* KHz    uV */
-		1800000 1400000
-		1608000 1350000
-		1512000 1300000
-		1416000 1200000
-		1200000 1100000
-		1008000 1050000
-		 816000 1000000
-		 696000  950000
-		 600000  900000
-		 408000  900000
-		 312000  900000
-		 216000  900000
-		 126000  900000
-	>;
-};
-
 &emmc {
 	status = "okay";
 	bus-width = <8>;
@@ -7,6 +7,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING

 KVM=../../../../virt/kvm

+CFLAGS_ARMV7VE		   :=$(call cc-option, -march=armv7ve)
+
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
@@ -15,7 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
 obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+CFLAGS_banked-sr.o	   += $(CFLAGS_ARMV7VE)

 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+CFLAGS_switch.o		   += $(CFLAGS_ARMV7VE)
 obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
@@ -20,6 +20,10 @@

 #include <asm/kvm_hyp.h>

+/*
+ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
+ * trick the assembler.
+ */
+__asm__(".arch_extension virt");

 void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
@@ -42,7 +42,7 @@ config MACH_ARMADA_375
 	depends on ARCH_MULTI_V7
 	select ARMADA_370_XP_IRQ
 	select ARM_ERRATA_720789
-	select ARM_ERRATA_753970
+	select PL310_ERRATA_753970
 	select ARM_GIC
 	select ARMADA_375_CLK
 	select HAVE_ARM_SCU
@@ -58,7 +58,7 @@ config MACH_ARMADA_38X
 	bool "Marvell Armada 380/385 boards"
 	depends on ARCH_MULTI_V7
 	select ARM_ERRATA_720789
-	select ARM_ERRATA_753970
+	select PL310_ERRATA_753970
 	select ARM_GIC
 	select ARM_GLOBAL_TIMER
 	select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
@@ -472,28 +472,27 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
 /*****************************************************************************
  * Ethernet switch
  ****************************************************************************/
-static __initconst const char *orion_ge00_mvmdio_bus_name = "orion-mii";
-static __initdata struct mdio_board_info
-		  orion_ge00_switch_board_info;
+static __initdata struct mdio_board_info orion_ge00_switch_board_info = {
+	.bus_id   = "orion-mii",
+	.modalias = "mv88e6085",
+};

 void __init orion_ge00_switch_init(struct dsa_chip_data *d)
 {
-	struct mdio_board_info *bd;
 	unsigned int i;

 	if (!IS_BUILTIN(CONFIG_PHYLIB))
 		return;

-	for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
-		if (!strcmp(d->port_names[i], "cpu"))
+	for (i = 0; i < ARRAY_SIZE(d->port_names); i++) {
+		if (!strcmp(d->port_names[i], "cpu")) {
+			d->netdev[i] = &orion_ge00.dev;
 			break;
+		}
+	}

-	bd = &orion_ge00_switch_board_info;
-	bd->bus_id = orion_ge00_mvmdio_bus_name;
-	bd->mdio_addr = d->sw_addr;
-	d->netdev[i] = &orion_ge00.dev;
-	strcpy(bd->modalias, "mv88e6085");
-	bd->platform_data = d;
+	orion_ge00_switch_board_info.mdio_addr = d->sw_addr;
+	orion_ge00_switch_board_info.platform_data = d;

 	mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
 }
@@ -238,8 +238,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	off = offsetof(struct bpf_array, map.max_entries);
 	emit_a64_mov_i64(tmp, off, ctx);
 	emit(A64_LDR32(tmp, r2, tmp), ctx);
+	emit(A64_MOV(0, r3, r3), ctx);
 	emit(A64_CMP(0, r3, tmp), ctx);
-	emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
+	emit(A64_B_(A64_COND_CS, jmp_offset), ctx);

 	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 	 * goto out;
@@ -247,7 +248,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	 */
 	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
 	emit(A64_CMP(1, tcc, tmp), ctx);
-	emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
+	emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
 	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);

 	/* prog = array->ptrs[index];
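Both condition-code swaps encode the same point: the index and counter are unsigned, so the guards need unsigned comparisons (CS is unsigned >=, HI is unsigned >). A hedged C equivalent of the bounds check being fixed, not the JIT's literal source:

/* With a signed compare, an index >= 2^31 wraps negative and
 * slips past the bounds check: */
if ((int)index >= (int)max_entries)	/* broken for huge indices */
	goto out;

/* With the unsigned compare the JIT now emits (A64_COND_CS): */
if ((unsigned int)index >= max_entries)
	goto out;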
@@ -26,6 +26,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long);
 void flush_kernel_icache_range_asm(unsigned long, unsigned long);
 void flush_user_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_page_asm(void *);
 void flush_kernel_icache_page(void *);

@@ -316,6 +316,8 @@ extern int _parisc_requires_coherency;
 #define parisc_requires_coherency()	(0)
 #endif

+extern int running_on_qemu;
+
 #endif /* __ASSEMBLY__ */

 #endif /* __ASM_PARISC_PROCESSOR_H */
@@ -465,10 +465,10 @@ EXPORT_SYMBOL(copy_user_page);
 int __flush_tlb_range(unsigned long sid, unsigned long start,
 		      unsigned long end)
 {
-	unsigned long flags, size;
+	unsigned long flags;

-	size = (end - start);
-	if (size >= parisc_tlb_flush_threshold) {
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    end - start >= parisc_tlb_flush_threshold) {
 		flush_tlb_all();
 		return 1;
 	}
@@ -539,13 +539,11 @@ void flush_cache_mm(struct mm_struct *mm)
 	struct vm_area_struct *vma;
 	pgd_t *pgd;

-	/* Flush the TLB to avoid speculation if coherency is required. */
-	if (parisc_requires_coherency())
-		flush_tlb_all();
-
 	/* Flushing the whole cache on each cpu takes forever on
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
-	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
+		flush_tlb_all();
 		flush_cache_all();
 		return;
 	}
@@ -553,9 +551,9 @@ void flush_cache_mm(struct mm_struct *mm)
 	if (mm->context == mfsp(3)) {
 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
 			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
-			if ((vma->vm_flags & VM_EXEC) == 0)
-				continue;
-			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+			if (vma->vm_flags & VM_EXEC)
+				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
 		}
 		return;
 	}
@@ -581,14 +579,9 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
 	BUG_ON(!vma->vm_mm->context);

-	/* Flush the TLB to avoid speculation if coherency is required. */
-	if (parisc_requires_coherency())
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    end - start >= parisc_cache_flush_threshold) {
 		flush_tlb_range(vma, start, end);
-
-	if ((end - start) >= parisc_cache_flush_threshold
-	    || vma->vm_mm->context != mfsp(3)) {
 		flush_cache_all();
 		return;
 	}
@@ -596,6 +589,7 @@ void flush_cache_range(struct vm_area_struct *vma,
 	flush_user_dcache_range_asm(start, end);
 	if (vma->vm_flags & VM_EXEC)
 		flush_user_icache_range_asm(start, end);
+	flush_tlb_range(vma, start, end);
 }

 void
@@ -604,8 +598,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 	BUG_ON(!vma->vm_mm->context);

 	if (pfn_valid(pfn)) {
-		if (parisc_requires_coherency())
-			flush_tlb_page(vma, vmaddr);
+		flush_tlb_page(vma, vmaddr);
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
@@ -613,21 +606,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 void flush_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;

-	if ((unsigned long)size > parisc_cache_flush_threshold)
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    (unsigned long)size >= parisc_cache_flush_threshold) {
+		flush_tlb_kernel_range(start, end);
 		flush_data_cache();
-	else
-		flush_kernel_dcache_range_asm(start, start + size);
+		return;
+	}

+	flush_kernel_dcache_range_asm(start, end);
+	flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(flush_kernel_vmap_range);

 void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;

-	if ((unsigned long)size > parisc_cache_flush_threshold)
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    (unsigned long)size >= parisc_cache_flush_threshold) {
+		flush_tlb_kernel_range(start, end);
 		flush_data_cache();
-	else
-		flush_kernel_dcache_range_asm(start, start + size);
+		return;
+	}

+	purge_kernel_dcache_range_asm(start, end);
+	flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
 	.procend
 ENDPROC_CFI(flush_kernel_dcache_range_asm)

+ENTRY_CFI(purge_kernel_dcache_range_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%dcache_stride, %r1
+	ldw		R%dcache_stride(%r1), %r23
+	ldo		-1(%r23), %r21
+	ANDCM		%r26, %r21, %r26
+
+1:	cmpb,COND(<<),n	%r26, %r25, 1b
+	pdc,m		%r23(%r26)
+
+	sync
+	syncdma
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(purge_kernel_dcache_range_asm)
+
 ENTRY_CFI(flush_user_icache_range_asm)
 	.proc
 	.callinfo NO_CALLS
@@ -76,10 +76,10 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 	next_tick = cpuinfo->it_value;

 	/* Calculate how many ticks have elapsed. */
+	now = mfctl(16);
 	do {
 		++ticks_elapsed;
 		next_tick += cpt;
-		now = mfctl(16);
 	} while (next_tick - now > cpt);

 	/* Store (in CR16 cycles) up to when we are accounting right now. */
@@ -103,16 +103,17 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id)
 	 * if one or the other wrapped. If "now" is "bigger" we'll end up
 	 * with a very large unsigned number.
 	 */
-	while (next_tick - mfctl(16) > cpt)
+	now = mfctl(16);
+	while (next_tick - now > cpt)
 		next_tick += cpt;

 	/* Program the IT when to deliver the next interrupt.
 	 * Only bottom 32-bits of next_tick are writable in CR16!
 	 * Timer interrupt will be delivered at least a few hundred cycles
-	 * after the IT fires, so if we are too close (<= 500 cycles) to the
+	 * after the IT fires, so if we are too close (<= 8000 cycles) to the
 	 * next cycle, simply skip it.
 	 */
-	if (next_tick - mfctl(16) <= 500)
+	if (next_tick - now <= 8000)
 		next_tick += cpt;
 	mtctl(next_tick, 16);

@@ -248,7 +249,7 @@ static int __init init_cr16_clocksource(void)
 	 * different sockets, so mark them unstable and lower rating on
 	 * multi-socket SMP systems.
 	 */
-	if (num_online_cpus() > 1) {
+	if (num_online_cpus() > 1 && !running_on_qemu) {
 		int cpu;
 		unsigned long cpu0_loc;
 		cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
@@ -241,6 +241,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
 	 * goto out;
 	 */
 	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
+	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
 	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
 	PPC_BCC(COND_GE, out);
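The added PPC_RLWINM (rotate-left-word-immediate-then-AND-with-mask, rotate 0, mask bits 0-31) zero-extends the 32-bit index before it is compared and used to address array->ptrs; roughly, in C (an equivalent of the emitted instruction, not the JIT's source):

/* Clear the upper 32 bits of the 64-bit register holding the
 * 32-bit BPF index, so later pointer arithmetic can't overshoot. */
index &= 0xffffffffULL;
if (index >= max_entries)
	goto out;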
@@ -173,8 +173,15 @@ static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)

 static int ckc_irq_pending(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
+	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+	const u64 ckc = vcpu->arch.sie_block->ckc;
+
+	if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+		if ((s64)ckc >= (s64)now)
+			return 0;
+	} else if (ckc >= now) {
 		return 0;
+	}
 	return ckc_interrupts_enabled(vcpu);
 }

@@ -1004,13 +1011,19 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)

 static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
 {
-	u64 now, cputm, sltime = 0;
+	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
+	const u64 ckc = vcpu->arch.sie_block->ckc;
+	u64 cputm, sltime = 0;

 	if (ckc_interrupts_enabled(vcpu)) {
-		now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
-		sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-		/* already expired or overflow? */
-		if (!sltime || vcpu->arch.sie_block->ckc <= now)
+		if (vcpu->arch.sie_block->gcr[0] & 0x0020000000000000ul) {
+			if ((s64)now < (s64)ckc)
+				sltime = tod_to_ns((s64)ckc - (s64)now);
+		} else if (now < ckc) {
+			sltime = tod_to_ns(ckc - now);
+		}
+		/* already expired */
+		if (!sltime)
 			return 0;
 		if (cpu_timer_interrupts_enabled(vcpu)) {
 			cputm = kvm_s390_get_cpu_timer(vcpu);
@@ -169,6 +169,28 @@ int kvm_arch_hardware_enable(void)
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
 			      unsigned long end);

+static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
+{
+	u8 delta_idx = 0;
+
+	/*
+	 * The TOD jumps by delta, we have to compensate this by adding
+	 * -delta to the epoch.
+	 */
+	delta = -delta;
+
+	/* sign-extension - we're adding to signed values below */
+	if ((s64)delta < 0)
+		delta_idx = -1;
+
+	scb->epoch += delta;
+	if (scb->ecd & ECD_MEF) {
+		scb->epdx += delta_idx;
+		if (scb->epoch < delta)
+			scb->epdx += 1;
+	}
+}
+
 /*
  * This callback is executed during stop_machine(). All CPUs are therefore
  * temporarily stopped. In order not to change guest behavior, we have to
@@ -184,13 +206,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
 	unsigned long long *delta = v;

 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		kvm->arch.epoch -= *delta;
 		kvm_for_each_vcpu(i, vcpu, kvm) {
-			vcpu->arch.sie_block->epoch -= *delta;
+			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
+			if (i == 0) {
+				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
+				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
+			}
 			if (vcpu->arch.cputm_enabled)
 				vcpu->arch.cputm_start += *delta;
 			if (vcpu->arch.vsie_block)
-				vcpu->arch.vsie_block->epoch -= *delta;
+				kvm_clock_sync_scb(vcpu->arch.vsie_block,
+						   *delta);
 		}
 	}
 	return NOTIFY_OK;
@@ -888,12 +914,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
 		return -EFAULT;

-	if (test_kvm_facility(kvm, 139))
-		kvm_s390_set_tod_clock_ext(kvm, &gtod);
-	else if (gtod.epoch_idx == 0)
-		kvm_s390_set_tod_clock(kvm, gtod.tod);
-	else
+	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
 		return -EINVAL;
+	kvm_s390_set_tod_clock(kvm, &gtod);

 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
 		 gtod.epoch_idx, gtod.tod);
@@ -918,13 +941,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)

 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	u64 gtod;
+	struct kvm_s390_vm_tod_clock gtod = { 0 };

-	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
+			   sizeof(gtod.tod)))
 		return -EFAULT;

-	kvm_s390_set_tod_clock(kvm, gtod);
-	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
+	kvm_s390_set_tod_clock(kvm, &gtod);
+	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
 	return 0;
 }

@@ -2359,6 +2383,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	mutex_lock(&vcpu->kvm->lock);
 	preempt_disable();
 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
 	preempt_enable();
 	mutex_unlock(&vcpu->kvm->lock);
 	if (!kvm_is_ucontrol(vcpu->kvm)) {
@@ -2945,8 +2970,8 @@ retry:
 	return 0;
 }

-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-				const struct kvm_s390_vm_tod_clock *gtod)
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+			    const struct kvm_s390_vm_tod_clock *gtod)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_s390_tod_clock_ext htod;
@@ -2958,10 +2983,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	get_tod_clock_ext((char *)&htod);

 	kvm->arch.epoch = gtod->tod - htod.tod;
-	kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
-
-	if (kvm->arch.epoch > gtod->tod)
-		kvm->arch.epdx -= 1;
+	kvm->arch.epdx = 0;
+	if (test_kvm_facility(kvm, 139)) {
+		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+		if (kvm->arch.epoch > gtod->tod)
+			kvm->arch.epdx -= 1;
+	}

 	kvm_s390_vcpu_block_all(kvm);
 	kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -2974,22 +3001,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	mutex_unlock(&kvm->lock);
 }

-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
-{
-	struct kvm_vcpu *vcpu;
-	int i;
-
-	mutex_lock(&kvm->lock);
-	preempt_disable();
-	kvm->arch.epoch = tod - get_tod_clock();
-	kvm_s390_vcpu_block_all(kvm);
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-	kvm_s390_vcpu_unblock_all(kvm);
-	preempt_enable();
-	mutex_unlock(&kvm->lock);
-}
-
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu
@@ -272,9 +272,8 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);
 int handle_sthyi(struct kvm_vcpu *vcpu);

 /* implemented in kvm-s390.c */
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-				const struct kvm_s390_vm_tod_clock *gtod);
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+			    const struct kvm_s390_vm_tod_clock *gtod);
 long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -84,9 +84,10 @@ int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
 /* Handle SCK (SET CLOCK) interception */
 static int handle_set_clock(struct kvm_vcpu *vcpu)
 {
+	struct kvm_s390_vm_tod_clock gtod = { 0 };
 	int rc;
 	u8 ar;
-	u64 op2, val;
+	u64 op2;

 	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
@@ -94,12 +95,12 @@ static int handle_set_clock(struct kvm_vcpu *vcpu)
 	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
 	if (op2 & 7)	/* Operand must be on a doubleword boundary */
 		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
-	rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
+	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
 	if (rc)
 		return kvm_s390_inject_prog_cond(vcpu, rc);

-	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
-	kvm_s390_set_tod_clock(vcpu->kvm, val);
+	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
+	kvm_s390_set_tod_clock(vcpu->kvm, &gtod);

 	kvm_s390_set_psw_cc(vcpu, 0);
 	return 0;
@@ -177,4 +177,41 @@ static inline void indirect_branch_prediction_barrier(void)
 }

 #endif /* __ASSEMBLY__ */
+
+/*
+ * Below is used in the eBPF JIT compiler and emits the byte sequence
+ * for the following assembly:
+ *
+ * With retpolines configured:
+ *
+ *    callq do_rop
+ *  spec_trap:
+ *    pause
+ *    lfence
+ *    jmp spec_trap
+ *  do_rop:
+ *    mov %rax,(%rsp)
+ *    retq
+ *
+ * Without retpolines configured:
+ *
+ *    jmp *%rax
+ */
+#ifdef CONFIG_RETPOLINE
+# define RETPOLINE_RAX_BPF_JIT_SIZE	17
+# define RETPOLINE_RAX_BPF_JIT()				\
+	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
+	/* spec_trap: */					\
+	EMIT2(0xF3, 0x90);	 /* pause */			\
+	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
+	EMIT2(0xEB, 0xF9);	 /* jmp spec_trap */		\
+	/* do_rop: */						\
+	EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */	\
+	EMIT1(0xC3);		 /* retq */
+#else
+# define RETPOLINE_RAX_BPF_JIT_SIZE	2
+# define RETPOLINE_RAX_BPF_JIT()				\
+	EMIT2(0xFF, 0xE0);	 /* jmp *%rax */
+#endif
+
 #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
@@ -350,14 +350,14 @@ static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
 {
 	pmdval_t v = native_pmd_val(pmd);

-	return __pmd(v | set);
+	return native_make_pmd(v | set);
 }

 static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
 {
 	pmdval_t v = native_pmd_val(pmd);

-	return __pmd(v & ~clear);
+	return native_make_pmd(v & ~clear);
 }

 static inline pmd_t pmd_mkold(pmd_t pmd)
@@ -409,14 +409,14 @@ static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
 {
 	pudval_t v = native_pud_val(pud);

-	return __pud(v | set);
+	return native_make_pud(v | set);
 }

 static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
 {
 	pudval_t v = native_pud_val(pud);

-	return __pud(v & ~clear);
+	return native_make_pud(v & ~clear);
 }

 static inline pud_t pud_mkold(pud_t pud)
@@ -32,6 +32,7 @@ extern pmd_t initial_pg_pmd[];
 static inline void pgtable_cache_init(void) { }
 static inline void check_pgt_cache(void) { }
 void paging_init(void);
+void sync_initial_page_table(void);

 /*
  * Define this if things work differently on an i386 and an i486:

@@ -28,6 +28,7 @@ extern pgd_t init_top_pgt[];
 #define swapper_pg_dir init_top_pgt

 extern void paging_init(void);
+static inline void sync_initial_page_table(void) { }

 #define pte_ERROR(e)					\
 	pr_err("%s:%d: bad pte %p(%016lx)\n",		\
@@ -323,6 +323,11 @@ static inline pudval_t native_pud_val(pud_t pud)
 #else
 #include <asm-generic/pgtable-nopud.h>

+static inline pud_t native_make_pud(pudval_t val)
+{
+	return (pud_t) { .p4d.pgd = native_make_pgd(val) };
+}
+
 static inline pudval_t native_pud_val(pud_t pud)
 {
 	return native_pgd_val(pud.p4d.pgd);
@@ -344,6 +349,11 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
 #else
 #include <asm-generic/pgtable-nopmd.h>

+static inline pmd_t native_make_pmd(pmdval_t val)
+{
+	return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
+}
+
 static inline pmdval_t native_pmd_val(pmd_t pmd)
 {
 	return native_pgd_val(pmd.pud.p4d.pgd);
@@ -25,6 +25,7 @@
 #define KVM_FEATURE_STEAL_TIME		5
 #define KVM_FEATURE_PV_EOI		6
 #define KVM_FEATURE_PV_UNHALT		7
+#define KVM_FEATURE_ASYNC_PF_VMEXIT	10

 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
@@ -341,10 +341,10 @@ static void kvm_guest_cpu_init(void)
 #endif
 		pa |= KVM_ASYNC_PF_ENABLED;

-		/* Async page fault support for L1 hypervisor is optional */
-		if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
-			(pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
-			wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
+		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
+			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
+
+		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
 		__this_cpu_write(apf_reason.enabled, 1);
 		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
 		       smp_processor_id());
@@ -1238,20 +1238,13 @@ void __init setup_arch(char **cmdline_p)

 	kasan_init();

-#ifdef CONFIG_X86_32
-	/* sync back kernel address range */
-	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
-			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-			KERNEL_PGD_PTRS);
-
 	/*
-	 * sync back low identity map too.  It is used for example
-	 * in the 32-bit EFI stub.
+	 * Sync back kernel address range.
+	 *
+	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+	 * this call?
 	 */
-	clone_pgd_range(initial_page_table,
-			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+	sync_initial_page_table();

 	tboot_probe();

@@ -287,24 +287,15 @@ void __init setup_per_cpu_areas(void)
 	/* Setup cpu initialized, callin, callout masks */
 	setup_cpu_local_masks();

-#ifdef CONFIG_X86_32
 	/*
 	 * Sync back kernel address range again.  We already did this in
 	 * setup_arch(), but percpu data also needs to be available in
 	 * the smpboot asm.  We can't reliably pick up percpu mappings
 	 * using vmalloc_fault(), because exception dispatch needs
 	 * percpu data.
+	 *
+	 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
+	 * this call?
 	 */
-	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
-			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-			KERNEL_PGD_PTRS);
-
-	/*
-	 * sync back low identity map too.  It is used for example
-	 * in the 32-bit EFI stub.
-	 */
-	clone_pgd_range(initial_page_table,
-			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-#endif
+	sync_initial_page_table();
 }
@@ -597,7 +597,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			     (1 << KVM_FEATURE_ASYNC_PF) |
 			     (1 << KVM_FEATURE_PV_EOI) |
 			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
-			     (1 << KVM_FEATURE_PV_UNHALT);
+			     (1 << KVM_FEATURE_PV_UNHALT) |
+			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT);

 		if (sched_info_on())
 			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
@@ -1944,14 +1944,13 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)

 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
-	struct kvm_lapic *apic;
+	struct kvm_lapic *apic = vcpu->arch.apic;
 	int i;

-	apic_debug("%s\n", __func__);
+	if (!apic)
+		return;

-	ASSERT(vcpu);
-	apic = vcpu->arch.apic;
-	ASSERT(apic != NULL);
+	apic_debug("%s\n", __func__);

 	/* Stop the timer in case it's a reset to an active apic */
 	hrtimer_cancel(&apic->lapic_timer.timer);
@@ -2107,7 +2106,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
 	 */
 	vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
 	static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
-	kvm_lapic_reset(vcpu, false);
 	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

 	return 0;
@@ -2511,7 +2509,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)

 	pe = xchg(&apic->pending_events, 0);
 	if (test_bit(KVM_APIC_INIT, &pe)) {
-		kvm_lapic_reset(vcpu, true);
 		kvm_vcpu_reset(vcpu, true);
 		if (kvm_vcpu_is_bsp(apic->vcpu))
 			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -150,6 +150,20 @@ module_param(dbg, bool, 0644);
 /* make pte_list_desc fit well in cache line */
 #define PTE_LIST_EXT 3

+/*
+ * Return values of handle_mmio_page_fault and mmu.page_fault:
+ * RET_PF_RETRY: let CPU fault again on the address.
+ * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
+ *
+ * For handle_mmio_page_fault only:
+ * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
+ */
+enum {
+	RET_PF_RETRY = 0,
+	RET_PF_EMULATE = 1,
+	RET_PF_INVALID = 2,
+};
+
 struct pte_list_desc {
 	u64 *sptes[PTE_LIST_EXT];
 	struct pte_list_desc *more;
@@ -2794,13 +2808,13 @@ done:
 	return ret;
 }

-static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
-			 int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
-			 bool speculative, bool host_writable)
+static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
+			int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
+			bool speculative, bool host_writable)
 {
 	int was_rmapped = 0;
 	int rmap_count;
-	bool emulate = false;
+	int ret = RET_PF_RETRY;

 	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
 		 *sptep, write_fault, gfn);
@@ -2830,12 +2844,12 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
 	if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
 	      true, host_writable)) {
 		if (write_fault)
-			emulate = true;
+			ret = RET_PF_EMULATE;
 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 	}

 	if (unlikely(is_mmio_spte(*sptep)))
-		emulate = true;
+		ret = RET_PF_EMULATE;

 	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
 	pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
@@ -2855,7 +2869,7 @@ static bool mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,

 	kvm_release_pfn_clean(pfn);

-	return emulate;
+	return ret;
 }

 static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
@@ -2994,17 +3008,16 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
 	 * Do not cache the mmio info caused by writing the readonly gfn
 	 * into the spte otherwise read access on readonly gfn also can
 	 * caused mmio page fault and treat it as mmio access.
-	 * Return 1 to tell kvm to emulate it.
 	 */
 	if (pfn == KVM_PFN_ERR_RO_FAULT)
-		return 1;
+		return RET_PF_EMULATE;

 	if (pfn == KVM_PFN_ERR_HWPOISON) {
 		kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
-		return 0;
+		return RET_PF_RETRY;
 	}

-	return -EFAULT;
+	return RET_PF_EMULATE;
 }

 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
@@ -3286,13 +3299,13 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 	}

 	if (fast_page_fault(vcpu, v, level, error_code))
-		return 0;
+		return RET_PF_RETRY;

 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();

 	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
-		return 0;
+		return RET_PF_RETRY;

 	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
 		return r;
@@ -3312,7 +3325,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	kvm_release_pfn_clean(pfn);
-	return 0;
+	return RET_PF_RETRY;
 }

@@ -3659,54 +3672,38 @@ exit:
 	return reserved;
 }

-/*
- * Return values of handle_mmio_page_fault:
- * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the instruction
- *			directly.
- * RET_MMIO_PF_INVALID: invalid spte is detected then let the real page
- *			fault path update the mmio spte.
- * RET_MMIO_PF_RETRY: let CPU fault again on the address.
- * RET_MMIO_PF_BUG: a bug was detected (and a WARN was printed).
- */
-enum {
-	RET_MMIO_PF_EMULATE = 1,
-	RET_MMIO_PF_INVALID = 2,
-	RET_MMIO_PF_RETRY = 0,
-	RET_MMIO_PF_BUG = -1
-};
-
 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 {
 	u64 spte;
 	bool reserved;

 	if (mmio_info_in_cache(vcpu, addr, direct))
-		return RET_MMIO_PF_EMULATE;
+		return RET_PF_EMULATE;

 	reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
 	if (WARN_ON(reserved))
-		return RET_MMIO_PF_BUG;
+		return -EINVAL;

 	if (is_mmio_spte(spte)) {
 		gfn_t gfn = get_mmio_spte_gfn(spte);
 		unsigned access = get_mmio_spte_access(spte);

 		if (!check_mmio_spte(vcpu, spte))
-			return RET_MMIO_PF_INVALID;
+			return RET_PF_INVALID;

 		if (direct)
 			addr = 0;

 		trace_handle_mmio_page_fault(addr, gfn, access);
 		vcpu_cache_mmio_info(vcpu, addr, gfn, access);
-		return RET_MMIO_PF_EMULATE;
+		return RET_PF_EMULATE;
 	}

 	/*
 	 * If the page table is zapped by other cpus, let CPU fault again on
 	 * the address.
 	 */
-	return RET_MMIO_PF_RETRY;
+	return RET_PF_RETRY;
 }
 EXPORT_SYMBOL_GPL(handle_mmio_page_fault);

@@ -3756,7 +3753,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);

 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
-		return 1;
+		return RET_PF_EMULATE;

 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
@@ -3877,7 +3874,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

 	if (page_fault_handle_page_track(vcpu, error_code, gfn))
-		return 1;
+		return RET_PF_EMULATE;

 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
@@ -3894,13 +3891,13 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 	}

 	if (fast_page_fault(vcpu, gpa, level, error_code))
-		return 0;
+		return RET_PF_RETRY;

 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();

 	if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
-		return 0;
+		return RET_PF_RETRY;

 	if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
 		return r;
@@ -3920,7 +3917,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	kvm_release_pfn_clean(pfn);
-	return 0;
+	return RET_PF_RETRY;
 }

 static void nonpaging_init_context(struct kvm_vcpu *vcpu,
@@ -4919,25 +4916,25 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
 		vcpu->arch.gpa_val = cr2;
 	}

+	r = RET_PF_INVALID;
 	if (unlikely(error_code & PFERR_RSVD_MASK)) {
 		r = handle_mmio_page_fault(vcpu, cr2, direct);
-		if (r == RET_MMIO_PF_EMULATE) {
+		if (r == RET_PF_EMULATE) {
 			emulation_type = 0;
 			goto emulate;
 		}
-		if (r == RET_MMIO_PF_RETRY)
-			return 1;
-		if (r < 0)
-			return r;
-		/* Must be RET_MMIO_PF_INVALID.  */
 	}

-	r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
-				      false);
+	if (r == RET_PF_INVALID) {
+		r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
+					      false);
+		WARN_ON(r == RET_PF_INVALID);
+	}
+
+	if (r == RET_PF_RETRY)
+		return 1;
 	if (r < 0)
 		return r;
-	if (!r)
-		return 1;

 	/*
 	 * Before emulating the instruction, check if the error code
@@ -593,7 +593,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	struct kvm_mmu_page *sp = NULL;
 	struct kvm_shadow_walk_iterator it;
 	unsigned direct_access, access = gw->pt_access;
-	int top_level, emulate;
+	int top_level, ret;

 	direct_access = gw->pte_access;

@@ -659,15 +659,15 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 	}

 	clear_sp_write_flooding_count(it.sptep);
-	emulate = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
-			       it.level, gw->gfn, pfn, prefault, map_writable);
+	ret = mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault,
+			   it.level, gw->gfn, pfn, prefault, map_writable);
 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

-	return emulate;
+	return ret;

 out_gpte_changed:
 	kvm_release_pfn_clean(pfn);
-	return 0;
+	return RET_PF_RETRY;
 }

 /*
@@ -762,12 +762,12 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		if (!prefault)
 			inject_page_fault(vcpu, &walker.fault);

-		return 0;
+		return RET_PF_RETRY;
 	}

 	if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
 		shadow_page_table_clear_flood(vcpu, addr);
-		return 1;
+		return RET_PF_EMULATE;
 	}

 	vcpu->arch.write_fault_to_shadow_pgtable = false;
@@ -789,7 +789,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,

 	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
 			 &map_writable))
-		return 0;
+		return RET_PF_RETRY;

 	if (handle_abnormal_pfn(vcpu, addr, walker.gfn, pfn, walker.pte_access, &r))
 		return r;
@@ -834,7 +834,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 out_unlock:
 	spin_unlock(&vcpu->kvm->mmu_lock);
 	kvm_release_pfn_clean(pfn);
-	return 0;
+	return RET_PF_RETRY;
 }

 static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
@@ -45,6 +45,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>

 #include <asm/virtext.h>
@@ -5015,7 +5016,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);

 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5124,11 +5125,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 	 * save it.
 	 */
-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
@@ -51,6 +51,7 @@
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 #include <asm/mmu_context.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>

 #include "trace.h"
@@ -9431,7 +9432,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);

 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
@@ -9566,11 +9567,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 	 * save it.
 	 */
-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
@@ -7482,13 +7482,13 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);

 int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-	if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG_BIT)) {
+	if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
 		/*
 		 * When EFER.LME and CR0.PG are set, the processor is in
 		 * 64-bit mode (though maybe in a 32-bit code segment).
 		 * CR4.PAE and EFER.LMA must be set.
 		 */
-		if (!(sregs->cr4 & X86_CR4_PAE_BIT)
+		if (!(sregs->cr4 & X86_CR4_PAE)
 		    || !(sregs->efer & EFER_LMA))
 			return -EINVAL;
 	} else {
@@ -7821,6 +7821,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)

 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
+	kvm_lapic_reset(vcpu, init_event);
+
 	vcpu->arch.hflags = 0;

 	vcpu->arch.smi_pending = 0;
@@ -8249,10 +8251,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size)
 			return r;
 	}

-	if (!size) {
-		r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);
-		WARN_ON(r < 0);
-	}
+	if (!size)
+		vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE);

 	return 0;
 }
@@ -163,4 +163,10 @@ void __init setup_cpu_entry_areas(void)

 	for_each_possible_cpu(cpu)
 		setup_cpu_entry_area(cpu);
+
+	/*
+	 * This is the last essential update to swapper_pgdir which needs
+	 * to be synchronized to initial_page_table on 32bit.
+	 */
+	sync_initial_page_table();
 }
@@ -453,6 +453,21 @@ static inline void permanent_kmaps_init(pgd_t *pgd_base)
 }
 #endif /* CONFIG_HIGHMEM */

+void __init sync_initial_page_table(void)
+{
+	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			KERNEL_PGD_PTRS);
+
+	/*
+	 * sync back low identity map too.  It is used for example
+	 * in the 32-bit EFI stub.
+	 */
+	clone_pgd_range(initial_page_table,
+			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
+			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
+}
+
 void __init native_pagetable_init(void)
 {
 	unsigned long pfn, va;
@@ -13,6 +13,7 @@
 #include <linux/if_vlan.h>
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
+#include <asm/nospec-branch.h>
 #include <linux/bpf.h>

 int bpf_jit_enable __read_mostly;
@@ -287,7 +288,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
 	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
 	      offsetof(struct bpf_array, map.max_entries));
-#define OFFSET1 43 /* number of bytes to jump */
+#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
 	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
 	label1 = cnt;

@@ -296,7 +297,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 */
 	EMIT2_off32(0x8B, 0x85, 36);              /* mov eax, dword ptr [rbp + 36] */
 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 32
+#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
 	EMIT2(X86_JA, OFFSET2);                   /* ja out */
 	label2 = cnt;
 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
@@ -310,7 +311,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 * goto out;
 	 */
 	EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
-#define OFFSET3 10
+#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
 	EMIT2(X86_JE, OFFSET3);                   /* je out */
 	label3 = cnt;

@@ -323,7 +324,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 * rdi == ctx (1st arg)
 	 * rax == prog->bpf_func + prologue_size
 	 */
-	EMIT2(0xFF, 0xE0);                        /* jmp rax */
+	RETPOLINE_RAX_BPF_JIT();

 	/* out: */
 	BUILD_BUG_ON(cnt - label1 != OFFSET1);
@@ -79,7 +79,7 @@ static void intel_mid_power_off(void)

 static void intel_mid_reboot(void)
 {
-	intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+	intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
 }

 static unsigned long __init intel_mid_calibrate_tsc(void)
@@ -1,12 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/types.h>
 #include <linux/tick.h>
+#include <linux/percpu-defs.h>

 #include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <xen/grant_table.h>
 #include <xen/events.h>

+#include <asm/cpufeatures.h>
+#include <asm/msr-index.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 #include <asm/fixmap.h>
@@ -15,6 +18,8 @@
 #include "mmu.h"
 #include "pmu.h"

+static DEFINE_PER_CPU(u64, spec_ctrl);
+
 void xen_arch_pre_suspend(void)
 {
 	if (xen_pv_domain())
@@ -31,6 +36,9 @@ void xen_arch_post_suspend(int cancelled)

 static void xen_vcpu_notify_restore(void *data)
 {
+	if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+		wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
+
 	/* Boot processor notified via generic timekeeping_resume() */
 	if (smp_processor_id() == 0)
 		return;
@@ -40,7 +48,15 @@ static void xen_vcpu_notify_restore(void *data)

 static void xen_vcpu_notify_suspend(void *data)
 {
+	u64 tmp;
+
 	tick_suspend_local();
+
+	if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
+		rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
+		this_cpu_write(spec_ctrl, tmp);
+		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+	}
 }

 void xen_arch_resume(void)
@@ -2279,7 +2279,7 @@ blk_qc_t submit_bio(struct bio *bio)
 		unsigned int count;

 		if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
-			count = queue_logical_block_size(bio->bi_disk->queue);
+			count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
 		else
 			count = bio_sectors(bio);
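The one-liner matters because submit_bio() accounts in 512-byte sectors — what bio_sectors() returns — while queue_logical_block_size() returns bytes; a sketch of the unit conversion (standalone illustration, not kernel source):

/* 512-byte sectors are the block layer's accounting unit:
 * a 4096-byte logical block is 8 sectors, not 4096. */
static unsigned int bytes_to_sectors(unsigned int bytes)
{
	return bytes >> 9;	/* e.g. 4096 >> 9 == 8 */
}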
@@ -638,7 +638,6 @@ static void __blk_mq_requeue_request(struct request *rq)

 	trace_block_rq_requeue(q, rq);
 	wbt_requeue(q->rq_wb, &rq->issue_stat);
-	blk_mq_sched_requeue_request(rq);

 	if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
 		if (q->dma_drain_size && blk_rq_bytes(rq))
@@ -650,6 +649,9 @@ void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list)
 {
 	__blk_mq_requeue_request(rq);

+	/* this request will be re-inserted to io scheduler queue */
+	blk_mq_sched_requeue_request(rq);
+
 	BUG_ON(blk_queued_rq(rq));
 	blk_mq_add_to_requeue_list(rq, true, kick_requeue_list);
 }
@@ -814,6 +814,7 @@ static struct elevator_type kyber_sched = {
 		.limit_depth = kyber_limit_depth,
 		.prepare_request = kyber_prepare_request,
 		.finish_request = kyber_finish_request,
+		.requeue_request = kyber_finish_request,
 		.completed_request = kyber_completed_request,
 		.dispatch_request = kyber_dispatch_request,
 		.has_work = kyber_has_work,
@@ -66,10 +66,37 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
 	return 0;
 }
 #endif
+static int set_gbl_term_list(const struct dmi_system_id *id)
+{
+	acpi_gbl_parse_table_as_term_list = 1;
+	return 0;
+}

-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
+static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
+	/*
+	 * Touchpad on Dell XPS 9570/Precision M5530 doesn't work under I2C
+	 * mode.
+	 * https://bugzilla.kernel.org/show_bug.cgi?id=198515
+	 */
+	{
+		.callback = set_gbl_term_list,
+		.ident = "Dell Precision M5530",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Precision M5530"),
+		},
+	},
+	{
+		.callback = set_gbl_term_list,
+		.ident = "Dell XPS 15 9570",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "XPS 15 9570"),
+		},
+	},
 	/*
 	 * Invoke DSDT corruption work-around on all Toshiba Satellite.
+	 * DSDT will be copied to memory.
 	 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
 	 */
 	{
@@ -83,7 +110,7 @@ static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
 	{}
 };
 #else
-static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
+static const struct dmi_system_id acpi_quirks_dmi_table[] __initconst = {
 	{}
 };
 #endif
@@ -1001,11 +1028,8 @@ void __init acpi_early_init(void)

 	acpi_permanent_mmap = true;

-	/*
-	 * If the machine falls into the DMI check table,
-	 * DSDT will be copied to memory
-	 */
-	dmi_check_system(dsdt_dmi_table);
+	/* Check machine-specific quirks */
+	dmi_check_system(acpi_quirks_dmi_table);

 	status = acpi_reallocate_root_table();
 	if (ACPI_FAILURE(status)) {
@@ -83,7 +83,8 @@ static ssize_t driver_override_store(struct device *_dev,
 	struct amba_device *dev = to_amba_device(_dev);
 	char *driver_override, *old = dev->driver_override, *cp;

-	if (count > PATH_MAX)
+	/* We need to keep extra room for a newline */
+	if (count >= (PAGE_SIZE - 1))
 		return -EINVAL;

 	driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -21,6 +21,7 @@
  *
  */

+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/usb/quirks.h>
@@ -381,6 +382,21 @@ static const struct usb_device_id blacklist_table[] = {
 	{ }	/* Terminating entry */
 };

+/* The Bluetooth USB module build into some devices needs to be reset on resume,
+ * this is a problem with the platform (likely shutting off all power) not with
+ * the module itself. So we use a DMI list to match known broken platforms.
+ */
+static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
+	{
+		/* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+		},
+	},
+	{}
+};
+
 #define BTUSB_MAX_ISOC_FRAMES	10

 #define BTUSB_INTR_RUNNING	0
@@ -3013,6 +3029,9 @@ static int btusb_probe(struct usb_interface *intf,
 	hdev->send   = btusb_send_frame;
 	hdev->notify = btusb_notify;

+	if (dmi_check_system(btusb_needs_reset_resume_table))
+		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
+
 #ifdef CONFIG_PM
 	err = btusb_config_oob_wake(hdev);
 	if (err)
@@ -3099,12 +3118,6 @@ static int btusb_probe(struct usb_interface *intf,
 	if (id->driver_info & BTUSB_QCA_ROME) {
 		data->setup_on_usb = btusb_setup_qca;
 		hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
-
-		/* QCA Rome devices lose their updated firmware over suspend,
-		 * but the USB hub doesn't notice any status change.
-		 * explicitly request a device reset on resume.
-		 */
-		interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
 	}

 #ifdef CONFIG_BT_HCIBTUSB_RTL
@ -457,7 +457,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
|
||||
size_t count)
|
||||
{
|
||||
int size = 0;
|
||||
int expected;
|
||||
u32 expected;
|
||||
|
||||
if (!chip)
|
||||
return -EBUSY;
|
||||
@ -474,7 +474,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
|
||||
}
|
||||
|
||||
expected = be32_to_cpu(*(__be32 *)(buf + 2));
|
||||
if (expected > count) {
|
||||
if (expected > count || expected < TPM_HEADER_SIZE) {
|
||||
size = -EIO;
|
||||
goto out;
|
||||
}
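The TPM recv fixes in this series all harden the same spot: the driver reads the response header, pulls the 32-bit big-endian total length from byte offset 2, and previously only rejected values larger than the caller's buffer. A length below the header size now also fails, closing an underflow when later `expected - TPM_HEADER_SIZE` style arithmetic runs. A condensed sketch of the check (a restatement of the pattern above, not a new API):

	/* TPM response header: 2-byte tag, 4-byte total length, 4-byte code */
	u32 expected;

	if (count < TPM_HEADER_SIZE)
		return -EIO;		/* not even a full header */
	expected = be32_to_cpu(*(__be32 *)(buf + 2));
	if (expected > count || expected < TPM_HEADER_SIZE)
		return -EIO;		/* oversized or truncated claim */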
@@ -1228,6 +1228,10 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max)
break;

recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len);
if (recd > num_bytes) {
total = -EFAULT;
break;
}

rlength = be32_to_cpu(tpm_cmd.header.out.length);
if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +

@@ -683,6 +683,10 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
if (!rc) {
data_len = be16_to_cpup(
(__be16 *) &buf.data[TPM_HEADER_SIZE + 4]);
if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) {
rc = -EFAULT;
goto out;
}

rlength = be32_to_cpu(((struct tpm2_cmd *)&buf)
->header.out.length);

@@ -473,7 +473,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
int size = 0;
int expected, status;
int status;
u32 expected;

if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -488,7 +489,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}

expected = be32_to_cpu(*(__be32 *)(buf + 2));
if ((size_t) expected > count) {
if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
size = -EIO;
goto out;
}

@@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
s32 rc;
int expected, status, burst_count, retries, size = 0;
int status;
int burst_count;
int retries;
int size = 0;
u32 expected;

if (count < TPM_HEADER_SIZE) {
i2c_nuvoton_ready(chip); /* return to idle */
@@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
* to machine native
*/
expected = be32_to_cpu(*(__be32 *) (buf + 2));
if (expected > count) {
if (expected > count || expected < size) {
dev_err(dev, "%s() expected > count\n", __func__);
size = -EIO;
continue;

@@ -223,7 +223,7 @@ static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
}

static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *value)
const u8 *value)
{
struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);

@@ -202,7 +202,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int size = 0;
int expected, status;
int status;
u32 expected;

if (count < TPM_HEADER_SIZE) {
size = -EIO;
@@ -217,7 +218,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
}

expected = be32_to_cpu(*(__be32 *) (buf + 2));
if (expected > count) {
if (expected > count || expected < TPM_HEADER_SIZE) {
size = -EIO;
goto out;
}
@@ -252,7 +253,7 @@ out:
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc, status, burstcnt;
@@ -343,7 +344,7 @@ static void disable_interrupts(struct tpm_chip *chip)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
{
struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
int rc;

@@ -98,7 +98,7 @@ struct tpm_tis_phy_ops {
int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *result);
int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *value);
const u8 *value);
int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result);
int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result);
int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src);
@@ -128,7 +128,7 @@ static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
}

static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, u8 *value)
u16 len, const u8 *value)
{
return data->phy_ops->write_bytes(data, addr, len, value);
}

@@ -46,9 +46,7 @@
struct tpm_tis_spi_phy {
struct tpm_tis_data priv;
struct spi_device *spi_device;

u8 tx_buf[4];
u8 rx_buf[4];
u8 *iobuf;
};

static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data)
@@ -57,7 +55,7 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da
}

static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
u8 *buffer, u8 direction)
u8 *in, const u8 *out)
{
struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
int ret = 0;
@@ -71,14 +69,14 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
while (len) {
transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);

phy->tx_buf[0] = direction | (transfer_len - 1);
phy->tx_buf[1] = 0xd4;
phy->tx_buf[2] = addr >> 8;
phy->tx_buf[3] = addr;
phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
phy->iobuf[1] = 0xd4;
phy->iobuf[2] = addr >> 8;
phy->iobuf[3] = addr;

memset(&spi_xfer, 0, sizeof(spi_xfer));
spi_xfer.tx_buf = phy->tx_buf;
spi_xfer.rx_buf = phy->rx_buf;
spi_xfer.tx_buf = phy->iobuf;
spi_xfer.rx_buf = phy->iobuf;
spi_xfer.len = 4;
spi_xfer.cs_change = 1;

@@ -88,9 +86,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;

if ((phy->rx_buf[3] & 0x01) == 0) {
if ((phy->iobuf[3] & 0x01) == 0) {
// handle SPI wait states
phy->tx_buf[0] = 0;
phy->iobuf[0] = 0;

for (i = 0; i < TPM_RETRY; i++) {
spi_xfer.len = 1;
@@ -99,7 +97,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
ret = spi_sync_locked(phy->spi_device, &m);
if (ret < 0)
goto exit;
if (phy->rx_buf[0] & 0x01)
if (phy->iobuf[0] & 0x01)
break;
}

@@ -113,12 +111,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
spi_xfer.len = transfer_len;
spi_xfer.delay_usecs = 5;

if (direction) {
if (in) {
spi_xfer.tx_buf = NULL;
spi_xfer.rx_buf = buffer;
} else {
spi_xfer.tx_buf = buffer;
} else if (out) {
spi_xfer.rx_buf = NULL;
memcpy(phy->iobuf, out, transfer_len);
out += transfer_len;
}

spi_message_init(&m);
@@ -127,8 +125,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;

if (in) {
memcpy(in, phy->iobuf, transfer_len);
in += transfer_len;
}

len -= transfer_len;
buffer += transfer_len;
}

exit:
@@ -139,13 +141,13 @@ exit:
static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, u8 *result)
{
return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
return tpm_tis_spi_transfer(data, addr, len, result, NULL);
}

static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
u16 len, u8 *value)
u16 len, const u8 *value)
{
return tpm_tis_spi_transfer(data, addr, len, value, 0);
return tpm_tis_spi_transfer(data, addr, len, NULL, value);
}

static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
@@ -194,6 +196,10 @@ static int tpm_tis_spi_probe(struct spi_device *dev)

phy->spi_device = dev;

phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
if (!phy->iobuf)
return -ENOMEM;

return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
NULL);
}
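The tx_buf[4]/rx_buf[4] on-stack arrays are replaced by one heap buffer because the SPI core may DMA-map the transfer, and DMA to stack memory is not reliable; pointing tx_buf and rx_buf at the same kmalloc'd buffer also gives the full-duplex exchange the TIS protocol needs. A sketch of the allocation and transfer setup, assuming a hypothetical phy wrapper (names illustrative):

	struct example_spi_phy {
		struct spi_device *spi;
		u8 *iobuf;	/* kmalloc'd, therefore DMA-capable */
	};

	/* in probe: devm_kmalloc() memory is freed with the device */
	phy->iobuf = devm_kmalloc(&spi->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
	if (!phy->iobuf)
		return -ENOMEM;

	struct spi_transfer xfer = {
		.tx_buf = phy->iobuf,	/* same buffer both directions: */
		.rx_buf = phy->iobuf,	/* rx overwrites tx in place */
		.len = 4,
	};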
@@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
static int s3c_cpufreq_init(struct cpufreq_policy *policy)
{
policy->clk = clk_arm;
return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);

policy->cpuinfo.transition_latency = cpu_cur.info->latency;

if (ftab)
return cpufreq_table_validate_and_show(policy, ftab);

return 0;
}

static int __init s3c_cpufreq_initclks(void)

@@ -279,7 +279,7 @@ static const u32 correrrthrsld[] = {
* sbridge structs
*/

#define NUM_CHANNELS 4 /* Max channels per MC */
#define NUM_CHANNELS 6 /* Max channels per MC */
#define MAX_DIMMS 3 /* Max DIMMS per channel */
#define KNL_MAX_CHAS 38 /* KNL max num. of Cache Home Agents */
#define KNL_MAX_CHANNELS 6 /* KNL max num. of PCI channels */

@@ -8522,6 +8522,10 @@ static int remove_and_add_spares(struct mddev *mddev,
int removed = 0;
bool remove_some = false;

if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
/* Mustn't remove devices when resync thread is running */
return 0;

rdev_for_each(rdev, mddev) {
if ((this == NULL || rdev == this) &&
rdev->raid_disk >= 0 &&

@@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)
* New users must use I2C client binding directly!
*/
struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
struct i2c_adapter *i2c,
struct i2c_adapter **tuner_i2c_adapter)
{
struct i2c_client *client;
struct i2c_board_info board_info;
struct m88ds3103_platform_data pdata;
struct m88ds3103_platform_data pdata = {};

pdata.clk = cfg->clock;
pdata.i2c_wr_max = cfg->i2c_wr_max;
@@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client,
case M88DS3103_CHIP_ID:
break;
default:
ret = -ENODEV;
dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id);
goto err_kfree;
}

@@ -129,7 +129,7 @@ static void get_full_task_comm(struct task_entry *task_entry,
struct mm_struct *mm = task->mm;

/* fill the first TASK_COMM_LEN bytes with thread name */
get_task_comm(task_entry->comm, task);
__get_task_comm(task_entry->comm, TASK_COMM_LEN, task);
i = strlen(task_entry->comm);
while (i < TASK_COMM_LEN)
task_entry->comm[i++] = ' ';

@@ -487,6 +487,7 @@ static unsigned long exynos_dwmmc_caps[4] = {

static const struct dw_mci_drv_data exynos_drv_data = {
.caps = exynos_dwmmc_caps,
.num_caps = ARRAY_SIZE(exynos_dwmmc_caps),
.init = dw_mci_exynos_priv_init,
.set_ios = dw_mci_exynos_set_ios,
.parse_dt = dw_mci_exynos_parse_dt,

@@ -135,6 +135,9 @@ static int dw_mci_hi6220_parse_dt(struct dw_mci *host)
if (priv->ctrl_id < 0)
priv->ctrl_id = 0;

if (priv->ctrl_id >= TIMING_MODE)
return -EINVAL;

host->priv = priv;
return 0;
}
@@ -207,6 +210,7 @@ static int dw_mci_hi6220_execute_tuning(struct dw_mci_slot *slot, u32 opcode)

static const struct dw_mci_drv_data hi6220_data = {
.caps = dw_mci_hi6220_caps,
.num_caps = ARRAY_SIZE(dw_mci_hi6220_caps),
.switch_voltage = dw_mci_hi6220_switch_voltage,
.set_ios = dw_mci_hi6220_set_ios,
.parse_dt = dw_mci_hi6220_parse_dt,

@@ -319,6 +319,7 @@ static const struct dw_mci_drv_data rk2928_drv_data = {

static const struct dw_mci_drv_data rk3288_drv_data = {
.caps = dw_mci_rk3288_dwmmc_caps,
.num_caps = ARRAY_SIZE(dw_mci_rk3288_dwmmc_caps),
.set_ios = dw_mci_rk3288_set_ios,
.execute_tuning = dw_mci_rk3288_execute_tuning,
.parse_dt = dw_mci_rk3288_parse_dt,

@@ -195,6 +195,7 @@ static unsigned long zx_dwmmc_caps[3] = {

static const struct dw_mci_drv_data zx_drv_data = {
.caps = zx_dwmmc_caps,
.num_caps = ARRAY_SIZE(zx_dwmmc_caps),
.execute_tuning = dw_mci_zx_execute_tuning,
.prepare_hs400_tuning = dw_mci_zx_prepare_hs400_tuning,
.parse_dt = dw_mci_zx_parse_dt,

@@ -165,6 +165,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
{
struct dw_mci *host = s->private;

pm_runtime_get_sync(host->dev);

seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
@@ -172,6 +174,8 @@ static int dw_mci_regs_show(struct seq_file *s, void *v)
seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));

pm_runtime_put_autosuspend(host->dev);

return 0;
}

@@ -2758,12 +2762,57 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}

static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
{
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = host->drv_data;
struct mmc_host *mmc = slot->mmc;
int ctrl_id;

if (host->pdata->caps)
mmc->caps = host->pdata->caps;

/*
* Support MMC_CAP_ERASE by default.
* It needs to use trim/discard/erase commands.
*/
mmc->caps |= MMC_CAP_ERASE;

if (host->pdata->pm_caps)
mmc->pm_caps = host->pdata->pm_caps;

if (host->dev->of_node) {
ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
if (ctrl_id < 0)
ctrl_id = 0;
} else {
ctrl_id = to_platform_device(host->dev)->id;
}

if (drv_data && drv_data->caps) {
if (ctrl_id >= drv_data->num_caps) {
dev_err(host->dev, "invalid controller id %d\n",
ctrl_id);
return -EINVAL;
}
mmc->caps |= drv_data->caps[ctrl_id];
}

if (host->pdata->caps2)
mmc->caps2 = host->pdata->caps2;

/* Process SDIO IRQs through the sdio_irq_work. */
if (mmc->caps & MMC_CAP_SDIO_IRQ)
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;

return 0;
}
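Besides deduplicating the probe path, dw_mci_init_slot_caps() fixes a real bug: ctrl_id comes from a device-tree alias and used to index drv_data->caps unchecked. Pairing the array with a num_caps length, as the new struct field does, lets every lookup be validated; a generic sketch of that pattern (illustrative names):

	struct caps_table {
		const unsigned long *caps;
		u32 num_caps;	/* always carried next to the array */
	};

	static int lookup_caps(const struct caps_table *t, int id,
			       unsigned long *out)
	{
		if (id < 0 || (u32)id >= t->num_caps)
			return -EINVAL;	/* bad DT alias: refuse to index past the end */
		*out = t->caps[id];
		return 0;
	}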
static int dw_mci_init_slot(struct dw_mci *host)
{
struct mmc_host *mmc;
struct dw_mci_slot *slot;
const struct dw_mci_drv_data *drv_data = host->drv_data;
int ctrl_id, ret;
int ret;
u32 freq[2];

mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
@@ -2797,38 +2846,13 @@ static int dw_mci_init_slot(struct dw_mci *host)
if (!mmc->ocr_avail)
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

if (host->pdata->caps)
mmc->caps = host->pdata->caps;

/*
* Support MMC_CAP_ERASE by default.
* It needs to use trim/discard/erase commands.
*/
mmc->caps |= MMC_CAP_ERASE;

if (host->pdata->pm_caps)
mmc->pm_caps = host->pdata->pm_caps;

if (host->dev->of_node) {
ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
if (ctrl_id < 0)
ctrl_id = 0;
} else {
ctrl_id = to_platform_device(host->dev)->id;
}
if (drv_data && drv_data->caps)
mmc->caps |= drv_data->caps[ctrl_id];

if (host->pdata->caps2)
mmc->caps2 = host->pdata->caps2;

ret = mmc_of_parse(mmc);
if (ret)
goto err_host_allocated;

/* Process SDIO IRQs through the sdio_irq_work. */
if (mmc->caps & MMC_CAP_SDIO_IRQ)
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
ret = dw_mci_init_slot_caps(slot);
if (ret)
goto err_host_allocated;

/* Useful defaults if platform data is unset. */
if (host->use_dma == TRANS_MODE_IDMAC) {

@@ -542,6 +542,7 @@ struct dw_mci_slot {
/**
* dw_mci driver data - dw-mshc implementation specific driver data.
* @caps: mmc subsystem specified capabilities of the controller(s).
* @num_caps: number of capabilities specified by @caps.
* @init: early implementation specific initialization.
* @set_ios: handle bus specific extensions.
* @parse_dt: parse implementation specific device tree properties.
@@ -553,6 +554,7 @@ struct dw_mci_slot {
*/
struct dw_mci_drv_data {
unsigned long *caps;
u32 num_caps;
int (*init)(struct dw_mci *host);
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
int (*parse_dt)(struct dw_mci *host);

@@ -594,9 +594,36 @@ static void byt_read_dsm(struct sdhci_pci_slot *slot)
slot->chip->rpm_retune = intel_host->d3_retune;
}

static int intel_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
int err = sdhci_execute_tuning(mmc, opcode);
struct sdhci_host *host = mmc_priv(mmc);

if (err)
return err;

/*
* Tuning can leave the IP in an active state (Buffer Read Enable bit
* set) which prevents the entry to low power states (i.e. S0i3). Data
* reset will clear it.
*/
sdhci_reset(host, SDHCI_RESET_DATA);

return 0;
}

static void byt_probe_slot(struct sdhci_pci_slot *slot)
{
struct mmc_host_ops *ops = &slot->host->mmc_host_ops;

byt_read_dsm(slot);

ops->execute_tuning = intel_execute_tuning;
}

static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
byt_read_dsm(slot);
byt_probe_slot(slot);
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
MMC_CAP_CMD_DURING_TFR |
@@ -651,7 +678,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
int err;

byt_read_dsm(slot);
byt_probe_slot(slot);

err = ni_set_max_freq(slot);
if (err)
@@ -664,7 +691,7 @@ static int ni_byt_sdio_probe_slot(struct sdhci_pci_slot *slot)

static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
byt_read_dsm(slot);
byt_probe_slot(slot);
slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
MMC_CAP_WAIT_WHILE_BUSY;
return 0;
@@ -672,7 +699,7 @@ static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)

static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
{
byt_read_dsm(slot);
byt_probe_slot(slot);
slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY |
MMC_CAP_AGGRESSIVE_PM | MMC_CAP_CD_WAKE;
slot->cd_idx = 0;

@@ -595,7 +595,7 @@ isr_done:

reissue_mask = 1 << 0;
if (!pdata->per_channel_irq)
reissue_mask |= 0xffff < 4;
reissue_mask |= 0xffff << 4;

XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
}
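The xgbe fix above is a one-character bug worth spelling out: `0xffff < 4` is a comparison that evaluates to 0, so the per-channel bits were never set, while `0xffff << 4` is the intended shift. A standalone demonstration (plain userspace C, for illustration only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int mask = 1u << 0;

		mask |= 0xffff < 4;	/* comparison: ORs in 0, a silent no-op */
		printf("buggy mask = 0x%x\n", mask);	/* prints 0x1 */

		mask = 1u << 0;
		mask |= 0xffff << 4;	/* shift: sets bits 4..19 */
		printf("fixed mask = 0x%x\n", mask);	/* prints 0xffff1 */
		return 0;
	}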
@@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev)
struct net_device *netdev = pdata->netdev;
int ret = 0;

XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);

@@ -1877,6 +1877,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
ixgbe_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IXGBE_RX_DMA_ATTR);
} else if (ring_uses_build_skb(rx_ring)) {
unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;

dma_sync_single_range_for_cpu(rx_ring->dev,
IXGBE_CB(skb)->dma,
offset,
skb_headlen(skb),
DMA_FROM_DEVICE);
} else {
struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];

@@ -1918,13 +1918,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
param->wq.linear = 1;
}

static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));

param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2778,6 +2781,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);

return mlx5e_alloc_cq_common(mdev, param, cq);
}

@@ -2789,7 +2795,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq = &drop_rq->cq;
int err;

mlx5e_build_drop_rq_param(&rq_param);
mlx5e_build_drop_rq_param(mdev, &rq_param);

err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
if (err)

@@ -36,6 +36,7 @@
#include <linux/tcp.h>
#include <linux/bpf_trace.h>
#include <net/busy_poll.h>
#include <net/ip6_checksum.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
return true;
}

static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
{
u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
(l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);

tcp->check = 0;
tcp->psh = get_cqe_lro_tcppsh(cqe);

if (tcp_ack) {
tcp->ack = 1;
tcp->ack_seq = cqe->lro_ack_seq_num;
tcp->window = cqe->lro_tcp_win;
}
}

static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
u32 cqe_bcnt)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct tcphdr *tcp;
int network_depth = 0;
__wsum check;
__be16 proto;
u16 tot_len;
void *ip_p;

u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
(l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);

skb->mac_len = ETH_HLEN;
proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);

@@ -577,23 +591,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
ipv4->check = 0;
ipv4->check = ip_fast_csum((unsigned char *)ipv4,
ipv4->ihl);

mlx5e_lro_update_tcp_hdr(cqe, tcp);
check = csum_partial(tcp, tcp->doff * 4,
csum_unfold((__force __sum16)cqe->check_sum));
/* Almost done, don't forget the pseudo header */
tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
tot_len - sizeof(struct iphdr),
IPPROTO_TCP, check);
} else {
u16 payload_len = tot_len - sizeof(struct ipv6hdr);
struct ipv6hdr *ipv6 = ip_p;

tcp = ip_p + sizeof(struct ipv6hdr);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;

ipv6->hop_limit = cqe->lro_min_ttl;
ipv6->payload_len = cpu_to_be16(tot_len -
sizeof(struct ipv6hdr));
}
ipv6->payload_len = cpu_to_be16(payload_len);

tcp->psh = get_cqe_lro_tcppsh(cqe);

if (tcp_ack) {
tcp->ack = 1;
tcp->ack_seq = cqe->lro_ack_seq_num;
tcp->window = cqe->lro_tcp_win;
mlx5e_lro_update_tcp_hdr(cqe, tcp);
check = csum_partial(tcp, tcp->doff * 4,
csum_unfold((__force __sum16)cqe->check_sum));
/* Almost done, don't forget the pseudo header */
tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
IPPROTO_TCP, check);
}
}
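The rewritten mlx5e LRO path recomputes the TCP checksum once per merged super-packet: zero tcp->check, fold the hardware-supplied check_sum over the TCP header with csum_partial(), then add the pseudo-header for the address family. The two arms differ only in the pseudo-header helper; restated compactly (the csum helpers shown are the stock kernel API):

	/* IPv4 */
	check = csum_partial(tcp, tcp->doff * 4,
			     csum_unfold((__force __sum16)cqe->check_sum));
	tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
				       tot_len - sizeof(struct iphdr),
				       IPPROTO_TCP, check);

	/* IPv6: same fold, different pseudo-header */
	tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr,
				     payload_len, IPPROTO_TCP, check);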
@@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
if (iph->protocol != IPPROTO_UDP)
goto out;

udph = udp_hdr(skb);
/* Don't assume skb_transport_header() was set */
udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
if (udph->dest != htons(9))
goto out;

@@ -155,7 +155,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
default:
hlen = mlx5e_skb_l2_header_offset(skb);
}
return min_t(u16, hlen, skb->len);
return min_t(u16, hlen, skb_headlen(skb));
}

static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,

@@ -729,26 +729,29 @@ static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
u32 tb_id)
{
struct mlxsw_sp_fib *fib4;
struct mlxsw_sp_fib *fib6;
struct mlxsw_sp_vr *vr;
int err;

vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
if (!vr)
return ERR_PTR(-EBUSY);
vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr->fib4))
return ERR_CAST(vr->fib4);
vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
if (IS_ERR(vr->fib6)) {
err = PTR_ERR(vr->fib6);
fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(fib4))
return ERR_CAST(fib4);
fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
if (IS_ERR(fib6)) {
err = PTR_ERR(fib6);
goto err_fib6_create;
}
vr->fib4 = fib4;
vr->fib6 = fib6;
vr->tb_id = tb_id;
return vr;

err_fib6_create:
mlxsw_sp_fib_destroy(vr->fib4);
vr->fib4 = NULL;
mlxsw_sp_fib_destroy(fib4);
return ERR_PTR(err);
}

@@ -3029,6 +3032,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
int i;

if (!list_is_singular(&nh_grp->fib_list))
return;

for (i = 0; i < nh_grp->count; i++) {
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

@@ -1098,6 +1098,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool dynamic)
{
char *sfd_pl;
u8 num_rec;
int err;

sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1107,9 +1108,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, action, local_port);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
kfree(sfd_pl);
if (err)
goto out;

if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
err = -EBUSY;

out:
kfree(sfd_pl);
return err;
}

@@ -1134,6 +1142,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
bool adding, bool dynamic)
{
char *sfd_pl;
u8 num_rec;
int err;

sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1144,9 +1153,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
lag_vid, lag_id);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
kfree(sfd_pl);
if (err)
goto out;

if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
err = -EBUSY;

out:
kfree(sfd_pl);
return err;
}

@@ -1191,6 +1207,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
u16 fid, u16 mid, bool adding)
{
char *sfd_pl;
u8 num_rec;
int err;

sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -1200,7 +1217,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
MLXSW_REG_SFD_REC_ACTION_NOP, mid);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
goto out;

if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
err = -EBUSY;

out:
kfree(sfd_pl);
return err;
}
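All three mlxsw hunks add the same post-write verification. The SFD register write is answered in place: the device rewrites num_rec in the response buffer to the number of records it actually processed, so capturing the count before the write and comparing afterwards detects a partially applied request. The pattern, condensed (same functions as above, no new API):

	u8 want = mlxsw_reg_sfd_num_rec_get(sfd_pl);	/* records we packed */

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (!err && want != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;	/* device consumed fewer records than requested */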
@@ -1618,6 +1618,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
q_idx = q_idx % cpsw->tx_ch_num;

txch = cpsw->txv[q_idx].ch;
txq = netdev_get_tx_queue(ndev, q_idx);
ret = cpsw_tx_packet_submit(priv, skb, txch);
if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n");
@@ -1628,15 +1629,26 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
* tell the kernel to stop sending us tx frames.
*/
if (unlikely(!cpdma_check_free_tx_desc(txch))) {
txq = netdev_get_tx_queue(ndev, q_idx);
netif_tx_stop_queue(txq);

/* Barrier, so that stop_queue visible to other cpus */
smp_mb__after_atomic();

if (cpdma_check_free_tx_desc(txch))
netif_tx_wake_queue(txq);
}

return NETDEV_TX_OK;
fail:
ndev->stats.tx_dropped++;
txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
netif_tx_stop_queue(txq);

/* Barrier, so that stop_queue visible to other cpus */
smp_mb__after_atomic();

if (cpdma_check_free_tx_desc(txch))
netif_tx_wake_queue(txq);

return NETDEV_TX_BUSY;
}
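The cpsw change is the standard lost-wakeup dance for a full TX ring: stop the queue, then a barrier so the completion path observes the stopped state, then re-check for free descriptors in case completions raced in while we were stopping. Distilled from the hunk above:

	netif_tx_stop_queue(txq);
	/* make the stopped state visible before re-reading the ring;
	 * pairs with the barrier on the TX-completion side */
	smp_mb__after_atomic();
	if (cpdma_check_free_tx_desc(txch))
		netif_tx_wake_queue(txq);	/* a completion raced us: undo the stop */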
@@ -842,7 +842,7 @@ void phy_start(struct phy_device *phydev)
break;
case PHY_HALTED:
/* make sure interrupts are re-enabled for the PHY */
if (phydev->irq != PHY_POLL) {
if (phy_interrupt_is_valid(phydev)) {
err = phy_enable_interrupts(phydev);
if (err < 0)
break;

@@ -3158,6 +3158,15 @@ ppp_connect_channel(struct channel *pch, int unit)
goto outl;

ppp_lock(ppp);
spin_lock_bh(&pch->downl);
if (!pch->chan) {
/* Don't connect unregistered channels */
spin_unlock_bh(&pch->downl);
ppp_unlock(ppp);
ret = -ENOTCONN;
goto outl;
}
spin_unlock_bh(&pch->downl);
if (pch->file.hdrlen > ppp->file.hdrlen)
ppp->file.hdrlen = pch->file.hdrlen;
hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */

@@ -1315,6 +1315,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
else
*skb_xdp = 0;

preempt_disable();
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog && !*skb_xdp) {
@@ -1333,9 +1334,11 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
xdp_do_flush_map();
if (err)
goto err_redirect;
rcu_read_unlock();
preempt_enable();
return NULL;
case XDP_TX:
xdp_xmit = true;
@@ -1357,6 +1360,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
skb = build_skb(buf, buflen);
if (!skb) {
rcu_read_unlock();
preempt_enable();
return ERR_PTR(-ENOMEM);
}

@@ -1369,10 +1373,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
skb->dev = tun->dev;
generic_xdp_tx(skb, xdp_prog);
rcu_read_unlock();
preempt_enable();
return NULL;
}

rcu_read_unlock();
preempt_enable();

return skb;

@@ -1380,6 +1386,7 @@ err_redirect:
put_page(alloc_frag->page);
err_xdp:
rcu_read_unlock();
preempt_enable();
this_cpu_inc(tun->pcpu_stats->rx_dropped);
return NULL;
}

@@ -1995,8 +1995,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}

/* Make sure NAPI is not using any XDP TX queues for RX. */
for (i = 0; i < vi->max_queue_pairs; i++)
napi_disable(&vi->rq[i].napi);
if (netif_running(dev))
for (i = 0; i < vi->max_queue_pairs; i++)
napi_disable(&vi->rq[i].napi);

netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
@@ -2015,7 +2016,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
if (old_prog)
bpf_prog_put(old_prog);
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
if (netif_running(dev))
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
}

return 0;

@@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg)
ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
0, NULL);
proto->restart_counter--;
} else
} else if (netif_carrier_ok(proto->dev))
ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
0, NULL);
else
ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
0, NULL);
break;

@@ -88,7 +88,6 @@ enum nvme_rdma_queue_flags {

struct nvme_rdma_queue {
struct nvme_rdma_qe *rsp_ring;
atomic_t sig_count;
int queue_size;
size_t cmnd_capsule_len;
struct nvme_rdma_ctrl *ctrl;
@@ -521,7 +520,6 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
queue->cmnd_capsule_len = sizeof(struct nvme_command);

queue->queue_size = queue_size;
atomic_set(&queue->sig_count, 0);

queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
RDMA_PS_TCP, IB_QPT_RC);
@@ -1232,21 +1230,9 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
nvme_end_request(rq, req->status, req->result);
}

/*
* We want to signal completion at least every queue depth/2. This returns the
* largest power of two that is not above half of (queue size + 1) to optimize
* (avoid divisions).
*/
static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
{
int limit = 1 << ilog2((queue->queue_size + 1) / 2);

return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
}

static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
struct ib_send_wr *first, bool flush)
struct ib_send_wr *first)
{
struct ib_send_wr wr, *bad_wr;
int ret;
@@ -1255,31 +1241,12 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
sge->length = sizeof(struct nvme_command),
sge->lkey = queue->device->pd->local_dma_lkey;

qe->cqe.done = nvme_rdma_send_done;

wr.next = NULL;
wr.wr_cqe = &qe->cqe;
wr.sg_list = sge;
wr.num_sge = num_sge;
wr.opcode = IB_WR_SEND;
wr.send_flags = 0;

/*
* Unsignalled send completions are another giant desaster in the
* IB Verbs spec: If we don't regularly post signalled sends
* the send queue will fill up and only a QP reset will rescue us.
* Would have been way to obvious to handle this in hardware or
* at least the RDMA stack..
*
* Always signal the flushes. The magic request used for the flush
* sequencer is not allocated in our driver's tagset and it's
* triggered to be freed by blk_cleanup_queue(). So we need to
* always mark it as signaled to ensure that the "wr_cqe", which is
* embedded in request's payload, is not freed when __ib_process_cq()
* calls wr_cqe->done().
*/
if (nvme_rdma_queue_sig_limit(queue) || flush)
wr.send_flags |= IB_SEND_SIGNALED;
wr.send_flags = IB_SEND_SIGNALED;

if (first)
first->next = &wr;
@@ -1329,6 +1296,12 @@ static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
return queue->ctrl->tag_set.tags[queue_idx - 1];
}

static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
{
if (unlikely(wc->status != IB_WC_SUCCESS))
nvme_rdma_wr_error(cq, wc, "ASYNC");
}

static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
@@ -1350,10 +1323,12 @@ static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
cmd->common.flags |= NVME_CMD_SGL_METABUF;
nvme_rdma_set_sg_null(cmd);

sqe->cqe.done = nvme_rdma_async_done;

ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
DMA_TO_DEVICE);

ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
WARN_ON_ONCE(ret);
}

@@ -1639,7 +1614,6 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_qe *sqe = &req->sqe;
struct nvme_command *c = sqe->data;
bool flush = false;
struct ib_device *dev;
blk_status_t ret;
int err;
@@ -1668,13 +1642,13 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
goto err;
}

sqe->cqe.done = nvme_rdma_send_done;

ib_dma_sync_single_for_device(dev, sqe->dma,
sizeof(struct nvme_command), DMA_TO_DEVICE);

if (req_op(rq) == REQ_OP_FLUSH)
flush = true;
err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
req->mr->need_inval ? &req->reg_wr.wr : NULL);
if (unlikely(err)) {
nvme_rdma_unmap_data(queue, rq);
goto err;
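With the sig_count heuristic deleted, every nvme-rdma send work request is posted signaled, so each generates its own completion and the wr_cqe embedded in the request can no longer be freed while a completion is still outstanding. The minimal signaled post, using the stock ib_verbs fields (a sketch of the pattern, not a replacement for the driver code):

	struct ib_send_wr wr = {}, *bad_wr;

	wr.wr_cqe = &qe->cqe;		/* completion routed to qe->cqe.done() */
	wr.sg_list = sge;
	wr.num_sge = num_sge;
	wr.opcode = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;	/* always signal; no counting games */

	ret = ib_post_send(queue->qp, &wr, &bad_wr);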
@@ -803,10 +803,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)

/*
* Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
* hierarchies.
* hierarchies. Note that some PCIe host implementations omit
* the root ports entirely, in which case a downstream port on
* a switch may become the root of the link state chain for all
* its subordinate endpoints.
*/
if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
!pdev->bus->parent->self) {
link->root = link;
} else {
struct pcie_link_state *parent;

@@ -580,6 +580,11 @@ struct qeth_cmd_buffer {
void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
};

static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
{
return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
}

/**
* definition of a qeth channel, used for read and write
*/
@@ -834,7 +839,7 @@ struct qeth_trap_id {
*/
static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
{
return PFN_UP(end - 1) - PFN_DOWN(start);
return PFN_UP(end) - PFN_DOWN(start);
}

static inline int qeth_get_micros(void)
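The qeth_get_elements_for_range() change fixes an off-by-one for half-open ranges: with [start, end), PFN_UP(end - 1) undercounts by one page whenever the final byte is the first byte of a new page. The cost is that an empty range now yields 1 instead of 0, which is why the callers later in this series grow start != end guards. A standalone check of both cases (plain userspace C, page size assumed 4K for the demo):

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
	#define PFN_UP(x)   (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)

	/* pages spanned by the half-open byte range [start, end) */
	static unsigned long pages_spanned(unsigned long start, unsigned long end)
	{
		if (start == end)	/* empty range: the callers must guard */
			return 0;
		return PFN_UP(end) - PFN_DOWN(start);
	}

	int main(void)
	{
		/* bytes 4095 and 4096 straddle a page boundary: 2 pages.
		 * The old PFN_UP(end - 1) math returned 1 here. */
		printf("%lu\n", pages_spanned(4095, 4097));
		return 0;
	}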
@@ -2073,7 +2073,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
unsigned long flags;
struct qeth_reply *reply = NULL;
unsigned long timeout, event_timeout;
struct qeth_ipa_cmd *cmd;
struct qeth_ipa_cmd *cmd = NULL;

QETH_CARD_TEXT(card, 2, "sendctl");

@@ -2087,23 +2087,27 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
reply->callback = reply_cb;
reply->param = reply_param;
if (card->state == CARD_STATE_DOWN)
reply->seqno = QETH_IDX_COMMAND_SEQNO;
else
reply->seqno = card->seqno.ipa++;

init_waitqueue_head(&reply->wait_q);
spin_lock_irqsave(&card->lock, flags);
list_add_tail(&reply->list, &card->cmd_waiter_list);
spin_unlock_irqrestore(&card->lock, flags);
QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);

while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;

if (IS_IPA(iob->data)) {
cmd = __ipa_cmd(iob);
cmd->hdr.seqno = card->seqno.ipa++;
reply->seqno = cmd->hdr.seqno;
event_timeout = QETH_IPA_TIMEOUT;
} else {
reply->seqno = QETH_IDX_COMMAND_SEQNO;
event_timeout = QETH_TIMEOUT;
}
qeth_prepare_control_data(card, len, iob);

if (IS_IPA(iob->data))
event_timeout = QETH_IPA_TIMEOUT;
else
event_timeout = QETH_TIMEOUT;
spin_lock_irqsave(&card->lock, flags);
list_add_tail(&reply->list, &card->cmd_waiter_list);
spin_unlock_irqrestore(&card->lock, flags);

timeout = jiffies + event_timeout;

QETH_CARD_TEXT(card, 6, "noirqpnd");
@@ -2128,9 +2132,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,

/* we have only one long running ipassist, since we can ensure
process context of this command we can sleep */
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
if ((cmd->hdr.command == IPA_CMD_SETIP) &&
(cmd->hdr.prot_version == QETH_PROT_IPV4)) {
if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
cmd->hdr.prot_version == QETH_PROT_IPV4) {
if (!wait_event_timeout(reply->wait_q,
atomic_read(&reply->received), event_timeout))
goto time_err;
@@ -2894,7 +2897,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
cmd->hdr.command = command;
cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
cmd->hdr.seqno = card->seqno.ipa;
/* cmd->hdr.seqno is set by qeth_send_control_data() */
cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
if (card->options.layer2)
@@ -3859,10 +3862,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
int qeth_get_elements_no(struct qeth_card *card,
struct sk_buff *skb, int extra_elems, int data_offset)
{
int elements = qeth_get_elements_for_range(
(addr_t)skb->data + data_offset,
(addr_t)skb->data + skb_headlen(skb)) +
qeth_get_elements_for_frags(skb);
addr_t end = (addr_t)skb->data + skb_headlen(skb);
int elements = qeth_get_elements_for_frags(skb);
addr_t start = (addr_t)skb->data + data_offset;

if (start != end)
elements += qeth_get_elements_for_range(start, end);

if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2, "Invalid size of IP packet "

@@ -40,8 +40,40 @@ struct qeth_ipaddr {
unsigned int pfxlen;
} a6;
} u;

};

static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
struct qeth_ipaddr *a2)
{
if (a1->proto != a2->proto)
return false;
if (a1->proto == QETH_PROT_IPV6)
return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
return a1->u.a4.addr == a2->u.a4.addr;
}

static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
struct qeth_ipaddr *a2)
{
/* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
* so 'proto' and 'addr' match for sure.
*
* For ucast:
* - 'mac' is always 0.
* - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
*   values are required to avoid mixups in takeover eligibility.
*
* For mcast,
* - 'mac' is mapped from the IP, and thus always matches.
* - 'mask'/'pfxlen' is always 0.
*/
if (a1->type != a2->type)
return false;
if (a1->proto == QETH_PROT_IPV6)
return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
return a1->u.a4.mask == a2->u.a4.mask;
}

static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
{
u64 ret = 0;

@@ -149,6 +149,24 @@ int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
return -EINVAL;
}

static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
struct qeth_ipaddr *query)
{
u64 key = qeth_l3_ipaddr_hash(query);
struct qeth_ipaddr *addr;

if (query->is_multicast) {
hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
if (qeth_l3_addr_match_ip(addr, query))
return addr;
} else {
hash_for_each_possible(card->ip_htable, addr, hnode, key)
if (qeth_l3_addr_match_ip(addr, query))
return addr;
}
return NULL;
}

static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
{
int i, j;
@@ -202,34 +220,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
return rc;
}

inline int
qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
{
return addr1->proto == addr2->proto &&
!memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
!memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
}

static struct qeth_ipaddr *
qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
struct qeth_ipaddr *addr;

if (tmp_addr->is_multicast) {
hash_for_each_possible(card->ip_mc_htable, addr,
hnode, qeth_l3_ipaddr_hash(tmp_addr))
if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
return addr;
} else {
hash_for_each_possible(card->ip_htable, addr,
hnode, qeth_l3_ipaddr_hash(tmp_addr))
if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
return addr;
}

return NULL;
}

int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
int rc = 0;
@@ -244,23 +234,18 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
}

addr = qeth_l3_ip_from_hash(card, tmp_addr);
if (!addr)
addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
return -ENOENT;

addr->ref_counter--;
if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL ||
addr->type == QETH_IP_TYPE_RXIP))
if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0)
return rc;
if (addr->in_progress)
return -EINPROGRESS;

if (!qeth_card_hw_is_reachable(card)) {
addr->disp_flag = QETH_DISP_ADDR_DELETE;
return 0;
}

rc = qeth_l3_deregister_addr_entry(card, addr);
if (qeth_card_hw_is_reachable(card))
rc = qeth_l3_deregister_addr_entry(card, addr);

hash_del(&addr->hnode);
kfree(addr);
@@ -272,6 +257,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
int rc = 0;
struct qeth_ipaddr *addr;
char buf[40];

QETH_CARD_TEXT(card, 4, "addip");

@@ -282,8 +268,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
}

addr = qeth_l3_ip_from_hash(card, tmp_addr);
if (!addr) {
addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
if (addr) {
if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
return -EADDRINUSE;
if (qeth_l3_addr_match_all(addr, tmp_addr)) {
addr->ref_counter++;
return 0;
}
qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
buf);
dev_warn(&card->gdev->dev,
"Registering IP address %s failed\n", buf);
return -EADDRINUSE;
} else {
addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
if (!addr)
return -ENOMEM;
@@ -323,19 +321,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
(rc == IPA_RC_LAN_OFFLINE)) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
if (addr->ref_counter < 1) {
qeth_l3_delete_ip(card, addr);
qeth_l3_deregister_addr_entry(card, addr);
hash_del(&addr->hnode);
kfree(addr);
}
} else {
hash_del(&addr->hnode);
kfree(addr);
}
} else {
if (addr->type == QETH_IP_TYPE_NORMAL ||
addr->type == QETH_IP_TYPE_RXIP)
addr->ref_counter++;
}

return rc;
}

@@ -403,11 +397,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
spin_lock_bh(&card->ip_lock);

hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
qeth_l3_deregister_addr_entry(card, addr);
hash_del(&addr->hnode);
kfree(addr);
} else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
if (addr->proto == QETH_PROT_IPV4) {
addr->in_progress = 1;
spin_unlock_bh(&card->ip_lock);
@@ -723,12 +713,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
return -ENOMEM;

spin_lock_bh(&card->ip_lock);

if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
else
qeth_l3_add_ip(card, ipaddr);

rc = qeth_l3_add_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);

kfree(ipaddr);
@@ -791,12 +776,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
return -ENOMEM;

spin_lock_bh(&card->ip_lock);

if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
else
qeth_l3_add_ip(card, ipaddr);

rc = qeth_l3_add_ip(card, ipaddr);
spin_unlock_bh(&card->ip_lock);

kfree(ipaddr);
@@ -1404,8 +1384,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
memcpy(tmp->mac, buf, sizeof(tmp->mac));
tmp->is_multicast = 1;

ipm = qeth_l3_ip_from_hash(card, tmp);
ipm = qeth_l3_find_addr_by_ip(card, tmp);
if (ipm) {
/* for mcast, by-IP match means full match */
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
} else {
ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
@@ -1488,8 +1469,9 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
sizeof(struct in6_addr));
tmp->is_multicast = 1;

ipm = qeth_l3_ip_from_hash(card, tmp);
ipm = qeth_l3_find_addr_by_ip(card, tmp);
if (ipm) {
/* for mcast, by-IP match means full match */
ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
continue;
}
@@ -2633,11 +2615,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
struct sk_buff *skb, int extra_elems)
{
addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
int elements = qeth_get_elements_for_range(
tcpdptr,
(addr_t)skb->data + skb_headlen(skb)) +
qeth_get_elements_for_frags(skb);
addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
addr_t end = (addr_t)skb->data + skb_headlen(skb);
int elements = qeth_get_elements_for_frags(skb);

if (start != end)
elements += qeth_get_elements_for_range(start, end);

if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
QETH_DBF_MESSAGE(2,

@@ -338,11 +338,12 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
{
struct page *page[1];
struct vm_area_struct *vma;
struct vm_area_struct *vmas[1];
int ret;

if (mm == current->mm) {
ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE),
page);
ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
page, vmas);
} else {
unsigned int flags = 0;

@@ -351,7 +352,18 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,

down_read(&mm->mmap_sem);
ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
NULL, NULL);
vmas, NULL);
/*
* The lifetime of a vaddr_get_pfn() page pin is
* userspace-controlled. In the fs-dax case this could
* lead to indefinite stalls in filesystem operations.
* Disallow attempts to pin fs-dax pages via this
* interface.
*/
if (ret > 0 && vma_is_fsdax(vmas[0])) {
ret = -EOPNOTSUPP;
put_page(page[0]);
}
up_read(&mm->mmap_sem);
}
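The vfio change switches to a pinning variant that also returns the VMA, so the fs-dax case can be refused: a vaddr_get_pfn() pin lives as long as userspace wants it to, and an indefinite pin on fs-dax pages can stall filesystem block reclaim forever. The post-pin filter, restated from the hunk above:

	if (ret > 0 && vma_is_fsdax(vmas[0])) {
		ret = -EOPNOTSUPP;	/* long-term pins on fs-dax are unsafe */
		put_page(page[0]);	/* drop the reference gup just took */
	}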
|
||||
|
||||
|
@@ -422,7 +422,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj,
 {
 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
 
-	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->super_copy->nodesize);
+	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->nodesize);
 }
 
 BTRFS_ATTR(nodesize, btrfs_nodesize_show);
@@ -432,8 +432,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
 {
 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
 
-	return snprintf(buf, PAGE_SIZE, "%u\n",
-			fs_info->super_copy->sectorsize);
+	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
 }
 
 BTRFS_ATTR(sectorsize, btrfs_sectorsize_show);
@@ -443,8 +442,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
 {
 	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
 
-	return snprintf(buf, PAGE_SIZE, "%u\n",
-			fs_info->super_copy->sectorsize);
+	return snprintf(buf, PAGE_SIZE, "%u\n", fs_info->sectorsize);
 }
 
 BTRFS_ATTR(clone_alignment, btrfs_clone_alignment_show);
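These sysfs handlers now report the values cached in btrfs_fs_info, which are kept in CPU byte order, instead of reading fields straight out of super_copy, whose members carry the on-disk (little-endian) representation; the raw read only happened to be right on little-endian hosts. A toy mirror of the two copies (names invented for the sketch):

#include <stdint.h>
#include <stdio.h>

struct toy_super { uint32_t nodesize; };	/* little-endian on disk */
struct toy_fs_info {
	struct toy_super *super_copy;	/* raw superblock bytes */
	uint32_t nodesize;		/* cached, byte-swapped at mount */
};

static int nodesize_show(const struct toy_fs_info *fs_info,
			 char *buf, size_t len)
{
	/* report the cached CPU-order value, not the raw disk field */
	return snprintf(buf, len, "%u\n", fs_info->nodesize);
}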
@@ -1722,19 +1722,23 @@ static void update_super_roots(struct btrfs_fs_info *fs_info)
 
 	super = fs_info->super_copy;
 
+	/* update latest btrfs_super_block::chunk_root refs */
 	root_item = &fs_info->chunk_root->root_item;
-	super->chunk_root = root_item->bytenr;
-	super->chunk_root_generation = root_item->generation;
-	super->chunk_root_level = root_item->level;
+	btrfs_set_super_chunk_root(super, root_item->bytenr);
+	btrfs_set_super_chunk_root_generation(super, root_item->generation);
+	btrfs_set_super_chunk_root_level(super, root_item->level);
 
+	/* update latest btrfs_super_block::root refs */
 	root_item = &fs_info->tree_root->root_item;
-	super->root = root_item->bytenr;
-	super->generation = root_item->generation;
-	super->root_level = root_item->level;
+	btrfs_set_super_root(super, root_item->bytenr);
+	btrfs_set_super_generation(super, root_item->generation);
+	btrfs_set_super_root_level(super, root_item->level);
+
 	if (btrfs_test_opt(fs_info, SPACE_CACHE))
-		super->cache_generation = root_item->generation;
+		btrfs_set_super_cache_generation(super, root_item->generation);
 	if (test_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags))
-		super->uuid_tree_generation = root_item->generation;
+		btrfs_set_super_uuid_tree_generation(super,
+						     root_item->generation);
 }
 
 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
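update_super_roots() is the write-side counterpart: the btrfs_set_super_*() helpers convert CPU-order values into the superblock's on-disk byte order before storing them, where the removed direct assignments wrote native-endian values. A sketch of a setter in that style, using a runtime endianness probe and the GCC/Clang bswap builtin in place of the kernel's cpu_to_le64() (toy struct, assumed semantics):

#include <stdint.h>

/* no-op on little-endian hosts, byte swap on big-endian ones */
static uint64_t toy_cpu_to_le64(uint64_t v)
{
	const union { uint16_t u16; uint8_t u8; } probe = { .u16 = 1 };

	return probe.u8 ? v : __builtin_bswap64(v);
}

struct toy_super { uint64_t root; };	/* held in on-disk byte order */

/* setter in the style of btrfs_set_super_root(): convert, then store */
static void toy_set_super_root(struct toy_super *s, uint64_t bytenr)
{
	s->root = toy_cpu_to_le64(bytenr);
}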
@@ -1252,8 +1252,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	 */
 	if (dio->is_async && iov_iter_rw(iter) == WRITE) {
 		retval = 0;
-		if ((iocb->ki_filp->f_flags & O_DSYNC) ||
-		    IS_SYNC(iocb->ki_filp->f_mapping->host))
+		if (iocb->ki_flags & IOCB_DSYNC)
 			retval = dio_set_defer_completion(dio);
 		else if (!dio->inode->i_sb->s_dio_done_wq) {
 			/*
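The O_DSYNC/IS_SYNC() pair can be folded into a single IOCB_DSYNC test because kiocb setup already derives that flag from both sources, and checking ki_flags also honors callers that set IOCB_DSYNC explicitly. A stand-in mirror of that derivation (the flag values and struct types here are invented, not the kernel's):

#include <assert.h>
#include <stdbool.h>

#define O_DSYNC_TOY	(1u << 0)	/* invented values */
#define IOCB_DSYNC_TOY	(1u << 1)

struct toy_file { unsigned int f_flags; bool inode_is_sync; };
struct toy_kiocb { unsigned int ki_flags; };

/* fold both sync sources into one iocb flag, as kiocb setup does */
static unsigned int toy_iocb_flags(const struct toy_file *f)
{
	unsigned int fl = 0;

	if ((f->f_flags & O_DSYNC_TOY) || f->inode_is_sync)
		fl |= IOCB_DSYNC_TOY;
	return fl;
}

int main(void)
{
	struct toy_file f = { .f_flags = O_DSYNC_TOY, .inode_is_sync = false };
	struct toy_kiocb iocb = { .ki_flags = toy_iocb_flags(&f) };

	/* one test now covers both O_DSYNC and a sync inode */
	assert(iocb.ki_flags & IOCB_DSYNC_TOY);
	return 0;
}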
@@ -3210,7 +3210,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
 	if (!vma_is_dax(vma))
 		return false;
 	inode = file_inode(vma->vm_file);
-	if (inode->i_mode == S_IFCHR)
+	if (S_ISCHR(inode->i_mode))
 		return false; /* device-dax */
 	return true;
 }
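This one-liner matters because i_mode carries permission bits as well as the file type: a char device with mode 0600 has i_mode == (S_IFCHR | 0600), so the direct comparison is effectively never true, while S_ISCHR() masks with S_IFMT before comparing. A self-contained demonstration:

#include <assert.h>
#include <sys/stat.h>

int main(void)
{
	mode_t mode = S_IFCHR | 0600;	/* char device, rw------- */

	assert(S_ISCHR(mode));		/* masks with S_IFMT first */
	assert(mode != S_IFCHR);	/* direct compare misses the perm bits */
	return 0;
}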
@@ -72,7 +72,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 	BUILD_BUG_ON(sizeof(_i) > sizeof(long));			\
 	BUILD_BUG_ON(sizeof(_s) > sizeof(long));			\
 									\
-	_i &= _mask;							\
-	_i;								\
+	(typeof(_i)) (_i & _mask);					\
 })
 #endif /* _LINUX_NOSPEC_H */
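array_index_nospec() now evaluates to (typeof(_i)) (_i & _mask) instead of updating _i in place, so the expression keeps the index's own type and has no side effects. The mask itself is all-ones when index < size and all-zeroes otherwise, computed without a branch so it stays correct even while the CPU speculates past a bounds check. A userspace rendering of the generic fallback (assumes arithmetic right shift of negative values, as the kernel's version does):

#include <assert.h>
#include <limits.h>

/* all-ones when index < size, all-zeroes otherwise, with no branch;
 * same shape as the kernel's generic array_index_mask_nospec() */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >>
	       (sizeof(long) * CHAR_BIT - 1);
}

int main(void)
{
	unsigned long size = 16, idx = 3;

	assert(index_mask(3, size) == ~0UL);
	assert(index_mask(16, size) == 0);
	assert(index_mask(1000, size) == 0);

	/* clamp as the macro does; a misspeculated index collapses to 0 */
	idx &= index_mask(idx, size);
	assert(idx == 3);
	return 0;
}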
@@ -64,6 +64,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
 		UDP_SKB_CB(skb)->cscov = cscov;
 		if (skb->ip_summed == CHECKSUM_COMPLETE)
 			skb->ip_summed = CHECKSUM_NONE;
+		skb->csum_valid = 0;
 	}
 
 	return 0;
@@ -26,8 +26,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
 {
 	int i;
 
-	for (i = 0; i < array->map.max_entries; i++)
+	for (i = 0; i < array->map.max_entries; i++) {
 		free_percpu(array->pptrs[i]);
+		cond_resched();
+	}
 }
 
 static int bpf_array_alloc_percpu(struct bpf_array *array)
@@ -43,6 +45,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
 			return -ENOMEM;
 		}
 		array->pptrs[i] = ptr;
+		cond_resched();
 	}
 
 	return 0;
@@ -52,11 +55,11 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-	int numa_node = bpf_map_attr_numa_node(attr);
+	int ret, numa_node = bpf_map_attr_numa_node(attr);
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
+	u64 cost, array_size, mask64;
 	struct bpf_array *array;
-	u64 array_size, mask64;
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -101,8 +104,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
-	if (array_size >= U32_MAX - PAGE_SIZE)
+	cost = array_size;
+	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);
+	if (percpu) {
+		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+		if (cost >= U32_MAX - PAGE_SIZE)
+			return ERR_PTR(-ENOMEM);
+	}
+	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+	ret = bpf_map_precharge_memlock(cost);
+	if (ret < 0)
+		return ERR_PTR(ret);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
@@ -118,20 +132,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array->map.max_entries = attr->max_entries;
 	array->map.map_flags = attr->map_flags;
 	array->map.numa_node = numa_node;
+	array->map.pages = cost;
 	array->elem_size = elem_size;
 
-	if (!percpu)
-		goto out;
-
-	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-	if (array_size >= U32_MAX - PAGE_SIZE ||
-	    bpf_array_alloc_percpu(array)) {
+	if (percpu && bpf_array_alloc_percpu(array)) {
 		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
-out:
-	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
 	return &array->map;
 }
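The arraymap change moves the full memlock charge in front of any allocation: the base array size plus, for per-CPU maps, max_entries * elem_size for every possible CPU, each step checked against U32_MAX before the total is rounded up to pages and precharged via bpf_map_precharge_memlock(). The arithmetic with hypothetical numbers (4K pages, an 8-CPU box):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* full charge for the map, computed before any allocation */
static long long map_cost_pages(uint32_t max_entries, uint32_t elem_size,
				bool percpu, unsigned int nr_cpus)
{
	unsigned long long cost = (unsigned long long)max_entries * elem_size;

	if (cost >= UINT32_MAX - PAGE_SIZE)
		return -ENOMEM;
	if (percpu) {
		cost += (unsigned long long)max_entries * elem_size * nr_cpus;
		if (cost >= UINT32_MAX - PAGE_SIZE)
			return -ENOMEM;
	}
	return (long long)(((cost + PAGE_SIZE - 1) & PAGE_MASK) / PAGE_SIZE);
}

int main(void)
{
	/* 512 entries of 64 bytes: 32768 + 262144 bytes -> 72 pages */
	printf("pages: %lld\n", map_cost_pages(512, 64, true, 8));
	return 0;
}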
@@ -471,7 +471,10 @@ static void trie_free(struct bpf_map *map)
 	struct lpm_trie_node __rcu **slot;
 	struct lpm_trie_node *node;
 
-	raw_spin_lock(&trie->lock);
+	/* Wait for outstanding programs to complete
+	 * update/lookup/delete/get_next_key and free the trie.
+	 */
+	synchronize_rcu();
 
 	/* Always start at the root and walk down to a node that has no
 	 * children. Then free that node, nullify its reference in the parent
@@ -482,10 +485,9 @@ static void trie_free(struct bpf_map *map)
 		slot = &trie->root;
 
 		for (;;) {
-			node = rcu_dereference_protected(*slot,
-					lockdep_is_held(&trie->lock));
+			node = rcu_dereference_protected(*slot, 1);
 			if (!node)
-				goto unlock;
+				goto out;
 
 			if (rcu_access_pointer(node->child[0])) {
 				slot = &node->child[0];
@@ -503,8 +505,8 @@ static void trie_free(struct bpf_map *map)
 		}
 	}
 
-unlock:
-	raw_spin_unlock(&trie->lock);
+out:
 	kfree(trie);
 }
 
 static int trie_get_next_key(struct bpf_map *map, void *key, void *next_key)
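trie_free() can drop trie->lock entirely: the free callback runs only once no program can reach the map, so synchronize_rcu() just has to wait out readers that entered before teardown, and rcu_dereference_protected(*slot, 1) documents that no further protection is needed. A userspace analogue of the unpublish-then-synchronize-then-free ordering, using liburcu (an assumption: the default urcu flavor, linked with -lurcu):

#include <stdlib.h>
#include <urcu.h>		/* userspace RCU; link with -lurcu */

struct node { int payload; };

static struct node *root;	/* RCU-protected global pointer */

static void teardown(void)
{
	struct node *old;

	/* unpublish first: no new reader can find the node */
	old = rcu_xchg_pointer(&root, NULL);

	/* wait for readers already inside their read-side critical
	 * sections, after which freeing is safe without a lock */
	synchronize_rcu();
	free(old);
}

int main(void)
{
	rcu_register_thread();
	root = malloc(sizeof(*root));
	teardown();
	rcu_unregister_thread();
	return 0;
}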
Some files were not shown because too many files have changed in this diff.