Merge android-4.14.117 (74196c0) into msm-4.14

* refs/heads/tmp-74196c0:
  Linux 4.14.117
  mm/kmemleak.c: fix unused-function warning
  media: v4l2: i2c: ov7670: Fix PLL bypass register values
  i2c: i2c-stm32f7: Fix SDADEL minimum formula
  clk: x86: Add system specific quirk to mark clocks as critical
  x86/mce: Improve error message when kernel cannot recover, p2
  powerpc/mm/hash: Handle mmap_min_addr correctly in get_unmapped_area topdown search
  selinux: never allow relabeling on context mounts
  Input: stmfts - acknowledge that setting brightness is a blocking call
  Input: snvs_pwrkey - initialize necessary driver data before enabling IRQ
  IB/core: Destroy QP if XRC QP fails
  IB/core: Fix potential memory leak while creating MAD agents
  IB/core: Unregister notifier before freeing MAD security
  ASoC: stm32: fix sai driver name initialisation
  scsi: RDMA/srpt: Fix a credit leak for aborted commands
  staging: iio: adt7316: fix the dac write calculation
  staging: iio: adt7316: fix the dac read calculation
  staging: iio: adt7316: allow adt751x to use internal vref for all dacs
  Bluetooth: btusb: request wake pin with NOAUTOEN
  perf/x86/amd: Update generic hardware cache events for Family 17h
  ARM: iop: don't use using 64-bit DMA masks
  ARM: orion: don't use using 64-bit DMA masks
  xsysace: Fix error handling in ace_setup
  sh: fix multiple function definition build errors
  hugetlbfs: fix memory leak for resv_map
  kmemleak: powerpc: skip scanning holes in the .bss section
  net: hns: Fix WARNING when remove HNS driver with SMMU enabled
  net: hns: fix ICMP6 neighbor solicitation messages discard problem
  net: hns: Fix probabilistic memory overwrite when HNS driver initialized
  net: hns: Use NAPI_POLL_WEIGHT for hns driver
  net: hns: fix KASAN: use-after-free in hns_nic_net_xmit_hw()
  scsi: storvsc: Fix calculation of sub-channel count
  scsi: core: add new RDAC LENOVO/DE_Series device
  vfio/pci: use correct format characters
  HID: input: add mapping for Assistant key
  rtc: da9063: set uie_unsupported when relevant
  debugfs: fix use-after-free on symlink traversal
  jffs2: fix use-after-free on symlink traversal
  net: stmmac: don't log oversized frames
  net: stmmac: fix dropping of multi-descriptor RX frames
  net: stmmac: don't overwrite discard_frame status
  net: stmmac: ratelimit RX error logs
  bonding: show full hw address in sysfs for slave entries
  net/mlx5: E-Switch, Fix esw manager vport indication for more vport commands
  igb: Fix WARN_ONCE on runtime suspend
  ARM: dts: rockchip: Fix gpu opp node names for rk3288
  batman-adv: Reduce tt_global hash refcnt only for removed entry
  batman-adv: Reduce tt_local hash refcnt only for removed entry
  batman-adv: Reduce claim hash refcnt only for removed entry
  rtc: sh: Fix invalid alarm warning for non-enabled alarm
  HID: debug: fix race condition with between rdesc_show() and device removal
  HID: logitech: check the return value of create_singlethread_workqueue
  nvme-loop: init nvmet_ctrl fatal_err_work when allocate
  mm: do not stall register_shrinker()
  USB: core: Fix bug caused by duplicate interface PM usage counter
  USB: core: Fix unterminated string returned by usb_string()
  usb: usbip: fix isoc packet num validation in get_pipe
  USB: w1 ds2490: Fix bug caused by improper use of altsetting array
  USB: yurex: Fix protection fault after device removal
  ALSA: hda/realtek - Fixed Dell AIO speaker noise
  ALSA: hda/realtek - Add new Dell platform for headset mode
  caif: reduce stack size with KASAN
  arm64: only advance singlestep for user instruction traps
  arm64: Fix single stepping in kernel traps
  kasan: prevent compiler from optimizing away memset in tests
  kasan: remove redundant initialization of variable 'real_size'
  net: dsa: bcm_sf2: fix buffer overflow doing set_rxnfc
  net: phy: marvell: Fix buffer overrun with stats counters
  rxrpc: Fix net namespace cleanup
  bnxt_en: Free short FW command HWRM memory in error path in bnxt_init_one()
  bnxt_en: Improve multicast address setup logic.
  packet: validate msg_namelen in send directly
  sctp: avoid running the sctp state machine recursively
  ipv6: invert flowlabel sharing check in process and user mode
  ipv6/flowlabel: wait rcu grace period before put_pid()
  ipv4: ip_do_fragment: Preserve skb_iif during fragmentation
  ALSA: line6: use dynamic buffers
  ANDROID: cuttlefish 4.14: enable CONFIG_CRYPTO_AES_NI_INTEL=y

Conflicts:
	mm/vmscan.c

Change-Id: I4b418c58280c5fd14cc329aef602b09f235ad99a
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
Blagovest Kolenichev 2019-06-25 03:08:13 -07:00
commit c80c23744e
87 changed files with 781 additions and 422 deletions

View File

@ -370,11 +370,15 @@ autosuspend the interface's device. When the usage counter is = 0
then the interface is considered to be idle, and the kernel may
autosuspend the device.
Drivers need not be concerned about balancing changes to the usage
counter; the USB core will undo any remaining "get"s when a driver
is unbound from its interface. As a corollary, drivers must not call
any of the ``usb_autopm_*`` functions after their ``disconnect``
routine has returned.
Drivers must be careful to balance their overall changes to the usage
counter. Unbalanced "get"s will remain in effect when a driver is
unbound from its interface, preventing the device from going into
runtime suspend should the interface be bound to a driver again. On
the other hand, drivers are allowed to achieve this balance by calling
the ``usb_autopm_*`` functions even after their ``disconnect`` routine
has returned -- say from within a work-queue routine -- provided they
retain an active reference to the interface (via ``usb_get_intf`` and
``usb_put_intf``).
Drivers using the async routines are responsible for their own
synchronization and mutual exclusion.
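
The rewritten documentation above makes drivers responsible for balancing their own autopm "get"s and "put"s, and allows the final "put" to run after ``disconnect()`` returns as long as the interface is pinned with ``usb_get_intf``/``usb_put_intf``. A minimal illustrative sketch of that pattern follows; the mydrv_* names are hypothetical and not part of this patch.

/*
 * Sketch of the balancing rule described above: one autopm reference is
 * taken in open(), released from a deferred work item, and the interface
 * is pinned with usb_get_intf()/usb_put_intf() so the put may legitimately
 * run after disconnect() has returned.  release_work is assumed to have
 * been set up with INIT_WORK(&drv->release_work, mydrv_release_work).
 */
#include <linux/usb.h>
#include <linux/workqueue.h>

struct mydrv {
	struct usb_interface *intf;
	struct work_struct release_work;
};

static int mydrv_open(struct mydrv *drv)
{
	int ret = usb_autopm_get_interface(drv->intf);	/* +1, resumes device */

	if (ret)
		return ret;
	usb_get_intf(drv->intf);	/* pin intf for the deferred put */
	return 0;
}

static void mydrv_release_work(struct work_struct *work)
{
	struct mydrv *drv = container_of(work, struct mydrv, release_work);

	usb_autopm_put_interface(drv->intf);	/* -1, balances mydrv_open() */
	usb_put_intf(drv->intf);		/* drop the pinning reference */
}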

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 116
SUBLEVEL = 117
EXTRAVERSION =
NAME = Petit Gorille

View File

@ -1181,27 +1181,27 @@
gpu_opp_table: gpu-opp-table {
compatible = "operating-points-v2";
opp@100000000 {
opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <950000>;
};
opp@200000000 {
opp-200000000 {
opp-hz = /bits/ 64 <200000000>;
opp-microvolt = <950000>;
};
opp@300000000 {
opp-300000000 {
opp-hz = /bits/ 64 <300000000>;
opp-microvolt = <1000000>;
};
opp@400000000 {
opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <1100000>;
};
opp@500000000 {
opp-500000000 {
opp-hz = /bits/ 64 <500000000>;
opp-microvolt = <1200000>;
};
opp@600000000 {
opp-600000000 {
opp-hz = /bits/ 64 <600000000>;
opp-microvolt = <1250000>;
};

View File

@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
}
};
static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
static struct iop_adma_platform_data iop13xx_adma_0_data = {
.hw_id = 0,
.pool_size = PAGE_SIZE,
@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
.resource = iop13xx_adma_0_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_0_data,
},
};
@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
.resource = iop13xx_adma_1_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_1_data,
},
};
@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
.resource = iop13xx_adma_2_resources,
.dev = {
.dma_mask = &iop13xx_adma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop13xx_adma_2_data,
},
};

View File

@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
}
};
u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
static struct platform_device iop13xx_tpmi_0_device = {
.name = "iop-tpmi",
.id = 0,
@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
.resource = iop13xx_tpmi_0_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
.resource = iop13xx_tpmi_1_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
.resource = iop13xx_tpmi_2_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
.resource = iop13xx_tpmi_3_resources,
.dev = {
.dma_mask = &iop13xx_tpmi_mask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};

View File

@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
.resource = iop3xx_dma_0_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_dma_0_data,
},
};
@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
.resource = iop3xx_dma_1_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_dma_1_data,
},
};
@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
.resource = iop3xx_aau_resources,
.dev = {
.dma_mask = &iop3xx_adma_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = (void *) &iop3xx_aau_data,
},
};

View File

@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
.resource = orion_xor0_shared_resources,
.dev = {
.dma_mask = &orion_xor_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &orion_xor0_pdata,
},
};
@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
.resource = orion_xor1_shared_resources,
.dev = {
.dma_mask = &orion_xor_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(64),
.coherent_dma_mask = DMA_BIT_MASK(32),
.platform_data = &orion_xor1_pdata,
},
};

View File

@ -37,6 +37,12 @@ void unregister_undef_hook(struct undef_hook *hook);
void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr);
/*
* Move regs->pc to next instruction and do necessary setup before it
* is executed.
*/
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size);
static inline int __in_irqentry_text(unsigned long ptr)
{
return ptr >= (unsigned long)&__irqentry_text_start &&

View File

@ -431,7 +431,7 @@ ret:
pr_warn_ratelimited("\"%s\" (%ld) uses obsolete SWP{B} instruction at 0x%llx\n",
current->comm, (unsigned long)current->pid, regs->pc);
regs->pc += 4;
arm64_skip_faulting_instruction(regs, 4);
return 0;
fault:
@ -512,7 +512,7 @@ ret:
pr_warn_ratelimited("\"%s\" (%ld) uses deprecated CP15 Barrier instruction at 0x%llx\n",
current->comm, (unsigned long)current->pid, regs->pc);
regs->pc += 4;
arm64_skip_faulting_instruction(regs, 4);
return 0;
}
@ -586,14 +586,14 @@ static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
static int a32_setend_handler(struct pt_regs *regs, u32 instr)
{
int rc = compat_setend_handler(regs, (instr >> 9) & 1);
regs->pc += 4;
arm64_skip_faulting_instruction(regs, 4);
return rc;
}
static int t16_setend_handler(struct pt_regs *regs, u32 instr)
{
int rc = compat_setend_handler(regs, (instr >> 3) & 1);
regs->pc += 2;
arm64_skip_faulting_instruction(regs, 2);
return rc;
}

View File

@ -1504,7 +1504,7 @@ static int emulate_mrs(struct pt_regs *regs, u32 insn)
if (!rc) {
dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
pt_regs_write_reg(regs, dst, val);
regs->pc += 4;
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
return rc;

View File

@ -262,6 +262,18 @@ void arm64_notify_die(const char *str, struct pt_regs *regs,
}
}
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
regs->pc += size;
/*
* If we were single stepping, we want to get the step exception after
* we return from the trap.
*/
if (user_mode(regs))
user_fastforward_single_step(current);
}
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);
@ -453,7 +465,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
if (ret)
arm64_notify_segfault(regs, address);
else
regs->pc += 4;
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
@ -463,7 +475,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
pt_regs_write_reg(regs, rt, val);
regs->pc += 4;
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
@ -472,7 +484,7 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
isb();
pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
regs->pc += 4;
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
@ -480,7 +492,7 @@ static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
pt_regs_write_reg(regs, rt, arch_timer_get_rate());
regs->pc += 4;
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
struct sys64_hook {
@ -845,7 +857,7 @@ static int bug_handler(struct pt_regs *regs, unsigned int esr)
}
/* If thread survives, skip over the BUG instruction and continue: */
regs->pc += AARCH64_INSN_SIZE; /* skip BRK and resume */
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
return DBG_HOOK_HANDLED;
}

View File

@ -22,6 +22,7 @@
#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
static __init void kvm_free_tmp(void)
{
/*
* Inform kmemleak about the hole in the .bss section since the
* corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
*/
kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
free_reserved_area(&kvm_tmp[kvm_tmp_index],
&kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}

View File

@ -31,6 +31,7 @@
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
@ -328,6 +329,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
unsigned long addr, found, prev;
struct vm_unmapped_area_info info;
unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
@ -344,7 +346,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
if (high_limit > DEFAULT_MAP_WINDOW)
addr += mm->context.addr_limit - DEFAULT_MAP_WINDOW;
while (addr > PAGE_SIZE) {
while (addr > min_addr) {
info.high_limit = addr;
if (!slice_scan_available(addr - 1, available, 0, &addr))
continue;
@ -356,8 +358,8 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
* Check if we need to reduce the range, or if we can
* extend it to cover the previous available slice.
*/
if (addr < PAGE_SIZE)
addr = PAGE_SIZE;
if (addr < min_addr)
addr = min_addr;
else if (slice_scan_available(addr - 1, available, 0, &prev)) {
addr = prev;
goto prev_slice;
@ -479,7 +481,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
addr = _ALIGN_UP(addr, page_size);
slice_dbg(" aligned addr=%lx\n", addr);
/* Ignore hint if it's too large or overlaps a VMA */
if (addr > high_limit - len ||
if (addr > high_limit - len || addr < mmap_min_addr ||
!slice_area_is_free(mm, addr, len))
addr = 0;
}

View File

@ -180,10 +180,10 @@ static struct sh_machine_vector __initmv sh_of_generic_mv = {
struct sh_clk_ops;
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
void __init __weak arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
}
void __init plat_irq_setup(void)
void __init __weak plat_irq_setup(void)
{
}

View File

@ -479,6 +479,7 @@ CONFIG_CRYPTO_RSA=y
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_ADIANTUM=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_AES_NI_INTEL=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ZSTD=y
CONFIG_CRYPTO_DEV_VIRTIO=y

View File

@ -116,6 +116,110 @@ static __initconst const u64 amd_hw_cache_event_ids
},
};
static __initconst const u64 amd_hw_cache_event_ids_f17h
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
[C(RESULT_MISS)] = 0xc860, /* L2$ access from DC Miss */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
[C(RESULT_MISS)] = 0,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
[C(RESULT_MISS)] = 0x0081, /* Instruction cache misses */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
[C(RESULT_MISS)] = 0xf045, /* L2 DTLB misses (PT walks) */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
[C(RESULT_MISS)] = 0xff85, /* L1 ITLB misses, L2 misses */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
[C(RESULT_MISS)] = 0x00c3, /* Retired Mispredicted BI */
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
[C(NODE)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = 0,
[C(RESULT_MISS)] = 0,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = -1,
[C(RESULT_MISS)] = -1,
},
},
};
/*
* AMD Performance Monitor K7 and later, up to and including Family 16h:
*/
@ -861,9 +965,10 @@ __init int amd_pmu_init(void)
x86_pmu.amd_nb_constraints = 0;
}
/* Events are common for all AMDs */
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
if (boot_cpu_data.x86 >= 0x17)
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
else
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));
return 0;
}

View File

@ -148,6 +148,11 @@ static struct severity {
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
KERNEL
),
MCESEV(
PANIC, "Instruction fetch error in kernel",
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
KERNEL
),
#endif
MCESEV(
PANIC, "Action required: unknown MCACOD",

View File

@ -1063,6 +1063,8 @@ static int ace_setup(struct ace_device *ace)
return 0;
err_read:
/* prevent double queue cleanup */
ace->gd->queue = NULL;
put_disk(ace->gd);
err_alloc_disk:
blk_cleanup_queue(ace->queue);

View File

@ -2893,6 +2893,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
return 0;
}
irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
0, "OOB Wake-on-BT", data);
if (ret) {
@ -2907,7 +2908,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
}
data->oob_wake_irq = irq;
disable_irq(irq);
bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
return 0;
}

View File

@ -165,7 +165,7 @@ static const struct clk_ops plt_clk_ops = {
};
static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
void __iomem *base,
const struct pmc_clk_data *pmc_data,
const char **parent_names,
int num_parents)
{
@ -184,9 +184,17 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
init.num_parents = num_parents;
pclk->hw.init = &init;
pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
pclk->reg = pmc_data->base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
spin_lock_init(&pclk->lock);
/*
* On some systems, the pmc_plt_clocks already enabled by the
* firmware are being marked as critical to avoid them being
* gated by the clock framework.
*/
if (pmc_data->critical && plt_clk_is_enabled(&pclk->hw))
init.flags |= CLK_IS_CRITICAL;
ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
if (ret) {
pclk = ERR_PTR(ret);
@ -332,7 +340,7 @@ static int plt_clk_probe(struct platform_device *pdev)
return PTR_ERR(parent_names);
for (i = 0; i < PMC_CLK_NUM; i++) {
data->clks[i] = plt_clk_register(pdev, i, pmc_data->base,
data->clks[i] = plt_clk_register(pdev, i, pmc_data,
parent_names, data->nparents);
if (IS_ERR(data->clks[i])) {
err = PTR_ERR(data->clks[i]);

View File

@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
seq_printf(f, "\n\n");
/* dump parsed data and input mappings */
if (down_interruptible(&hdev->driver_input_lock))
return 0;
hid_dump_device(hdev, f);
seq_printf(f, "\n");
hid_dump_input_mapping(hdev, f);
up(&hdev->driver_input_lock);
return 0;
}

View File

@ -973,6 +973,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x1b8: map_key_clear(KEY_VIDEO); break;
case 0x1bc: map_key_clear(KEY_MESSENGER); break;
case 0x1bd: map_key_clear(KEY_INFO); break;
case 0x1cb: map_key_clear(KEY_ASSISTANT); break;
case 0x201: map_key_clear(KEY_NEW); break;
case 0x202: map_key_clear(KEY_OPEN); break;
case 0x203: map_key_clear(KEY_CLOSE); break;

View File

@ -1907,6 +1907,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
kfree(data);
return -ENOMEM;
}
data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
if (!data->wq) {
kfree(data->effect_ids);
kfree(data);
return -ENOMEM;
}
data->hidpp = hidpp;
data->feature_index = feature_index;
data->version = version;
@ -1951,7 +1958,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
/* ignore boost value at response.fap.params[2] */
/* init the hardware command queue */
data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
atomic_set(&data->workqueue_size, 0);
/* initialize with zero autocenter to get wheel in usable state */

View File

@ -340,7 +340,7 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev,
STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0);
dnf_delay = setup->dnf * i2cclk;
sdadel_min = setup->fall_time - i2c_specs[setup->speed].hddat_min -
sdadel_min = i2c_specs[setup->speed].hddat_min + setup->fall_time -
af_delay_min - (setup->dnf + 3) * i2cclk;
sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time -

View File

@ -715,16 +715,20 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
agent->device->name,
agent->port_num);
if (ret)
return ret;
goto free_security;
agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
ret = register_lsm_notifier(&agent->lsm_nb);
if (ret)
return ret;
goto free_security;
agent->smp_allowed = true;
agent->lsm_nb_reg = true;
return 0;
free_security:
security_ib_free_security(agent->security);
return ret;
}
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
@ -732,9 +736,10 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
if (!rdma_protocol_ib(agent->device, agent->port_num))
return;
security_ib_free_security(agent->security);
if (agent->lsm_nb_reg)
unregister_lsm_notifier(&agent->lsm_nb);
security_ib_free_security(agent->security);
}
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)

View File

@ -766,8 +766,8 @@ struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
}
EXPORT_SYMBOL(ib_open_qp);
static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
struct ib_qp_init_attr *qp_init_attr)
static struct ib_qp *create_xrc_qp(struct ib_qp *qp,
struct ib_qp_init_attr *qp_init_attr)
{
struct ib_qp *real_qp = qp;
@ -782,10 +782,10 @@ static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
qp_init_attr->qp_context);
if (!IS_ERR(qp))
__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
else
real_qp->device->destroy_qp(real_qp);
if (IS_ERR(qp))
return qp;
__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
return qp;
}
@ -816,10 +816,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
return qp;
ret = ib_create_qp_security(qp, device);
if (ret) {
ib_destroy_qp(qp);
return ERR_PTR(ret);
}
if (ret)
goto err;
qp->device = device;
qp->real_qp = qp;
@ -834,8 +832,15 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
INIT_LIST_HEAD(&qp->sig_mrs);
qp->port = 0;
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
return ib_create_xrc_qp(qp, qp_init_attr);
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
struct ib_qp *xrc_qp = create_xrc_qp(qp, qp_init_attr);
if (IS_ERR(xrc_qp)) {
ret = PTR_ERR(xrc_qp);
goto err;
}
return xrc_qp;
}
qp->event_handler = qp_init_attr->event_handler;
qp->qp_context = qp_init_attr->qp_context;
@ -863,11 +868,8 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (qp_init_attr->cap.max_rdma_ctxs) {
ret = rdma_rw_init_mrs(qp, qp_init_attr);
if (ret) {
pr_err("failed to init MR pool ret= %d\n", ret);
ib_destroy_qp(qp);
return ERR_PTR(ret);
}
if (ret)
goto err;
}
/*
@ -880,6 +882,11 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
device->attrs.max_sge_rd);
return qp;
err:
ib_destroy_qp(qp);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_create_qp);

View File

@ -2381,8 +2381,19 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
srpt_queue_response(cmd);
}
/*
* This function is called for aborted commands if no response is sent to the
* initiator. Make sure that the credits freed by aborting a command are
* returned to the initiator the next time a response is sent by incrementing
* ch->req_lim_delta.
*/
static void srpt_aborted_task(struct se_cmd *cmd)
{
struct srpt_send_ioctx *ioctx = container_of(cmd,
struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
atomic_inc(&ch->req_lim_delta);
}
static int srpt_queue_status(struct se_cmd *cmd)

View File

@ -156,6 +156,9 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return error;
}
pdata->input = input;
platform_set_drvdata(pdev, pdata);
error = devm_request_irq(&pdev->dev, pdata->irq,
imx_snvs_pwrkey_interrupt,
0, pdev->name, pdev);
@ -171,9 +174,6 @@ static int imx_snvs_pwrkey_probe(struct platform_device *pdev)
return error;
}
pdata->input = input;
platform_set_drvdata(pdev, pdata);
device_init_wakeup(&pdev->dev, pdata->wakeup);
return 0;

View File

@ -111,27 +111,29 @@ struct stmfts_data {
bool running;
};
static void stmfts_brightness_set(struct led_classdev *led_cdev,
static int stmfts_brightness_set(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct stmfts_data *sdata = container_of(led_cdev,
struct stmfts_data, led_cdev);
int err;
if (value == sdata->led_status || !sdata->ledvdd)
return;
if (!value) {
regulator_disable(sdata->ledvdd);
} else {
err = regulator_enable(sdata->ledvdd);
if (err)
dev_warn(&sdata->client->dev,
"failed to disable ledvdd regulator: %d\n",
err);
if (value != sdata->led_status && sdata->ledvdd) {
if (!value) {
regulator_disable(sdata->ledvdd);
} else {
err = regulator_enable(sdata->ledvdd);
if (err) {
dev_warn(&sdata->client->dev,
"failed to disable ledvdd regulator: %d\n",
err);
return err;
}
}
sdata->led_status = value;
}
sdata->led_status = value;
return 0;
}
static enum led_brightness stmfts_brightness_get(struct led_classdev *led_cdev)
@ -613,7 +615,7 @@ static int stmfts_enable_led(struct stmfts_data *sdata)
sdata->led_cdev.name = STMFTS_DEV_NAME;
sdata->led_cdev.max_brightness = LED_ON;
sdata->led_cdev.brightness = LED_OFF;
sdata->led_cdev.brightness_set = stmfts_brightness_set;
sdata->led_cdev.brightness_set_blocking = stmfts_brightness_set;
sdata->led_cdev.brightness_get = stmfts_brightness_get;
err = devm_led_classdev_register(&sdata->client->dev, &sdata->led_cdev);

View File

@ -158,10 +158,10 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
#define REG_GFIX 0x69 /* Fix gain control */
#define REG_DBLV 0x6b /* PLL control an debugging */
#define DBLV_BYPASS 0x00 /* Bypass PLL */
#define DBLV_X4 0x01 /* clock x4 */
#define DBLV_X6 0x10 /* clock x6 */
#define DBLV_X8 0x11 /* clock x8 */
#define DBLV_BYPASS 0x0a /* Bypass PLL */
#define DBLV_X4 0x4a /* clock x4 */
#define DBLV_X6 0x8a /* clock x6 */
#define DBLV_X8 0xca /* clock x8 */
#define REG_REG76 0x76 /* OV's name */
#define R76_BLKPCOR 0x80 /* Black pixel correction enable */
@ -837,7 +837,7 @@ static int ov7675_set_framerate(struct v4l2_subdev *sd,
if (ret < 0)
return ret;
return ov7670_write(sd, REG_DBLV, DBLV_X4);
return 0;
}
static void ov7670_get_framerate_legacy(struct v4l2_subdev *sd,
@ -1601,11 +1601,7 @@ static int ov7670_probe(struct i2c_client *client,
if (config->clock_speed)
info->clock_speed = config->clock_speed;
/*
* It should be allowed for ov7670 too when it is migrated to
* the new frame rate formula.
*/
if (config->pll_bypass && id->driver_data != MODEL_OV7670)
if (config->pll_bypass)
info->pll_bypass = true;
if (config->pclk_hb_disable)

View File

@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
return sprintf(buf, "%pM\n", slave->perm_hwaddr);
return sprintf(buf, "%*phC\n",
slave->dev->addr_len,
slave->perm_hwaddr);
}
static SLAVE_ATTR_RO(perm_hwaddr);

View File

@ -130,6 +130,9 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
(fs->m_ext.vlan_etype || fs->m_ext.data[1]))
return -EINVAL;
if (fs->location != RX_CLS_LOC_ANY && fs->location >= CFP_NUM_RULES)
return -EINVAL;
if (fs->location != RX_CLS_LOC_ANY &&
test_bit(fs->location, priv->cfp.used))
return -EBUSY;
@ -330,6 +333,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
int ret;
u32 reg;
if (loc >= CFP_NUM_RULES)
return -EINVAL;
/* Refuse deletion of unused rules, and the default reserved rule */
if (!test_bit(loc, priv->cfp.used) || loc == 0)
return -EINVAL;

View File

@ -6768,8 +6768,15 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
skip_uc:
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
if (rc && vnic->mc_list_count) {
netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
rc);
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
}
if (rc)
netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
rc);
return rc;
@ -8234,6 +8241,7 @@ init_err_cleanup_tc:
bnxt_clear_int_mode(bp);
init_err_pci_clean:
bnxt_free_hwrm_short_cmd_req(bp);
bnxt_free_hwrm_resources(bp);
bnxt_cleanup_pci(bp);

View File

@ -150,7 +150,6 @@ out_buffer_fail:
/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
hnae_free_buffers(ring);
dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
ring->desc_num * sizeof(ring->desc[0]),
ring_to_dma_dir(ring));
@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
if (is_rx_ring(ring))
hnae_free_buffers(ring);
hnae_free_desc(ring);
kfree(ring->desc_cb);
ring->desc_cb = NULL;

View File

@ -2743,6 +2743,17 @@ int hns_dsaf_get_regs_count(void)
return DSAF_DUMP_REGS_NUM;
}
static int hns_dsaf_get_port_id(u8 port)
{
if (port < DSAF_SERVICE_NW_NUM)
return port;
if (port >= DSAF_BASE_INNER_PORT_NUM)
return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
return -EINVAL;
}
static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
{
struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@ -2808,23 +2819,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
memset(&temp_key, 0x0, sizeof(temp_key));
mask_entry.addr[0] = 0x01;
hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
port, mask_entry.addr);
0xf, mask_entry.addr);
tbl_tcam_mcast.tbl_mcast_item_vld = 1;
tbl_tcam_mcast.tbl_mcast_old_en = 0;
if (port < DSAF_SERVICE_NW_NUM) {
mskid = port;
} else if (port >= DSAF_BASE_INNER_PORT_NUM) {
mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
} else {
/* set MAC port to handle multicast */
mskid = hns_dsaf_get_port_id(port);
if (mskid == -EINVAL) {
dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
dsaf_dev->ae_dev.name, port,
mask_key.high.val, mask_key.low.val);
return;
}
dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
mskid % 32, 1);
/* set pool bit map to handle multicast */
mskid = hns_dsaf_get_port_id(port_num);
if (mskid == -EINVAL) {
dev_err(dsaf_dev->dev,
"%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
dsaf_dev->ae_dev.name, port_num,
mask_key.high.val, mask_key.low.val);
return;
}
dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
mskid % 32, 1);
memcpy(&temp_key, &mask_key, sizeof(mask_key));
hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
(struct dsaf_tbl_tcam_data *)(&mask_key),

View File

@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
}
/**

View File

@ -29,9 +29,6 @@
#define SERVICE_TIMER_HZ (1 * HZ)
#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64
#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048
@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
wmb(); /* commit all data before submit */
assert(skb->queue_mapping < priv->ae_handle->q_num);
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
ring->stats.tx_pkts++;
ring->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
@ -1099,6 +1094,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
/* issue prefetch for next Tx descriptor */
prefetch(&ring->desc_cb[ring->next_to_clean]);
}
/* update tx ring statistics. */
ring->stats.tx_pkts += pkts;
ring->stats.tx_bytes += bytes;
NETIF_TX_UNLOCK(ring);
@ -2269,7 +2267,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
@ -2282,7 +2280,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}

View File

@ -214,6 +214,8 @@
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
#define E1000_CTRL_RST 0x04000000 /* Global reset */

View File

@ -7934,9 +7934,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl, status;
u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
#ifdef CONFIG_PM
int retval = 0;
#endif
bool wake;
rtnl_lock();
netif_device_detach(netdev);
@ -7949,14 +7947,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
igb_clear_interrupt_scheme(adapter);
rtnl_unlock();
#ifdef CONFIG_PM
if (!runtime) {
retval = pci_save_state(pdev);
if (retval)
return retval;
}
#endif
status = rd32(E1000_STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@ -7973,10 +7963,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
ctrl = rd32(E1000_CTRL);
/* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC;
wr32(E1000_CTRL, ctrl);
@ -7990,12 +7976,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
wr32(E1000_WUFC, 0);
}
*enable_wake = wufc || adapter->en_mng_pt;
if (!*enable_wake)
wake = wufc || adapter->en_mng_pt;
if (!wake)
igb_power_down_link(adapter);
else
igb_power_up_link(adapter);
if (enable_wake)
*enable_wake = wake;
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@ -8038,22 +8027,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
static int __maybe_unused igb_suspend(struct device *dev)
{
int retval;
bool wake;
struct pci_dev *pdev = to_pci_dev(dev);
retval = __igb_shutdown(pdev, &wake, 0);
if (retval)
return retval;
if (wake) {
pci_prepare_to_sleep(pdev);
} else {
pci_wake_from_d3(pdev, false);
pci_set_power_state(pdev, PCI_D3hot);
}
return 0;
return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
static int __maybe_unused igb_resume(struct device *dev)
@ -8124,22 +8098,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
static int __maybe_unused igb_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
int retval;
bool wake;
retval = __igb_shutdown(pdev, &wake, 1);
if (retval)
return retval;
if (wake) {
pci_prepare_to_sleep(pdev);
} else {
pci_wake_from_d3(pdev, false);
pci_set_power_state(pdev, PCI_D3hot);
}
return 0;
return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}
static int __maybe_unused igb_runtime_resume(struct device *dev)

View File

@ -79,8 +79,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
@ -108,8 +107,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
if (vport)
MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

View File

@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(rdes0 & RDES0_OWN))
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
stats->rx_length_errors++;
return discard_frame;
}
if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
x->rx_desc++;
@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
!!(rdes0 & RDES0_FRAME_TYPE),
!!(rdes0 & ERDES0_RX_MAC_ADDR));
if (likely(ret == good_frame))
ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
!!(rdes0 & RDES0_FRAME_TYPE),
!!(rdes0 & ERDES0_RX_MAC_ADDR));
if (unlikely(rdes0 & RDES0_DRIBBLING))
x->dribbling_bit++;

View File

@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
pr_warn("%s: Oversized frame spanned multiple buffers\n",
__func__);
stats->rx_length_errors++;
return discard_frame;
}

View File

@ -3413,9 +3413,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* ignored
*/
if (frame_len > priv->dma_buf_sz) {
netdev_err(priv->dev,
"len %d larger than size (%d)\n",
frame_len, priv->dma_buf_sz);
if (net_ratelimit())
netdev_err(priv->dev,
"len %d larger than size (%d)\n",
frame_len, priv->dma_buf_sz);
priv->dev->stats.rx_length_errors++;
break;
}
@ -3473,9 +3474,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
} else {
skb = rx_q->rx_skbuff[entry];
if (unlikely(!skb)) {
netdev_err(priv->dev,
"%s: Inconsistent Rx chain\n",
priv->dev->name);
if (net_ratelimit())
netdev_err(priv->dev,
"%s: Inconsistent Rx chain\n",
priv->dev->name);
priv->dev->stats.rx_dropped++;
break;
}

View File

@ -1497,9 +1497,10 @@ static int marvell_get_sset_count(struct phy_device *phydev)
static void marvell_get_strings(struct phy_device *phydev, u8 *data)
{
int count = marvell_get_sset_count(phydev);
int i;
for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
for (i = 0; i < count; i++) {
memcpy(data + i * ETH_GSTRING_LEN,
marvell_hw_stats[i].string, ETH_GSTRING_LEN);
}
@ -1536,9 +1537,10 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
static void marvell_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data)
{
int count = marvell_get_sset_count(phydev);
int i;
for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++)
for (i = 0; i < count; i++)
data[i] = marvell_get_stat(phydev, i);
}

View File

@ -746,6 +746,15 @@ bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
return __nvmet_host_allowed(subsys, hostnqn);
}
static void nvmet_fatal_error_handler(struct work_struct *work)
{
struct nvmet_ctrl *ctrl =
container_of(work, struct nvmet_ctrl, fatal_err_work);
pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
ctrl->ops->delete_ctrl(ctrl);
}
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
@ -785,6 +794,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@ -887,21 +897,11 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
kref_put(&ctrl->ref, nvmet_ctrl_free);
}
static void nvmet_fatal_error_handler(struct work_struct *work)
{
struct nvmet_ctrl *ctrl =
container_of(work, struct nvmet_ctrl, fatal_err_work);
pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
ctrl->ops->delete_ctrl(ctrl);
}
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
mutex_lock(&ctrl->lock);
if (!(ctrl->csts & NVME_CSTS_CFS)) {
ctrl->csts |= NVME_CSTS_CFS;
INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
schedule_work(&ctrl->fatal_err_work);
}
mutex_unlock(&ctrl->lock);

View File

@ -17,6 +17,7 @@
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/platform_data/x86/clk-pmc-atom.h>
@ -421,11 +422,27 @@ static int pmc_dbgfs_register(struct pmc_dev *pmc)
}
#endif /* CONFIG_DEBUG_FS */
/*
* Some systems need one or more of their pmc_plt_clks to be
* marked as critical.
*/
static const struct dmi_system_id critclk_systems[] __initconst = {
{
.ident = "MPL CEC1x",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "MPL AG"),
DMI_MATCH(DMI_PRODUCT_NAME, "CEC10 Family"),
},
},
{ /*sentinel*/ }
};
static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
const struct pmc_data *pmc_data)
{
struct platform_device *clkdev;
struct pmc_clk_data *clk_data;
const struct dmi_system_id *d = dmi_first_match(critclk_systems);
clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
if (!clk_data)
@ -433,6 +450,10 @@ static int pmc_setup_clks(struct pci_dev *pdev, void __iomem *pmc_regmap,
clk_data->base = pmc_regmap; /* offset is added by client */
clk_data->clks = pmc_data->clks;
if (d) {
clk_data->critical = true;
pr_info("%s critclks quirk enabled\n", d->ident);
}
clkdev = platform_device_register_data(&pdev->dev, "clk-pmc-atom",
PLATFORM_DEVID_NONE,
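
Additional systems can be whitelisted by appending entries to the critclk_systems table above; a purely hypothetical entry (vendor and product strings are illustrative, not from this patch) would look like:

	{
		/* hypothetical entry for illustration only */
		.ident = "Example Board",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Product"),
		},
	},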

View File

@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
da9063_data_to_tm(data, &rtc->alarm_time, rtc);
rtc->rtc_sync = false;
/*
* TODO: some models have alarms on a minute boundary but still support
* real hardware interrupts. Add this once the core supports it.
*/
if (config->rtc_data_start != RTC_SEC)
rtc->rtc_dev->uie_unsupported = 1;
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
da9063_alarm_event,

View File

@ -462,7 +462,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
{
unsigned int byte;
int value = 0xff; /* return 0xff for ignored values */
int value = -1; /* return -1 for ignored values */
byte = readb(rtc->regbase + reg_off);
if (byte & AR_ENB) {

View File

@ -248,6 +248,7 @@ static struct {
{"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
{"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
{"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */

View File

@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"NETAPP", "INF-01-00", "rdac", },
{"LSI", "INF-01-00", "rdac", },
{"ENGENIO", "INF-01-00", "rdac", },
{"LENOVO", "DE_Series", "rdac", },
{NULL, NULL, NULL },
};

View File

@ -658,13 +658,22 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
static void handle_multichannel_storage(struct hv_device *device, int max_chns)
{
struct storvsc_device *stor_device;
int num_cpus = num_online_cpus();
int num_sc;
struct storvsc_cmd_request *request;
struct vstor_packet *vstor_packet;
int ret, t;
num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
/*
* If the number of CPUs is artificially restricted, such as
* with maxcpus=1 on the kernel boot line, Hyper-V could offer
* sub-channels >= the number of CPUs. These sub-channels
* should not be created. The primary channel is already created
* and assigned to one CPU, so check against # CPUs - 1.
*/
num_sc = min((int)(num_online_cpus() - 1), max_chns);
if (!num_sc)
return;
stor_device = get_out_stor_device(device);
if (!stor_device)
return;

View File

@ -47,6 +47,8 @@
#define ADT7516_MSB_AIN3 0xA
#define ADT7516_MSB_AIN4 0xB
#define ADT7316_DA_DATA_BASE 0x10
#define ADT7316_DA_10_BIT_LSB_SHIFT 6
#define ADT7316_DA_12_BIT_LSB_SHIFT 4
#define ADT7316_DA_MSB_DATA_REGS 4
#define ADT7316_LSB_DAC_A 0x10
#define ADT7316_MSB_DAC_A 0x11
@ -1086,7 +1088,7 @@ static ssize_t adt7316_store_DAC_internal_Vref(struct device *dev,
ldac_config = chip->ldac_config & (~ADT7516_DAC_IN_VREF_MASK);
if (data & 0x1)
ldac_config |= ADT7516_DAC_AB_IN_VREF;
else if (data & 0x2)
if (data & 0x2)
ldac_config |= ADT7516_DAC_CD_IN_VREF;
} else {
ret = kstrtou8(buf, 16, &data);
@ -1408,7 +1410,7 @@ static IIO_DEVICE_ATTR(ex_analog_temp_offset, 0644,
static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
int channel, char *buf)
{
u16 data;
u16 data = 0;
u8 msb, lsb, offset;
int ret;
@ -1433,7 +1435,11 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
if (ret)
return -EIO;
data = (msb << offset) + (lsb & ((1 << offset) - 1));
if (chip->dac_bits == 12)
data = lsb >> ADT7316_DA_12_BIT_LSB_SHIFT;
else if (chip->dac_bits == 10)
data = lsb >> ADT7316_DA_10_BIT_LSB_SHIFT;
data |= msb << offset;
return sprintf(buf, "%d\n", data);
}
@ -1441,7 +1447,7 @@ static ssize_t adt7316_show_DAC(struct adt7316_chip_info *chip,
static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
int channel, const char *buf, size_t len)
{
u8 msb, lsb, offset;
u8 msb, lsb, lsb_reg, offset;
u16 data;
int ret;
@ -1459,9 +1465,13 @@ static ssize_t adt7316_store_DAC(struct adt7316_chip_info *chip,
return -EINVAL;
if (chip->dac_bits > 8) {
lsb = data & (1 << offset);
lsb = data & ((1 << offset) - 1);
if (chip->dac_bits == 12)
lsb_reg = lsb << ADT7316_DA_12_BIT_LSB_SHIFT;
else
lsb_reg = lsb << ADT7316_DA_10_BIT_LSB_SHIFT;
ret = chip->bus.write(chip->bus.client,
ADT7316_DA_DATA_BASE + channel * 2, lsb);
ADT7316_DA_DATA_BASE + channel * 2, lsb_reg);
if (ret)
return -EIO;
}

View File

@ -473,11 +473,6 @@ static int usb_unbind_interface(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
/* Undo any residual pm_autopm_get_interface_* calls */
for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
usb_autopm_put_interface_no_suspend(intf);
atomic_set(&intf->pm_usage_cnt, 0);
if (!error)
usb_autosuspend_device(udev);
@ -1640,7 +1635,6 @@ void usb_autopm_put_interface(struct usb_interface *intf)
int status;
usb_mark_last_busy(udev);
atomic_dec(&intf->pm_usage_cnt);
status = pm_runtime_put_sync(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
@ -1669,7 +1663,6 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
int status;
usb_mark_last_busy(udev);
atomic_dec(&intf->pm_usage_cnt);
status = pm_runtime_put(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
@ -1691,7 +1684,6 @@ void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
struct usb_device *udev = interface_to_usbdev(intf);
usb_mark_last_busy(udev);
atomic_dec(&intf->pm_usage_cnt);
pm_runtime_put_noidle(&intf->dev);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
@ -1722,8 +1714,6 @@ int usb_autopm_get_interface(struct usb_interface *intf)
status = pm_runtime_get_sync(&intf->dev);
if (status < 0)
pm_runtime_put_sync(&intf->dev);
else
atomic_inc(&intf->pm_usage_cnt);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
@ -1757,8 +1747,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
status = pm_runtime_get(&intf->dev);
if (status < 0 && status != -EINPROGRESS)
pm_runtime_put_noidle(&intf->dev);
else
atomic_inc(&intf->pm_usage_cnt);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
@ -1782,7 +1770,6 @@ void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
struct usb_device *udev = interface_to_usbdev(intf);
usb_mark_last_busy(udev);
atomic_inc(&intf->pm_usage_cnt);
pm_runtime_get_noresume(&intf->dev);
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);

View File

@ -818,9 +818,11 @@ int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
if (dev->state == USB_STATE_SUSPENDED)
return -EHOSTUNREACH;
if (size <= 0 || !buf || !index)
if (size <= 0 || !buf)
return -EINVAL;
buf[0] = 0;
if (index <= 0 || index >= 256)
return -EINVAL;
tbuf = kmalloc(256, GFP_NOIO);
if (!tbuf)
return -ENOMEM;

View File

@ -318,6 +318,7 @@ static void yurex_disconnect(struct usb_interface *interface)
usb_deregister_dev(interface, &yurex_class);
/* prevent more I/O from starting */
usb_poison_urb(dev->urb);
mutex_lock(&dev->io_mutex);
dev->interface = NULL;
mutex_unlock(&dev->io_mutex);

View File

@ -775,18 +775,16 @@ static void rts51x_suspend_timer_fn(unsigned long data)
break;
case RTS51X_STAT_IDLE:
case RTS51X_STAT_SS:
usb_stor_dbg(us, "RTS51X_STAT_SS, intf->pm_usage_cnt:%d, power.usage:%d\n",
atomic_read(&us->pusb_intf->pm_usage_cnt),
usb_stor_dbg(us, "RTS51X_STAT_SS, power.usage:%d\n",
atomic_read(&us->pusb_intf->dev.power.usage_count));
if (atomic_read(&us->pusb_intf->pm_usage_cnt) > 0) {
if (atomic_read(&us->pusb_intf->dev.power.usage_count) > 0) {
usb_stor_dbg(us, "Ready to enter SS state\n");
rts51x_set_stat(chip, RTS51X_STAT_SS);
/* ignore mass storage interface's children */
pm_suspend_ignore_children(&us->pusb_intf->dev, true);
usb_autopm_put_interface_async(us->pusb_intf);
usb_stor_dbg(us, "RTS51X_STAT_SS 01, intf->pm_usage_cnt:%d, power.usage:%d\n",
atomic_read(&us->pusb_intf->pm_usage_cnt),
usb_stor_dbg(us, "RTS51X_STAT_SS 01, power.usage:%d\n",
atomic_read(&us->pusb_intf->dev.power.usage_count));
}
break;
@ -819,11 +817,10 @@ static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
int ret;
if (working_scsi(srb)) {
usb_stor_dbg(us, "working scsi, intf->pm_usage_cnt:%d, power.usage:%d\n",
atomic_read(&us->pusb_intf->pm_usage_cnt),
usb_stor_dbg(us, "working scsi, power.usage:%d\n",
atomic_read(&us->pusb_intf->dev.power.usage_count));
if (atomic_read(&us->pusb_intf->pm_usage_cnt) <= 0) {
if (atomic_read(&us->pusb_intf->dev.power.usage_count) <= 0) {
ret = usb_autopm_get_interface(us->pusb_intf);
usb_stor_dbg(us, "working scsi, ret=%d\n", ret);
}

View File

@ -383,16 +383,10 @@ static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
}
if (usb_endpoint_xfer_isoc(epd)) {
/* validate packet size and number of packets */
unsigned int maxp, packets, bytes;
maxp = usb_endpoint_maxp(epd);
maxp *= usb_endpoint_maxp_mult(epd);
bytes = pdu->u.cmd_submit.transfer_buffer_length;
packets = DIV_ROUND_UP(bytes, maxp);
/* validate number of packets */
if (pdu->u.cmd_submit.number_of_packets < 0 ||
pdu->u.cmd_submit.number_of_packets > packets) {
pdu->u.cmd_submit.number_of_packets >
USBIP_MAX_ISO_PACKETS) {
dev_err(&sdev->udev->dev,
"CMD_SUBMIT: isoc invalid num packets %d\n",
pdu->u.cmd_submit.number_of_packets);

View File

@ -135,6 +135,13 @@ extern struct device_attribute dev_attr_usbip_debug;
#define USBIP_DIR_OUT 0x00
#define USBIP_DIR_IN 0x01
/*
* Arbitrary limit for the maximum number of isochronous packets in an URB,
* compare for example the uhci_submit_isochronous function in
* drivers/usb/host/uhci-q.c
*/
#define USBIP_MAX_ISO_PACKETS 1024
/**
* struct usbip_header_basic - data pertinent to every request
* @command: the usbip request type

View File

@ -1443,11 +1443,11 @@ static void __init vfio_pci_fill_ids(void)
rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
subvendor, subdevice, class, class_mask, 0);
if (rc)
pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
vendor, device, subvendor, subdevice,
class, class_mask, rc);
else
pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
vendor, device, subvendor, subdevice,
class, class_mask);
}

View File

@ -1018,15 +1018,15 @@ static int ds_probe(struct usb_interface *intf,
/* alternative 3, 1ms interrupt (greatly speeds search), 64 byte bulk */
alt = 3;
err = usb_set_interface(dev->udev,
intf->altsetting[alt].desc.bInterfaceNumber, alt);
intf->cur_altsetting->desc.bInterfaceNumber, alt);
if (err) {
dev_err(&dev->udev->dev, "Failed to set alternative setting %d "
"for %d interface: err=%d.\n", alt,
intf->altsetting[alt].desc.bInterfaceNumber, err);
intf->cur_altsetting->desc.bInterfaceNumber, err);
goto err_out_clear;
}
iface_desc = &intf->altsetting[alt];
iface_desc = intf->cur_altsetting;
if (iface_desc->desc.bNumEndpoints != NUM_EP-1) {
pr_info("Num endpoints=%d. It is not DS9490R.\n",
iface_desc->desc.bNumEndpoints);

View File

@ -167,19 +167,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
return 0;
}
static void debugfs_evict_inode(struct inode *inode)
static void debugfs_i_callback(struct rcu_head *head)
{
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
struct inode *inode = container_of(head, struct inode, i_rcu);
if (S_ISLNK(inode->i_mode))
kfree(inode->i_link);
free_inode_nonrcu(inode);
}
static void debugfs_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, debugfs_i_callback);
}
static const struct super_operations debugfs_super_operations = {
.statfs = simple_statfs,
.remount_fs = debugfs_remount,
.show_options = debugfs_show_options,
.evict_inode = debugfs_evict_inode,
.destroy_inode = debugfs_destroy_inode,
};
static void debugfs_release_dentry(struct dentry *dentry)


@ -730,11 +730,17 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
umode_t mode, dev_t dev)
{
struct inode *inode;
struct resv_map *resv_map;
struct resv_map *resv_map = NULL;
resv_map = resv_map_alloc();
if (!resv_map)
return NULL;
/*
* Reserve maps are only needed for inodes that can have associated
* page allocations.
*/
if (S_ISREG(mode) || S_ISLNK(mode)) {
resv_map = resv_map_alloc();
if (!resv_map)
return NULL;
}
inode = new_inode(sb);
if (inode) {
@ -766,8 +772,10 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
break;
}
lockdep_annotate_inode_mutex_key(inode);
} else
kref_put(&resv_map->refs, resv_map_release);
} else {
if (resv_map)
kref_put(&resv_map->refs, resv_map_release);
}
return inode;
}

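A hedged userspace sketch of the fixed flow, with malloc()/free() standing in for resv_map_alloc() and kref_put(): the reserve map is only allocated for inode types that can map pages, and the error path only drops it when one was actually allocated.

#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

struct resv_map_sketch { int refs; };

/* Simplified stand-in for hugetlbfs_get_inode(); returns 0 on success. */
static int get_inode_sketch(mode_t mode, int inode_alloc_fails)
{
        struct resv_map_sketch *resv_map = NULL;

        /* Reserve maps are only needed for inodes that can map pages. */
        if (S_ISREG(mode) || S_ISLNK(mode)) {
                resv_map = malloc(sizeof(*resv_map));
                if (!resv_map)
                        return -1;
                resv_map->refs = 1;
        }

        if (inode_alloc_fails) {        /* new_inode() returned NULL */
                if (resv_map)           /* only drop a map that exists */
                        free(resv_map);
                return -1;
        }

        free(resv_map);                 /* sketch-only cleanup; the real inode keeps the map */
        return 0;
}

int main(void)
{
        printf("%d %d\n",
               get_inode_sketch(S_IFREG | 0644, 0),   /* regular file, success */
               get_inode_sketch(S_IFDIR | 0755, 1));  /* directory, inode alloc fails */
        return 0;
}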

@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
if (f->target) {
kfree(f->target);
f->target = NULL;
}
fds = f->dents;
while(fds) {
fd = fds;


@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
static void jffs2_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
kfree(f->target);
kmem_cache_free(jffs2_inode_cachep, f);
}
static void jffs2_destroy_inode(struct inode *inode)


@ -35,10 +35,13 @@ struct pmc_clk {
*
* @base: PMC clock register base offset
* @clks: pointer to set of registered clocks, typically 0..5
* @critical: flag to indicate if firmware enabled pmc_plt_clks
* should be marked as critical or not
*/
struct pmc_clk_data {
void __iomem *base;
const struct pmc_clk *clks;
bool critical;
};
#endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */


@ -200,7 +200,6 @@ usb_find_last_int_out_endpoint(struct usb_host_interface *alt,
* @dev: driver model's view of this device
* @usb_dev: if an interface is bound to the USB major, this will point
* to the sysfs representation for that device.
* @pm_usage_cnt: PM usage counter for this interface
* @reset_ws: Used for scheduling resets from atomic context.
* @resetting_device: USB core reset the device, so use alt setting 0 as
* current; needs bandwidth alloc after reset.
@ -257,7 +256,6 @@ struct usb_interface {
struct device dev; /* interface specific device info */
struct device *usb_dev;
atomic_t pm_usage_cnt; /* usage counter for autosuspend */
struct work_struct reset_ws; /* for resets in atomic context */
};
#define to_usb_interface(d) container_of(d, struct usb_interface, dev)


@ -32,6 +32,33 @@ void cfpkt_destroy(struct cfpkt *pkt);
*/
int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len);
static inline u8 cfpkt_extr_head_u8(struct cfpkt *pkt)
{
u8 tmp;
cfpkt_extr_head(pkt, &tmp, 1);
return tmp;
}
static inline u16 cfpkt_extr_head_u16(struct cfpkt *pkt)
{
__le16 tmp;
cfpkt_extr_head(pkt, &tmp, 2);
return le16_to_cpu(tmp);
}
static inline u32 cfpkt_extr_head_u32(struct cfpkt *pkt)
{
__le32 tmp;
cfpkt_extr_head(pkt, &tmp, 4);
return le32_to_cpu(tmp);
}
/*
* Peek header from packet.
* Reads data from packet without changing packet.

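These helpers pull little-endian fields off the packet head and return them by value, which is what lets cfctrl_recv() further down drop its per-field stack temporaries under KASAN. A simplified, runnable analogue follows; the cursor struct, helper names and the missing bounds checks are inventions of this sketch, not the caif API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical byte-stream cursor standing in for struct cfpkt. */
struct pkt { const uint8_t *data; size_t len; };

static void extr_head(struct pkt *p, void *out, size_t n)
{
        memcpy(out, p->data, n);        /* no bounds checking in this sketch */
        p->data += n;
        p->len  -= n;
}

/* Same shape as cfpkt_extr_head_u8/u16/u32: little-endian in, host order out. */
static uint8_t extr_u8(struct pkt *p)
{
        uint8_t v;
        extr_head(p, &v, 1);
        return v;
}

static uint16_t extr_u16(struct pkt *p)
{
        uint8_t b[2];
        extr_head(p, b, 2);
        return (uint16_t)(b[0] | b[1] << 8);
}

static uint32_t extr_u32(struct pkt *p)
{
        uint8_t b[4];
        extr_head(p, b, 4);
        return (uint32_t)b[0] | b[1] << 8 | b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
        const uint8_t raw[] = { 0x2a, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12 };
        struct pkt p = { raw, sizeof(raw) };

        printf("%u %u %u\n", (unsigned)extr_u8(&p), (unsigned)extr_u16(&p),
               (unsigned)extr_u32(&p));        /* prints 42 4660 305419896 */
        return 0;
}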

@ -104,7 +104,6 @@ enum sctp_verb {
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
SCTP_CMD_SEND_MSG, /* Send the whole use message */
SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
SCTP_CMD_SET_ASOC, /* Restore association context */
SCTP_CMD_LAST


@ -51,6 +51,7 @@ obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
obj-$(CONFIG_TEST_KASAN) += test_kasan.o
CFLAGS_test_kasan.o += -fno-builtin
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o
obj-$(CONFIG_TEST_LKM) += test_module.o


@ -420,7 +420,7 @@ static noinline void __init kasan_stack_oob(void)
static noinline void __init ksize_unpoisons_memory(void)
{
char *ptr;
size_t size = 123, real_size = size;
size_t size = 123, real_size;
pr_info("ksize() unpoisons the whole allocated chunk\n");
ptr = kmalloc(size, GFP_KERNEL);


@ -1376,6 +1376,7 @@ static void scan_block(void *_start, void *_end,
/*
* Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
*/
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
void *next;
@ -1387,6 +1388,7 @@ static void scan_large_block(void *start, void *end)
cond_resched();
}
}
#endif
/*
* Scan a memory block corresponding to a kmemleak_object. A condition is
@ -1504,11 +1506,6 @@ static void kmemleak_scan(void)
}
rcu_read_unlock();
/* data/bss scanning */
scan_large_block(_sdata, _edata);
scan_large_block(__bss_start, __bss_stop);
scan_large_block(__start_ro_after_init, __end_ro_after_init);
#ifdef CONFIG_SMP
/* per-cpu sections scanning */
for_each_possible_cpu(i)
@ -2039,6 +2036,17 @@ void __init kmemleak_init(void)
}
local_irq_restore(flags);
/* register the data/bss sections */
create_object((unsigned long)_sdata, _edata - _sdata,
KMEMLEAK_GREY, GFP_ATOMIC);
create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
KMEMLEAK_GREY, GFP_ATOMIC);
/* only register .data..ro_after_init if not within .data */
if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
create_object((unsigned long)__start_ro_after_init,
__end_ro_after_init - __start_ro_after_init,
KMEMLEAK_GREY, GFP_ATOMIC);
/*
* This is the point where tracking allocations is safe. Automatic
* scanning is started during the late initcall. Add the early logged


@ -502,6 +502,15 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
sc.nid = 0;
freed += do_shrink_slab(&sc, shrinker, priority);
/*
* Bail out if someone wants to register a new shrinker to
* prevent the registration from being stalled for long periods
* by parallel ongoing shrinking.
*/
if (rwsem_is_contended(&shrinker_rwsem)) {
freed = freed ? : 1;
break;
}
}
up_read(&shrinker_rwsem);


@ -803,6 +803,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
const u8 *mac, const unsigned short vid)
{
struct batadv_bla_claim search_claim, *claim;
struct batadv_bla_claim *claim_removed_entry;
struct hlist_node *claim_removed_node;
ether_addr_copy(search_claim.addr, mac);
search_claim.vid = vid;
@ -813,10 +815,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
mac, batadv_print_vid(vid));
batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim,
batadv_choose_claim, claim);
batadv_claim_put(claim); /* reference from the hash is gone */
claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
batadv_compare_claim,
batadv_choose_claim, claim);
if (!claim_removed_node)
goto free_claim;
/* reference from the hash is gone */
claim_removed_entry = hlist_entry(claim_removed_node,
struct batadv_bla_claim, hash_entry);
batadv_claim_put(claim_removed_entry);
free_claim:
/* don't need the reference from hash_find() anymore */
batadv_claim_put(claim);
}

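The same pattern repeats in the tt_global and tt_local hunks below: batadv_hash_remove() hands back the hlist node it actually unlinked, that node is mapped back to its containing entry, and only that entry's reference is dropped. A minimal userspace sketch of the container_of step, using hypothetical names.

#include <stddef.h>
#include <stdio.h>

/* Minimal hlist-style node plus a container_of() helper. */
struct hnode { struct hnode *next; };

#define container_of_sketch(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct claim_sketch {
        int refcount;
        struct hnode hash_entry;
};

int main(void)
{
        struct claim_sketch claim = { .refcount = 2 };

        /* Pretend the hash removal returned the embedded node... */
        struct hnode *removed_node = &claim.hash_entry;

        /* ...and recover the entry that owns it, so only the entry that was
         * really unlinked from the hash loses its reference. */
        struct claim_sketch *removed =
                container_of_sketch(removed_node, struct claim_sketch, hash_entry);

        removed->refcount--;
        printf("refcount now %d\n", removed->refcount);
        return 0;
}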

@ -614,14 +614,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
struct batadv_tt_global_entry *tt_global,
const char *message)
{
struct batadv_tt_global_entry *tt_removed_entry;
struct hlist_node *tt_removed_node;
batadv_dbg(BATADV_DBG_TT, bat_priv,
"Deleting global tt entry %pM (vid: %d): %s\n",
tt_global->common.addr,
batadv_print_vid(tt_global->common.vid), message);
batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
batadv_choose_tt, &tt_global->common);
batadv_tt_global_entry_put(tt_global);
tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
batadv_compare_tt,
batadv_choose_tt,
&tt_global->common);
if (!tt_removed_node)
return;
/* drop reference of removed hash entry */
tt_removed_entry = hlist_entry(tt_removed_node,
struct batadv_tt_global_entry,
common.hash_entry);
batadv_tt_global_entry_put(tt_removed_entry);
}
/**
@ -1313,9 +1325,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
unsigned short vid, const char *message,
bool roaming)
{
struct batadv_tt_local_entry *tt_removed_entry;
struct batadv_tt_local_entry *tt_local_entry;
u16 flags, curr_flags = BATADV_NO_FLAGS;
void *tt_entry_exists;
struct hlist_node *tt_removed_node;
tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
if (!tt_local_entry)
@ -1344,15 +1357,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
*/
batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
batadv_compare_tt,
batadv_choose_tt,
&tt_local_entry->common);
if (!tt_entry_exists)
if (!tt_removed_node)
goto out;
/* extra call to free the local tt entry */
batadv_tt_local_entry_put(tt_local_entry);
/* drop reference of removed hash entry */
tt_removed_entry = hlist_entry(tt_removed_node,
struct batadv_tt_local_entry,
common.hash_entry);
batadv_tt_local_entry_put(tt_removed_entry);
out:
if (tt_local_entry)


@ -352,15 +352,14 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
u8 cmdrsp;
u8 cmd;
int ret = -1;
u16 tmp16;
u8 len;
u8 param[255];
u8 linkid;
u8 linkid = 0;
struct cfctrl *cfctrl = container_obj(layer);
struct cfctrl_request_info rsp, *req;
cfpkt_extr_head(pkt, &cmdrsp, 1);
cmdrsp = cfpkt_extr_head_u8(pkt);
cmd = cmdrsp & CFCTRL_CMD_MASK;
if (cmd != CFCTRL_CMD_LINK_ERR
&& CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)
@ -378,13 +377,12 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
u8 physlinkid;
u8 prio;
u8 tmp;
u32 tmp32;
u8 *cp;
int i;
struct cfctrl_link_param linkparam;
memset(&linkparam, 0, sizeof(linkparam));
cfpkt_extr_head(pkt, &tmp, 1);
tmp = cfpkt_extr_head_u8(pkt);
serv = tmp & CFCTRL_SRV_MASK;
linkparam.linktype = serv;
@ -392,13 +390,13 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
servtype = tmp >> 4;
linkparam.chtype = servtype;
cfpkt_extr_head(pkt, &tmp, 1);
tmp = cfpkt_extr_head_u8(pkt);
physlinkid = tmp & 0x07;
prio = tmp >> 3;
linkparam.priority = prio;
linkparam.phyid = physlinkid;
cfpkt_extr_head(pkt, &endpoint, 1);
endpoint = cfpkt_extr_head_u8(pkt);
linkparam.endpoint = endpoint & 0x03;
switch (serv) {
@ -407,45 +405,43 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
cfpkt_extr_head(pkt, &linkid, 1);
linkid = cfpkt_extr_head_u8(pkt);
break;
case CFCTRL_SRV_VIDEO:
cfpkt_extr_head(pkt, &tmp, 1);
tmp = cfpkt_extr_head_u8(pkt);
linkparam.u.video.connid = tmp;
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
cfpkt_extr_head(pkt, &linkid, 1);
linkid = cfpkt_extr_head_u8(pkt);
break;
case CFCTRL_SRV_DATAGRAM:
cfpkt_extr_head(pkt, &tmp32, 4);
linkparam.u.datagram.connid =
le32_to_cpu(tmp32);
cfpkt_extr_head_u32(pkt);
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
cfpkt_extr_head(pkt, &linkid, 1);
linkid = cfpkt_extr_head_u8(pkt);
break;
case CFCTRL_SRV_RFM:
/* Construct a frame, convert
* DatagramConnectionID
* to network format long and copy it out...
*/
cfpkt_extr_head(pkt, &tmp32, 4);
linkparam.u.rfm.connid =
le32_to_cpu(tmp32);
cfpkt_extr_head_u32(pkt);
cp = (u8 *) linkparam.u.rfm.volume;
for (cfpkt_extr_head(pkt, &tmp, 1);
for (tmp = cfpkt_extr_head_u8(pkt);
cfpkt_more(pkt) && tmp != '\0';
cfpkt_extr_head(pkt, &tmp, 1))
tmp = cfpkt_extr_head_u8(pkt))
*cp++ = tmp;
*cp = '\0';
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
cfpkt_extr_head(pkt, &linkid, 1);
linkid = cfpkt_extr_head_u8(pkt);
break;
case CFCTRL_SRV_UTIL:
@ -454,13 +450,11 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
* to network format long and copy it out...
*/
/* Fifosize KB */
cfpkt_extr_head(pkt, &tmp16, 2);
linkparam.u.utility.fifosize_kb =
le16_to_cpu(tmp16);
cfpkt_extr_head_u16(pkt);
/* Fifosize bufs */
cfpkt_extr_head(pkt, &tmp16, 2);
linkparam.u.utility.fifosize_bufs =
le16_to_cpu(tmp16);
cfpkt_extr_head_u16(pkt);
/* name */
cp = (u8 *) linkparam.u.utility.name;
caif_assert(sizeof(linkparam.u.utility.name)
@ -468,24 +462,24 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
for (i = 0;
i < UTILITY_NAME_LENGTH
&& cfpkt_more(pkt); i++) {
cfpkt_extr_head(pkt, &tmp, 1);
tmp = cfpkt_extr_head_u8(pkt);
*cp++ = tmp;
}
/* Length */
cfpkt_extr_head(pkt, &len, 1);
len = cfpkt_extr_head_u8(pkt);
linkparam.u.utility.paramlen = len;
/* Param Data */
cp = linkparam.u.utility.params;
while (cfpkt_more(pkt) && len--) {
cfpkt_extr_head(pkt, &tmp, 1);
tmp = cfpkt_extr_head_u8(pkt);
*cp++ = tmp;
}
if (CFCTRL_ERR_BIT & cmdrsp)
break;
/* Link ID */
cfpkt_extr_head(pkt, &linkid, 1);
linkid = cfpkt_extr_head_u8(pkt);
/* Length */
cfpkt_extr_head(pkt, &len, 1);
len = cfpkt_extr_head_u8(pkt);
/* Param Data */
cfpkt_extr_head(pkt, &param, len);
break;
@ -522,7 +516,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
}
break;
case CFCTRL_CMD_LINK_DESTROY:
cfpkt_extr_head(pkt, &linkid, 1);
linkid = cfpkt_extr_head_u8(pkt);
cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
break;
case CFCTRL_CMD_LINK_ERR:


@ -518,6 +518,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
to->skb_iif = from->skb_iif;
skb_dst_drop(to);
skb_dst_copy(to, from);
to->dev = from->dev;


@ -94,15 +94,21 @@ static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
return fl;
}
static void fl_free_rcu(struct rcu_head *head)
{
struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);
if (fl->share == IPV6_FL_S_PROCESS)
put_pid(fl->owner.pid);
kfree(fl->opt);
kfree(fl);
}
static void fl_free(struct ip6_flowlabel *fl)
{
if (fl) {
if (fl->share == IPV6_FL_S_PROCESS)
put_pid(fl->owner.pid);
kfree(fl->opt);
kfree_rcu(fl, rcu);
}
if (fl)
call_rcu(&fl->rcu, fl_free_rcu);
}
static void fl_release(struct ip6_flowlabel *fl)
@ -634,9 +640,9 @@ recheck:
if (fl1->share == IPV6_FL_S_EXCL ||
fl1->share != fl->share ||
((fl1->share == IPV6_FL_S_PROCESS) &&
(fl1->owner.pid == fl->owner.pid)) ||
(fl1->owner.pid != fl->owner.pid)) ||
((fl1->share == IPV6_FL_S_USER) &&
uid_eq(fl1->owner.uid, fl->owner.uid)))
!uid_eq(fl1->owner.uid, fl->owner.uid)))
goto release;
err = -ENOMEM;

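Besides switching to a dedicated RCU callback (kfree_rcu() could only free the object itself, while the pid reference and fl->opt also need releasing), the hunk inverts the owner comparison: reuse of an existing label is now refused when the owners differ, not when they match. A small sketch of the corrected predicate with simplified types follows.

#include <stdbool.h>
#include <stdio.h>

enum share { FL_S_EXCL, FL_S_PROCESS, FL_S_USER };

struct owner { int pid; int uid; };

/* Corrected "may the caller attach to an existing label?" rule: refuse when
 * the label is exclusive, the share modes differ, or a process/user-shared
 * label belongs to someone else. */
static bool reuse_refused(enum share existing, struct owner eo,
                          enum share requested, struct owner ro)
{
        return existing == FL_S_EXCL ||
               existing != requested ||
               (existing == FL_S_PROCESS && eo.pid != ro.pid) ||
               (existing == FL_S_USER && eo.uid != ro.uid);
}

int main(void)
{
        struct owner a = { 100, 1000 }, b = { 200, 2000 };

        printf("%d\n", reuse_refused(FL_S_PROCESS, a, FL_S_PROCESS, a)); /* 0: same pid */
        printf("%d\n", reuse_refused(FL_S_PROCESS, a, FL_S_PROCESS, b)); /* 1: other pid */
        printf("%d\n", reuse_refused(FL_S_EXCL, a, FL_S_EXCL, a));       /* 1: exclusive */
        return 0;
}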

@ -2641,8 +2641,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
void *ph;
DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
unsigned char *addr = NULL;
int tp_len, size_max;
unsigned char *addr;
void *data;
int len_sum = 0;
int status = TP_STATUS_AVAILABLE;
@ -2653,7 +2653,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
addr = NULL;
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@ -2663,10 +2662,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
sll_addr)))
goto out;
proto = saddr->sll_protocol;
addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
if (addr && dev && saddr->sll_halen < dev->addr_len)
goto out_put;
if (po->sk.sk_socket->type == SOCK_DGRAM) {
if (dev && msg->msg_namelen < dev->addr_len +
offsetof(struct sockaddr_ll, sll_addr))
goto out_put;
addr = saddr->sll_addr;
}
}
err = -ENXIO;
@ -2838,7 +2840,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
struct sk_buff *skb;
struct net_device *dev;
__be16 proto;
unsigned char *addr;
unsigned char *addr = NULL;
int err, reserve = 0;
struct sockcm_cookie sockc;
struct virtio_net_hdr vnet_hdr = { 0 };
@ -2855,7 +2857,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
addr = NULL;
} else {
err = -EINVAL;
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
@ -2863,10 +2864,13 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
goto out;
proto = saddr->sll_protocol;
addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
if (addr && dev && saddr->sll_halen < dev->addr_len)
goto out_unlock;
if (sock->type == SOCK_DGRAM) {
if (dev && msg->msg_namelen < dev->addr_len +
offsetof(struct sockaddr_ll, sll_addr))
goto out_unlock;
addr = saddr->sll_addr;
}
}
err = -ENXIO;

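Both send paths now take the hardware address from sll_addr only for SOCK_DGRAM sockets, and only after checking that msg_namelen really covers dev->addr_len bytes of it. A runnable sketch of that length check, built on a simplified sockaddr_ll layout (the real structure lives in <linux/if_packet.h>).

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for struct sockaddr_ll. */
struct sockaddr_ll_sketch {
        unsigned short sll_family, sll_protocol;
        int            sll_ifindex;
        unsigned short sll_hatype;
        unsigned char  sll_pkttype, sll_halen;
        unsigned char  sll_addr[8];
};

/* The check added for SOCK_DGRAM sends: the caller-supplied address must be
 * long enough to hold a full hardware address for the bound device. */
static bool addr_len_ok(size_t msg_namelen, unsigned int dev_addr_len)
{
        return msg_namelen >= dev_addr_len +
                              offsetof(struct sockaddr_ll_sketch, sll_addr);
}

int main(void)
{
        size_t full = sizeof(struct sockaddr_ll_sketch);
        size_t truncated = offsetof(struct sockaddr_ll_sketch, sll_addr) + 2;

        printf("%d %d\n",
               addr_len_ok(full, 6),        /* 1: room for an Ethernet address */
               addr_len_ok(truncated, 6));  /* 0: would read past the supplied name */
        return 0;
}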

@ -684,27 +684,27 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
_enter("");
if (list_empty(&rxnet->calls))
return;
if (!list_empty(&rxnet->calls)) {
write_lock(&rxnet->call_lock);
write_lock(&rxnet->call_lock);
while (!list_empty(&rxnet->calls)) {
call = list_entry(rxnet->calls.next,
struct rxrpc_call, link);
_debug("Zapping call %p", call);
while (!list_empty(&rxnet->calls)) {
call = list_entry(rxnet->calls.next, struct rxrpc_call, link);
_debug("Zapping call %p", call);
rxrpc_see_call(call);
list_del_init(&call->link);
rxrpc_see_call(call);
list_del_init(&call->link);
pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
call, atomic_read(&call->usage),
rxrpc_call_states[call->state],
call->flags, call->events);
pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
call, atomic_read(&call->usage),
rxrpc_call_states[call->state],
call->flags, call->events);
write_unlock(&rxnet->call_lock);
cond_resched();
write_lock(&rxnet->call_lock);
}
write_unlock(&rxnet->call_lock);
cond_resched();
write_lock(&rxnet->call_lock);
}
write_unlock(&rxnet->call_lock);
}


@ -1092,32 +1092,6 @@ static void sctp_cmd_send_msg(struct sctp_association *asoc,
}
/* Send the next ASCONF packet currently stored in the association.
* This happens after the ASCONF_ACK was successfully processed.
*/
static void sctp_cmd_send_asconf(struct sctp_association *asoc)
{
struct net *net = sock_net(asoc->base.sk);
/* Send the next asconf chunk from the addip chunk
* queue.
*/
if (!list_empty(&asoc->addip_chunk_list)) {
struct list_head *entry = asoc->addip_chunk_list.next;
struct sctp_chunk *asconf = list_entry(entry,
struct sctp_chunk, list);
list_del_init(entry);
/* Hold the chunk until an ASCONF_ACK is received. */
sctp_chunk_hold(asconf);
if (sctp_primitive_ASCONF(net, asoc, asconf))
sctp_chunk_free(asconf);
else
asoc->addip_last_asconf = asconf;
}
}
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
@ -1763,9 +1737,6 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
}
sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
break;
case SCTP_CMD_SEND_NEXT_ASCONF:
sctp_cmd_send_asconf(asoc);
break;
case SCTP_CMD_PURGE_ASCONF_QUEUE:
sctp_asconf_queue_teardown(asoc);
break;


@ -3756,6 +3756,29 @@ enum sctp_disposition sctp_sf_do_asconf(struct net *net,
return SCTP_DISPOSITION_CONSUME;
}
static enum sctp_disposition sctp_send_next_asconf(
struct net *net,
const struct sctp_endpoint *ep,
struct sctp_association *asoc,
const union sctp_subtype type,
struct sctp_cmd_seq *commands)
{
struct sctp_chunk *asconf;
struct list_head *entry;
if (list_empty(&asoc->addip_chunk_list))
return SCTP_DISPOSITION_CONSUME;
entry = asoc->addip_chunk_list.next;
asconf = list_entry(entry, struct sctp_chunk, list);
list_del_init(entry);
sctp_chunk_hold(asconf);
asoc->addip_last_asconf = asconf;
return sctp_sf_do_prm_asconf(net, ep, asoc, type, asconf, commands);
}
/*
* ADDIP Section 4.3 General rules for address manipulation
* When building TLV parameters for the ASCONF Chunk that will add or
@ -3847,14 +3870,10 @@ enum sctp_disposition sctp_sf_do_asconf_ack(struct net *net,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
asconf_ack)) {
/* Successfully processed ASCONF_ACK. We can
* release the next asconf if we have one.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
}
asconf_ack))
return sctp_send_next_asconf(net, ep,
(struct sctp_association *)asoc,
type, commands);
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(struct sctp_errhdr));


@ -472,16 +472,10 @@ static int may_context_mount_inode_relabel(u32 sid,
return rc;
}
static int selinux_is_sblabel_mnt(struct super_block *sb)
static int selinux_is_genfs_special_handling(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
return sbsec->behavior == SECURITY_FS_USE_XATTR ||
sbsec->behavior == SECURITY_FS_USE_TRANS ||
sbsec->behavior == SECURITY_FS_USE_TASK ||
sbsec->behavior == SECURITY_FS_USE_NATIVE ||
/* Special handling. Genfs but also in-core setxattr handler */
!strcmp(sb->s_type->name, "sysfs") ||
/* Special handling. Genfs but also in-core setxattr handler */
return !strcmp(sb->s_type->name, "sysfs") ||
!strcmp(sb->s_type->name, "pstore") ||
!strcmp(sb->s_type->name, "debugfs") ||
!strcmp(sb->s_type->name, "tracefs") ||
@ -491,6 +485,34 @@ static int selinux_is_sblabel_mnt(struct super_block *sb)
!strcmp(sb->s_type->name, "cgroup2")));
}
static int selinux_is_sblabel_mnt(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
/*
* IMPORTANT: Double-check logic in this function when adding a new
* SECURITY_FS_USE_* definition!
*/
BUILD_BUG_ON(SECURITY_FS_USE_MAX != 7);
switch (sbsec->behavior) {
case SECURITY_FS_USE_XATTR:
case SECURITY_FS_USE_TRANS:
case SECURITY_FS_USE_TASK:
case SECURITY_FS_USE_NATIVE:
return 1;
case SECURITY_FS_USE_GENFS:
return selinux_is_genfs_special_handling(sb);
/* Never allow relabeling on context mounts */
case SECURITY_FS_USE_MNTPOINT:
case SECURITY_FS_USE_NONE:
default:
return 0;
}
}
static int sb_finish_set_opts(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;


@ -5294,6 +5294,8 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
return;
spec->gen.preferred_dacs = preferred_pairs;
spec->gen.auto_mute_via_amp = 1;
codec->power_save_node = 0;
}
static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
@ -6745,6 +6747,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x40000000},
{0x14, 0x90170110},
{0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x02211020}),
@ -6985,6 +6991,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC292_STANDARD_PINS,
{0x13, 0x90a60140}),
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x14, 0x90170110},
{0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC295_STANDARD_PINS,
{0x17, 0x21014020},


@ -873,7 +873,6 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
if (!sai->cpu_dai_drv)
return -ENOMEM;
sai->cpu_dai_drv->name = dev_name(&pdev->dev);
if (STM_SAI_IS_PLAYBACK(sai)) {
memcpy(sai->cpu_dai_drv, &stm32_sai_playback_dai,
sizeof(stm32_sai_playback_dai));
@ -883,6 +882,7 @@ static int stm32_sai_sub_dais_init(struct platform_device *pdev,
sizeof(stm32_sai_capture_dai));
sai->cpu_dai_drv->capture.stream_name = sai->cpu_dai_drv->name;
}
sai->cpu_dai_drv->name = dev_name(&pdev->dev);
return 0;
}


@ -344,12 +344,16 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
{
struct usb_device *usbdev = line6->usbdev;
int ret;
unsigned char len;
unsigned char *len;
unsigned count;
if (address > 0xffff || datalen > 0xff)
return -EINVAL;
len = kmalloc(sizeof(*len), GFP_KERNEL);
if (!len)
return -ENOMEM;
/* query the serial number: */
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@ -358,7 +362,7 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
if (ret < 0) {
dev_err(line6->ifcdev, "read request failed (error %d)\n", ret);
return ret;
goto exit;
}
/* Wait for data length. We'll get 0xff until length arrives. */
@ -368,28 +372,29 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
0x0012, 0x0000, &len, 1,
0x0012, 0x0000, len, 1,
LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(line6->ifcdev,
"receive length failed (error %d)\n", ret);
return ret;
goto exit;
}
if (len != 0xff)
if (*len != 0xff)
break;
}
if (len == 0xff) {
ret = -EIO;
if (*len == 0xff) {
dev_err(line6->ifcdev, "read failed after %d retries\n",
count);
return -EIO;
} else if (len != datalen) {
goto exit;
} else if (*len != datalen) {
/* should be equal or something went wrong */
dev_err(line6->ifcdev,
"length mismatch (expected %d, got %d)\n",
(int)datalen, (int)len);
return -EIO;
(int)datalen, (int)*len);
goto exit;
}
/* receive the result: */
@ -398,12 +403,12 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
0x0013, 0x0000, data, datalen,
LINE6_TIMEOUT * HZ);
if (ret < 0) {
if (ret < 0)
dev_err(line6->ifcdev, "read failed (error %d)\n", ret);
return ret;
}
return 0;
exit:
kfree(len);
return ret;
}
EXPORT_SYMBOL_GPL(line6_read_data);
@ -415,12 +420,16 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
{
struct usb_device *usbdev = line6->usbdev;
int ret;
unsigned char status;
unsigned char *status;
int count;
if (address > 0xffff || datalen > 0xffff)
return -EINVAL;
status = kmalloc(sizeof(*status), GFP_KERNEL);
if (!status)
return -ENOMEM;
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
0x0022, address, data, datalen,
@ -429,7 +438,7 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
if (ret < 0) {
dev_err(line6->ifcdev,
"write request failed (error %d)\n", ret);
return ret;
goto exit;
}
for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
@ -440,28 +449,29 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
USB_TYPE_VENDOR | USB_RECIP_DEVICE |
USB_DIR_IN,
0x0012, 0x0000,
&status, 1, LINE6_TIMEOUT * HZ);
status, 1, LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(line6->ifcdev,
"receiving status failed (error %d)\n", ret);
return ret;
goto exit;
}
if (status != 0xff)
if (*status != 0xff)
break;
}
if (status == 0xff) {
if (*status == 0xff) {
dev_err(line6->ifcdev, "write failed after %d retries\n",
count);
return -EIO;
} else if (status != 0) {
ret = -EIO;
} else if (*status != 0) {
dev_err(line6->ifcdev, "write failed (error %d)\n", ret);
return -EIO;
ret = -EIO;
}
return 0;
exit:
kfree(status);
return ret;
}
EXPORT_SYMBOL_GPL(line6_write_data);

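The common thread in this hunk and the Line 6 podhd/toneport hunks that follow is that buffers handed to usb_control_msg() must be heap-allocated (on-stack memory is not suitable for USB transfers), and every early return is rerouted through a single exit label so the buffer is always freed. A userspace sketch of that shape, with invented helper names.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the USB transfer; just clears the status byte. */
static int fake_transfer(unsigned char *buf)
{
        *buf = 0;
        return 0;
}

static int read_status(void)
{
        unsigned char *status;
        int ret;

        status = malloc(1);             /* kmalloc(sizeof(*status), GFP_KERNEL) in the driver */
        if (!status)
                return -1;

        ret = fake_transfer(status);
        if (ret < 0)
                goto exit;              /* error paths no longer leak the buffer */

        if (*status != 0)
                ret = -5;               /* -EIO in the driver */
exit:
        free(status);
        return ret;
}

int main(void)
{
        printf("read_status() = %d\n", read_status());
        return 0;
}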

@ -224,28 +224,32 @@ static void podhd_startup_start_workqueue(unsigned long data)
static int podhd_dev_start(struct usb_line6_podhd *pod)
{
int ret;
u8 init_bytes[8];
u8 *init_bytes;
int i;
struct usb_device *usbdev = pod->line6.usbdev;
init_bytes = kmalloc(8, GFP_KERNEL);
if (!init_bytes)
return -ENOMEM;
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
0x11, 0,
NULL, 0, LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(pod->line6.ifcdev, "read request failed (error %d)\n", ret);
return ret;
goto exit;
}
/* NOTE: looks like some kind of ping message */
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
0x11, 0x0,
&init_bytes, 3, LINE6_TIMEOUT * HZ);
init_bytes, 3, LINE6_TIMEOUT * HZ);
if (ret < 0) {
dev_err(pod->line6.ifcdev,
"receive length failed (error %d)\n", ret);
return ret;
goto exit;
}
pod->firmware_version =
@ -254,7 +258,7 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
for (i = 0; i <= 16; i++) {
ret = line6_read_data(&pod->line6, 0xf000 + 0x08 * i, init_bytes, 8);
if (ret < 0)
return ret;
goto exit;
}
ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
@ -262,10 +266,9 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
USB_TYPE_STANDARD | USB_RECIP_DEVICE | USB_DIR_OUT,
1, 0,
NULL, 0, LINE6_TIMEOUT * HZ);
if (ret < 0)
return ret;
return 0;
exit:
kfree(init_bytes);
return ret;
}
static void podhd_startup_workqueue(struct work_struct *work)


@ -365,15 +365,20 @@ static bool toneport_has_source_select(struct usb_line6_toneport *toneport)
/*
Setup Toneport device.
*/
static void toneport_setup(struct usb_line6_toneport *toneport)
static int toneport_setup(struct usb_line6_toneport *toneport)
{
int ticks;
int *ticks;
struct usb_line6 *line6 = &toneport->line6;
struct usb_device *usbdev = line6->usbdev;
ticks = kmalloc(sizeof(*ticks), GFP_KERNEL);
if (!ticks)
return -ENOMEM;
/* sync time on device with host: */
ticks = (int)get_seconds();
line6_write_data(line6, 0x80c6, &ticks, 4);
*ticks = (int)get_seconds();
line6_write_data(line6, 0x80c6, ticks, 4);
kfree(ticks);
/* enable device: */
toneport_send_cmd(usbdev, 0x0301, 0x0000);
@ -388,6 +393,7 @@ static void toneport_setup(struct usb_line6_toneport *toneport)
toneport_update_led(toneport);
mod_timer(&toneport->timer, jiffies + TONEPORT_PCM_DELAY * HZ);
return 0;
}
/*
@ -451,7 +457,9 @@ static int toneport_init(struct usb_line6 *line6,
return err;
}
toneport_setup(toneport);
err = toneport_setup(toneport);
if (err)
return err;
/* register audio system: */
return snd_card_register(line6->card);
@ -463,7 +471,11 @@ static int toneport_init(struct usb_line6 *line6,
*/
static int toneport_reset_resume(struct usb_interface *interface)
{
toneport_setup(usb_get_intfdata(interface));
int err;
err = toneport_setup(usb_get_intfdata(interface));
if (err)
return err;
return line6_resume(interface);
}
#endif