mirror of
https://github.com/rd-stuffs/msm-4.14.git
synced 2025-02-20 11:45:48 +08:00
This is the 4.14.129 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl0Nx7MACgkQONu9yGCS
aT6rcxAA0os++XT8OzzsnJamD3O+oqDxCm3Pd3mNj8uck2XCHtzSEvq0z8a3/Xo2
RXWBNDbYRHDGCNwK+2OxSbmyBO6ZqH7lp+KkipGTwWmc6spmm0gnDMSQ1312g6v5
uVL8Tyirc9xEvkkHHNbxxuQ5e8RoxU0P+dBktDg5ipUCNLdO397/ldrg2cpxSsT4
SeUL6kQmXyH9X/btZVq+xUdIqgn5JDuzrDbGSmFa7SV/D+IVIvxiR5tt7ZLuWVVl
n9AqjTERAg0bLcOq7j5/eyephR0ooumd1J8z6PgMnmj66UTYtgkdDBVBYEFRUlEB
9tmmX5KWlpdbVfhvTRrbKo0kfRrVCy1h3CDf0hiJpYJesf6f+CBOorUORdvXKHEp
rQ2o6nshVWGsGv0fD3j4FzURZxbWFDOvveGApRj2p5626gLnRwz9kBvsy3FK/Gb9
d9b2fZRDf1Iz6QYKTybexhPfDxA2Gy3MvZ1Yj7EXIrf4rUrRSf+WSjSE69g8i9Q0
/1lWVQ9aW1UQ9Ya/r6xS5q2VNzWUDCpYDRDqyiiND0E1MrgsvL56YsmWbzQOI/hV
tm7j5NEqCPHVY0UQqjkAUspQbkLKbkVoj4TiQKgppkp1a2uZjo48AGbIqq0tJ/h8
aHkb/PgRyxLpD/+NFhLbwJGN7kATuMlegduhwJkxfv0kCmyEwrw=
=flvv
-----END PGP SIGNATURE-----

Merge 4.14.129 into android-4.14

Changes in 4.14.129
    perf machine: Guard against NULL in machine__exit()
    ax25: fix inconsistent lock state in ax25_destroy_timer
    be2net: Fix number of Rx queues used for flow hashing
    ipv6: flowlabel: fl6_sock_lookup() must use atomic_inc_not_zero
    lapb: fixed leak of control-blocks.
    neigh: fix use-after-free read in pneigh_get_next
    net: openvswitch: do not free vport if register_netdevice() is failed.
    sctp: Free cookie before we memdup a new one
    sunhv: Fix device naming inconsistency between sunhv_console and sunhv_reg
    Staging: vc04_services: Fix a couple error codes
    perf/x86/intel/ds: Fix EVENT vs. UEVENT PEBS constraints
    netfilter: nf_queue: fix reinject verdict handling
    ipvs: Fix use-after-free in ip_vs_in
    selftests: netfilter: missing error check when setting up veth interface
    clk: ti: clkctrl: Fix clkdm_clk handling
    powerpc/powernv: Return for invalid IMC domain
    mISDN: make sure device name is NUL terminated
    x86/CPU/AMD: Don't force the CPB cap when running under a hypervisor
    perf/ring_buffer: Fix exposing a temporarily decreased data_head
    perf/ring_buffer: Add ordering to rb->nest increment
    perf/ring-buffer: Always use {READ,WRITE}_ONCE() for rb->user_page data
    gpio: fix gpio-adp5588 build errors
    net: tulip: de4x5: Drop redundant MODULE_DEVICE_TABLE()
    net: aquantia: fix LRO with FCS error
    i2c: dev: fix potential memory leak in i2cdev_ioctl_rdwr
    ALSA: hda - Force polling mode on CNL for fixing codec communication
    configfs: Fix use-after-free when accessing sd->s_dentry
    perf data: Fix 'strncat may truncate' build failure with recent gcc
    perf record: Fix s390 missing module symbol and warning for non-root users
    ia64: fix build errors by exporting paddr_to_nid()
    KVM: PPC: Book3S: Use new mutex to synchronize access to rtas token list
    KVM: PPC: Book3S HV: Don't take kvm->lock around kvm_for_each_vcpu
    net: sh_eth: fix mdio access in sh_eth_close() for R-Car Gen2 and RZ/A1 SoCs
    net: phy: dp83867: Set up RGMII TX delay
    scsi: libcxgbi: add a check for NULL pointer in cxgbi_check_route()
    scsi: smartpqi: properly set both the DMA mask and the coherent DMA mask
    scsi: scsi_dh_alua: Fix possible null-ptr-deref
    scsi: libsas: delete sas port if expander discover failed
    mlxsw: spectrum: Prevent force of 56G
    HID: wacom: Don't set tool type until we're in range
    HID: wacom: Don't report anything prior to the tool entering range
    HID: wacom: Send BTN_TOUCH in response to INTUOSP2_BT eraser contact
    coredump: fix race condition between collapse_huge_page() and core dumping
    infiniband: fix race condition between infiniband mlx4, mlx5 driver and core dumping
    Abort file_remove_privs() for non-reg. files
    Linux 4.14.129

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 93c338c2e7

Makefile (2 changes)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 128
+SUBLEVEL = 129
 EXTRAVERSION =
 NAME = Petit Gorille
@@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr)
 	return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
 }
+EXPORT_SYMBOL(paddr_to_nid);
 
 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
 /*
@@ -296,6 +296,7 @@ struct kvm_arch {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 	struct list_head rtas_tokens;
+	struct mutex rtas_token_lock;
 	DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 #endif
 #ifdef CONFIG_KVM_MPIC
@@ -836,6 +836,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
 #ifdef CONFIG_PPC64
 	INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
 	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
+	mutex_init(&kvm->arch.rtas_token_lock);
 #endif
 
 	return kvm->arch.kvm_ops->init_vm(kvm);
@@ -392,12 +392,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 
 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 {
-	struct kvm_vcpu *ret;
-
-	mutex_lock(&kvm->lock);
-	ret = kvm_get_vcpu_by_id(kvm, id);
-	mutex_unlock(&kvm->lock);
-	return ret;
+	return kvm_get_vcpu_by_id(kvm, id);
 }
 
 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)

@@ -1258,7 +1253,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	u64 mask;
 
-	mutex_lock(&kvm->lock);
 	spin_lock(&vc->lock);
 	/*
 	 * If ILE (interrupt little-endian) has changed, update the

@@ -1298,7 +1292,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
 	mask &= 0xFFFFFFFF;
 	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
 	spin_unlock(&vc->lock);
-	mutex_unlock(&kvm->lock);
 }
 
 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
@@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
 {
 	struct rtas_token_definition *d, *tmp;
 
-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.rtas_token_lock);
 
 	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
 		if (rtas_name_matches(d->handler->name, name)) {

@@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
 	bool found;
 	int i;
 
-	lockdep_assert_held(&kvm->lock);
+	lockdep_assert_held(&kvm->arch.rtas_token_lock);
 
 	list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
 		if (d->token == token)

@@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
 	if (copy_from_user(&args, argp, sizeof(args)))
 		return -EFAULT;
 
-	mutex_lock(&kvm->lock);
+	mutex_lock(&kvm->arch.rtas_token_lock);
 
 	if (args.token)
 		rc = rtas_token_define(kvm, args.name, args.token);
 	else
 		rc = rtas_token_undefine(kvm, args.name);
 
-	mutex_unlock(&kvm->lock);
+	mutex_unlock(&kvm->arch.rtas_token_lock);
 
 	return rc;
 }

@@ -245,7 +245,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	orig_rets = args.rets;
 	args.rets = &args.args[be32_to_cpu(args.nargs)];
 
-	mutex_lock(&vcpu->kvm->lock);
+	mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
 
 	rc = -ENOENT;
 	list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {

@@ -256,7 +256,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	mutex_unlock(&vcpu->kvm->lock);
+	mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
 
 	if (rc == 0) {
 		args.rets = orig_rets;

@@ -282,8 +282,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
 {
 	struct rtas_token_definition *d, *tmp;
 
-	lockdep_assert_held(&kvm->lock);
-
 	list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
 		list_del(&d->list);
 		kfree(d);
@@ -87,6 +87,10 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
 	struct imc_pmu *pmu_ptr;
 	u32 offset;
 
+	/* Return for unknown domain */
+	if (domain < 0)
+		return -EINVAL;
+
 	/* memory for pmu */
 	pmu_ptr = kzalloc(sizeof(struct imc_pmu), GFP_KERNEL);
 	if (!pmu_ptr)
@@ -681,7 +681,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
 	EVENT_CONSTRAINT_END
 };

@@ -690,7 +690,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1),    /* MEM_LOAD_RETIRED.* */
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
 	/* Allow all events as PEBS with no flags */
 	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
 	EVENT_CONSTRAINT_END

@@ -698,7 +698,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
 
 struct event_constraint intel_slm_pebs_event_constraints[] = {
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
 	/* Allow all events as PEBS with no flags */
 	INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
 	EVENT_CONSTRAINT_END

@@ -729,7 +729,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
 	EVENT_CONSTRAINT_END
 };

@@ -746,7 +746,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf),    /* MEM_LOAD_RETIRED.* */
 	INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf),    /* FP_ASSIST.* */
 	/* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
 	EVENT_CONSTRAINT_END
 };

@@ -755,7 +755,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
 	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
 	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
 	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */

@@ -770,9 +770,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
 	INTEL_PLD_CONSTRAINT(0x01cd, 0x8),    /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
 	INTEL_PST_CONSTRAINT(0x02cd, 0x8),    /* MEM_TRANS_RETIRED.PRECISE_STORES */
 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
 	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),    /* MEM_UOP_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),    /* MEM_LOAD_UOPS_RETIRED.* */
 	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),    /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */

@@ -786,9 +786,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
 	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),          /* MEM_TRANS_RETIRED.* */
 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */

@@ -809,9 +809,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
 	INTEL_PLD_CONSTRAINT(0x01cd, 0xf),          /* MEM_TRANS_RETIRED.* */
 	/* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */

@@ -832,9 +832,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
 struct event_constraint intel_skl_pebs_event_constraints[] = {
 	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2),  /* INST_RETIRED.PREC_DIST */
 	/* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
 	/* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
-	INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
+	INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
 	INTEL_PLD_CONSTRAINT(0x1cd, 0xf),           /* MEM_TRANS_RETIRED.* */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
 	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
@@ -792,8 +792,11 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
 {
 	set_cpu_cap(c, X86_FEATURE_ZEN);
 
-	/* Fix erratum 1076: CPB feature bit not being set in CPUID. */
-	if (!cpu_has(c, X86_FEATURE_CPB))
+	/*
+	 * Fix erratum 1076: CPB feature bit not being set in CPUID.
+	 * Always set it, except when running under a hypervisor.
+	 */
+	if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
 		set_cpu_cap(c, X86_FEATURE_CPB);
 }
@@ -124,9 +124,6 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
 	int ret;
 	union omap4_timeout timeout = { 0 };
 
-	if (!clk->enable_bit)
-		return 0;
-
 	if (clk->clkdm) {
 		ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
 		if (ret) {

@@ -138,6 +135,9 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
 		}
 	}
 
+	if (!clk->enable_bit)
+		return 0;
+
 	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
 
 	val &= ~OMAP4_MODULEMODE_MASK;

@@ -166,7 +166,7 @@ static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
 	union omap4_timeout timeout = { 0 };
 
 	if (!clk->enable_bit)
-		return;
+		goto exit;
 
 	val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
@@ -712,6 +712,7 @@ config GPIO_ADP5588
 config GPIO_ADP5588_IRQ
 	bool "Interrupt controller support for ADP5588"
 	depends on GPIO_ADP5588=y
+	select GPIOLIB_IRQCHIP
 	help
 	  Say yes here to enable the adp5588 to be used as an interrupt
 	  controller. It requires the driver to be built in the kernel.
@@ -1225,13 +1225,13 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
 		/* Add back in missing bits of ID for non-USI pens */
 		wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
 	}
-	wacom->tool[0] = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0]));
 
 	for (i = 0; i < pen_frames; i++) {
 		unsigned char *frame = &data[i*pen_frame_len + 1];
 		bool valid = frame[0] & 0x80;
 		bool prox = frame[0] & 0x40;
 		bool range = frame[0] & 0x20;
+		bool invert = frame[0] & 0x10;
 
 		if (!valid)
 			continue;

@@ -1240,8 +1240,13 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
 			wacom->shared->stylus_in_proximity = false;
 			wacom_exit_report(wacom);
 			input_sync(pen_input);
+
+			wacom->tool[0] = 0;
+			wacom->id[0] = 0;
+			wacom->serial[0] = 0;
+
 			return;
 		}
 
 		if (range) {
 			/* Fix rotation alignment: userspace expects zero at left */
 			int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]);

@@ -1249,6 +1254,16 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
 			if (rotation > 899)
 				rotation -= 1800;
 
+			if (!wacom->tool[0]) { /* first in range */
+				/* Going into range select tool */
+				if (invert)
+					wacom->tool[0] = BTN_TOOL_RUBBER;
+				else if (wacom->id[0])
+					wacom->tool[0] = wacom_intuos_get_tool_type(wacom->id[0]);
+				else
+					wacom->tool[0] = BTN_TOOL_PEN;
+			}
+
 			input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
 			input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
 			input_report_abs(pen_input, ABS_TILT_X, (char)frame[7]);

@@ -1256,17 +1271,19 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
 			input_report_abs(pen_input, ABS_Z, rotation);
 			input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11]));
 		}
-		input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
-		input_report_abs(pen_input, ABS_DISTANCE, range ? frame[13] : wacom->features.distance_max);
-
-		input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01);
-		input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
-		input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
+
+		if (wacom->tool[0]) {
+			input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
+			input_report_abs(pen_input, ABS_DISTANCE, range ? frame[13] : wacom->features.distance_max);
+
+			input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x09);
+			input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
+			input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
 
-		input_report_key(pen_input, wacom->tool[0], prox);
-		input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
-		input_report_abs(pen_input, ABS_MISC,
-				 wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
+			input_report_key(pen_input, wacom->tool[0], prox);
+			input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
+			input_report_abs(pen_input, ABS_MISC,
+					 wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
+		}
 
 		wacom->shared->stylus_in_proximity = prox;
@@ -297,6 +297,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
 			    rdwr_pa[i].buf[0] < 1 ||
 			    rdwr_pa[i].len < rdwr_pa[i].buf[0] +
 					     I2C_SMBUS_BLOCK_MAX) {
+				i++;
 				res = -EINVAL;
 				break;
 			}
@@ -1197,6 +1197,8 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 	 * mlx4_ib_vma_close().
 	 */
 	down_write(&owning_mm->mmap_sem);
+	if (!mmget_still_valid(owning_mm))
+		goto skip_mm;
 	for (i = 0; i < HW_BAR_COUNT; i++) {
 		vma = context->hw_bar_info[i].vma;
 		if (!vma)

@@ -1215,7 +1217,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 		/* context going to be destroyed, should not access ops any more */
 		context->hw_bar_info[i].vma->vm_ops = NULL;
 	}
-
+skip_mm:
 	up_write(&owning_mm->mmap_sem);
 	mmput(owning_mm);
 	put_task_struct(owning_process);
@@ -1646,6 +1646,8 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 	 * mlx5_ib_vma_close.
 	 */
 	down_write(&owning_mm->mmap_sem);
+	if (!mmget_still_valid(owning_mm))
+		goto skip_mm;
 	mutex_lock(&context->vma_private_list_mutex);
 	list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
 				 list) {

@@ -1662,6 +1664,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 		kfree(vma_private);
 	}
 	mutex_unlock(&context->vma_private_list_mutex);
+skip_mm:
 	up_write(&owning_mm->mmap_sem);
 	mmput(owning_mm);
 	put_task_struct(owning_process);
@@ -394,7 +394,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 			memcpy(di.channelmap, dev->channelmap,
 			       sizeof(di.channelmap));
 			di.nrbchan = dev->nrbchan;
-			strcpy(di.name, dev_name(&dev->dev));
+			strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
 			if (copy_to_user((void __user *)arg, &di, sizeof(di)))
 				err = -EFAULT;
 		} else

@@ -678,7 +678,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 			memcpy(di.channelmap, dev->channelmap,
 			       sizeof(di.channelmap));
 			di.nrbchan = dev->nrbchan;
-			strcpy(di.name, dev_name(&dev->dev));
+			strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
 			if (copy_to_user((void __user *)arg, &di, sizeof(di)))
 				err = -EFAULT;
 		} else

@@ -692,6 +692,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 				err = -EFAULT;
 				break;
 			}
+			dn.name[sizeof(dn.name) - 1] = '\0';
 			dev = get_mdevice(dn.id);
 			if (dev)
 				err = device_rename(&dev->dev, dn.name);
@@ -683,38 +683,41 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 		if (is_err || rxd_wb->type & 0x1000U) {
 			/* status error or DMA error */
 			buff->is_error = 1U;
-		} else {
-			if (self->aq_nic_cfg->is_rss) {
-				/* last 4 byte */
-				u16 rss_type = rxd_wb->type & 0xFU;
+		}
+		if (self->aq_nic_cfg->is_rss) {
+			/* last 4 byte */
+			u16 rss_type = rxd_wb->type & 0xFU;
 
-				if (rss_type && rss_type < 0x8U) {
-					buff->is_hash_l4 = (rss_type == 0x4 ||
-					rss_type == 0x5);
-					buff->rss_hash = rxd_wb->rss_hash;
-				}
+			if (rss_type && rss_type < 0x8U) {
+				buff->is_hash_l4 = (rss_type == 0x4 ||
+						    rss_type == 0x5);
+				buff->rss_hash = rxd_wb->rss_hash;
 			}
+		}
 
-			if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
-				buff->len = rxd_wb->pkt_len %
-					AQ_CFG_RX_FRAME_MAX;
-				buff->len = buff->len ?
-					buff->len : AQ_CFG_RX_FRAME_MAX;
-				buff->next = 0U;
-				buff->is_eop = 1U;
+		if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
+			buff->len = rxd_wb->pkt_len %
+				    AQ_CFG_RX_FRAME_MAX;
+			buff->len = buff->len ?
+				    buff->len : AQ_CFG_RX_FRAME_MAX;
+			buff->next = 0U;
+			buff->is_eop = 1U;
+		} else {
+			if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
+			    rxd_wb->status) {
+				/* LRO */
+				buff->next = rxd_wb->next_desc_ptr;
+				++ring->stats.rx.lro_packets;
 			} else {
-				if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
-					rxd_wb->status) {
-					/* LRO */
-					buff->next = rxd_wb->next_desc_ptr;
-					++ring->stats.rx.lro_packets;
-				} else {
-					/* jumbo */
-					buff->next =
-						aq_ring_next_dx(ring,
-								ring->hw_head);
-					++ring->stats.rx.jumbo_packets;
-				}
+				/* jumbo */
+				buff->next =
+					aq_ring_next_dx(ring,
+							ring->hw_head);
+				++ring->stats.rx.jumbo_packets;
 			}
 		}
 	}
@@ -2108,7 +2108,6 @@ static struct eisa_driver de4x5_eisa_driver = {
 	.remove  = de4x5_eisa_remove,
 	}
 };
-MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
 #endif
 
 #ifdef CONFIG_PCI
@@ -1103,7 +1103,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 		cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
 		break;
 	case ETHTOOL_GRXRINGS:
-		cmd->data = adapter->num_rx_qs - 1;
+		cmd->data = adapter->num_rx_qs;
 		break;
 	default:
 		return -EINVAL;
@@ -2505,6 +2505,10 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
 	mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
 
 	autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
+	if (!autoneg && cmd->base.speed == SPEED_56000) {
+		netdev_err(dev, "56G not supported with autoneg off\n");
+		return -EINVAL;
+	}
 	eth_proto_new = autoneg ?
 		mlxsw_sp_to_ptys_advert_link(cmd) :
 		mlxsw_sp_to_ptys_speed(cmd->base.speed);
@@ -1458,6 +1458,10 @@ static void sh_eth_dev_exit(struct net_device *ndev)
 	sh_eth_get_stats(ndev);
 	sh_eth_reset(ndev);
 
+	/* Set the RMII mode again if required */
+	if (mdp->cd->rmiimode)
+		sh_eth_write(ndev, 0x1, RMIIMODE);
+
 	/* Set MAC address again */
 	update_mac_address(ndev);
 }
@@ -249,10 +249,8 @@ static int dp83867_config_init(struct phy_device *phydev)
 		ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
 		if (ret)
 			return ret;
-	}
 
-	if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
-	    (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
+		/* Set up RGMII delays */
 		val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
 
 		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
@@ -640,6 +640,10 @@ cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
 
 	if (ndev->flags & IFF_LOOPBACK) {
 		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
+		if (!ndev) {
+			err = -ENETUNREACH;
+			goto rel_neigh;
+		}
 		mtu = ndev->mtu;
 		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
 			n->dev->name, ndev->name, mtu);
@@ -1151,10 +1151,8 @@ static int __init alua_init(void)
 	int r;
 
 	kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
-	if (!kaluad_wq) {
-		/* Temporary failure, bypass */
-		return SCSI_DH_DEV_TEMP_BUSY;
-	}
+	if (!kaluad_wq)
+		return -ENOMEM;
 
 	r = scsi_register_device_handler(&alua_dh);
 	if (r != 0) {
@@ -989,6 +989,8 @@ static struct domain_device *sas_ex_discover_expander(
 		list_del(&child->dev_list_node);
 		spin_unlock_irq(&parent->port->dev_list_lock);
 		sas_put_device(child);
+		sas_port_delete(phy->port);
+		phy->port = NULL;
 		return NULL;
 	}
 	list_add_tail(&child->siblings, &parent->ex_dev.children);
@@ -6392,7 +6392,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
 	else
 		mask = DMA_BIT_MASK(32);
 
-	rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
+	rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
 	if (rc) {
 		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
 		goto disable_device;
@@ -579,7 +579,7 @@ exit:
 		 dev->colourfx.enable ? "true" : "false",
 		 dev->colourfx.u, dev->colourfx.v,
 		 ret, (ret == 0 ? 0 : -EINVAL));
-	return (ret == 0 ? 0 : EINVAL);
+	return (ret == 0 ? 0 : -EINVAL);
 }
 
 static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,

@@ -603,7 +603,7 @@ static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
 		 "%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n",
 		 __func__, mmal_ctrl, ctrl->id, ctrl->val, ret,
 		 (ret == 0 ? 0 : -EINVAL));
-	return (ret == 0 ? 0 : EINVAL);
+	return (ret == 0 ? 0 : -EINVAL);
 }
 
 static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,
@@ -396,7 +396,7 @@ static const struct uart_ops sunhv_pops = {
 static struct uart_driver sunhv_reg = {
 	.owner			= THIS_MODULE,
 	.driver_name		= "sunhv",
-	.dev_name		= "ttyS",
+	.dev_name		= "ttyHV",
 	.major			= TTY_MAJOR,
 };
@@ -58,15 +58,13 @@ static void configfs_d_iput(struct dentry * dentry,
 	if (sd) {
 		/* Coordinate with configfs_readdir */
 		spin_lock(&configfs_dirent_lock);
-		/* Coordinate with configfs_attach_attr where will increase
-		 * sd->s_count and update sd->s_dentry to new allocated one.
-		 * Only set sd->dentry to null when this dentry is the only
-		 * sd owner.
-		 * If not do so, configfs_d_iput may run just after
-		 * configfs_attach_attr and set sd->s_dentry to null
-		 * even it's still in use.
+		/*
+		 * Set sd->s_dentry to null only when this dentry is the one
+		 * that is going to be killed. Otherwise configfs_d_iput may
+		 * run just after configfs_attach_attr and set sd->s_dentry to
+		 * NULL even it's still in use.
 		 */
-		if (atomic_read(&sd->s_count) <= 2)
+		if (sd->s_dentry == dentry)
 			sd->s_dentry = NULL;
 
 		spin_unlock(&configfs_dirent_lock);
@@ -1817,8 +1817,13 @@ int file_remove_privs(struct file *file)
 	int kill;
 	int error = 0;
 
-	/* Fast path for nothing security related */
-	if (IS_NOSEC(inode))
+	/*
+	 * Fast path for nothing security related.
+	 * As well for non-regular files, e.g. blkdev inodes.
+	 * For example, blkdev_write_iter() might get here
+	 * trying to remove privs which it is not allowed to.
+	 */
+	if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
 		return 0;
 
 	kill = dentry_needs_remove_privs(dentry);
@@ -62,6 +62,10 @@ static inline void mmdrop_async(struct mm_struct *mm)
  * followed by taking the mmap_sem for writing before modifying the
  * vmas or anything the coredump pretends not to change from under it.
  *
+ * It also has to be called when mmgrab() is used in the context of
+ * the process, but then the mm_count refcount is transferred outside
+ * the context of the process to run down_write() on that pinned mm.
+ *
  * NOTE: find_extend_vma() called from GUP context is the only place
  * that can modify the "mm" (notably the vm_start/end) under mmap_sem
  * for reading and outside the context of the process, so it is also
@@ -49,14 +49,30 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
 	unsigned long head;
 
 again:
+	/*
+	 * In order to avoid publishing a head value that goes backwards,
+	 * we must ensure the load of @rb->head happens after we've
+	 * incremented @rb->nest.
+	 *
+	 * Otherwise we can observe a @rb->head value before one published
+	 * by an IRQ/NMI happening between the load and the increment.
+	 */
+	barrier();
 	head = local_read(&rb->head);
 
 	/*
-	 * IRQ/NMI can happen here, which means we can miss a head update.
+	 * IRQ/NMI can happen here and advance @rb->head, causing our
+	 * load above to be stale.
 	 */
 
-	if (!local_dec_and_test(&rb->nest))
+	/*
+	 * If this isn't the outermost nesting, we don't have to update
+	 * @rb->user_page->data_head.
+	 */
+	if (local_read(&rb->nest) > 1) {
+		local_dec(&rb->nest);
 		goto out;
+	}
 
 	/*
 	 * Since the mmap() consumer (userspace) can run on a different CPU:

@@ -85,12 +101,21 @@ again:
 	 * See perf_output_begin().
 	 */
 	smp_wmb(); /* B, matches C */
-	rb->user_page->data_head = head;
+	WRITE_ONCE(rb->user_page->data_head, head);
 
 	/*
-	 * Now check if we missed an update -- rely on previous implied
-	 * compiler barriers to force a re-read.
+	 * We must publish the head before decrementing the nest count,
+	 * otherwise an IRQ/NMI can publish a more recent head value and our
+	 * write will (temporarily) publish a stale value.
+	 */
+	barrier();
+	local_set(&rb->nest, 0);
+
+	/*
+	 * Ensure we decrement @rb->nest before we validate the @rb->head.
+	 * Otherwise we cannot be sure we caught the 'last' nested update.
 	 */
 	barrier();
 	if (unlikely(head != local_read(&rb->head))) {
 		local_inc(&rb->nest);
 		goto again;

@@ -464,7 +489,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
 				     handle->aux_flags);
 	}
 
-	rb->user_page->aux_head = rb->aux_head;
+	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
 	if (rb_need_aux_wakeup(rb))
 		wakeup = true;

@@ -495,7 +520,7 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
 
 	rb->aux_head += size;
 
-	rb->user_page->aux_head = rb->aux_head;
+	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
 	if (rb_need_aux_wakeup(rb)) {
 		perf_output_wakeup(handle);
 		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
@@ -1006,6 +1006,9 @@ static void collapse_huge_page(struct mm_struct *mm,
 	 * handled by the anon_vma lock + PG_lock.
 	 */
 	down_write(&mm->mmap_sem);
+	result = SCAN_ANY_PROCESS;
+	if (!mmget_still_valid(mm))
+		goto out;
 	result = hugepage_vma_revalidate(mm, address, &vma);
 	if (result)
 		goto out;
@@ -443,9 +443,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
 	}
 
 	if (ax25->sk != NULL) {
+		local_bh_disable();
 		bh_lock_sock(ax25->sk);
 		sock_reset_flag(ax25->sk, SOCK_ZAPPED);
 		bh_unlock_sock(ax25->sk);
+		local_bh_enable();
 	}
 
 put:
@@ -2743,6 +2743,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
 }
 
 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
+	__acquires(tbl->lock)
 	__acquires(rcu_bh)
 {
 	struct neigh_seq_state *state = seq->private;

@@ -2753,6 +2754,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 
 	rcu_read_lock_bh();
 	state->nht = rcu_dereference_bh(tbl->nht);
+	read_lock(&tbl->lock);
 
 	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
 }

@@ -2786,8 +2788,13 @@ out:
 EXPORT_SYMBOL(neigh_seq_next);
 
 void neigh_seq_stop(struct seq_file *seq, void *v)
+	__releases(tbl->lock)
 	__releases(rcu_bh)
 {
+	struct neigh_seq_state *state = seq->private;
+	struct neigh_table *tbl = state->tbl;
+
+	read_unlock(&tbl->lock);
 	rcu_read_unlock_bh();
 }
 EXPORT_SYMBOL(neigh_seq_stop);
@@ -254,9 +254,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
 	rcu_read_lock_bh();
 	for_each_sk_fl_rcu(np, sfl) {
 		struct ip6_flowlabel *fl = sfl->fl;
-		if (fl->label == label) {
+
+		if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
 			fl->lastuse = jiffies;
-			atomic_inc(&fl->users);
 			rcu_read_unlock_bh();
 			return fl;
 		}

@@ -623,7 +623,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
 					goto done;
 				}
 				fl1 = sfl->fl;
-				atomic_inc(&fl1->users);
+				if (!atomic_inc_not_zero(&fl1->users))
+					fl1 = NULL;
 				break;
 			}
 		}
@@ -182,6 +182,7 @@ int lapb_unregister(struct net_device *dev)
 	lapb = __lapb_devtostruct(dev);
 	if (!lapb)
 		goto out;
+	lapb_put(lapb);
 
 	lapb_stop_t1timer(lapb);
 	lapb_stop_t2timer(lapb);
@@ -2268,7 +2268,6 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
-	nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
 	ip_vs_service_net_cleanup(ipvs);	/* ip_vs_flush() with locks */
 	ip_vs_conn_net_cleanup(ipvs);
 	ip_vs_app_net_cleanup(ipvs);

@@ -2283,6 +2282,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
 {
 	struct netns_ipvs *ipvs = net_ipvs(net);
 	EnterFunction(2);
+	nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
 	ipvs->enable = 0;	/* Disable packet reception */
 	smp_wmb();
 	ip_vs_sync_net_cleanup(ipvs);
@@ -193,6 +193,7 @@ static unsigned int nf_iterate(struct sk_buff *skb,
 repeat:
 		verdict = nf_hook_entry_hookfn(hook, skb, state);
 		if (verdict != NF_ACCEPT) {
+			*index = i;
 			if (verdict != NF_REPEAT)
 				return verdict;
 			goto repeat;
@@ -176,7 +176,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 {
 	struct vport *vport;
 	struct internal_dev *internal_dev;
+	struct net_device *dev;
 	int err;
+	bool free_vport = true;
 
 	vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
 	if (IS_ERR(vport)) {

@@ -184,8 +186,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 		goto error;
 	}
 
-	vport->dev = alloc_netdev(sizeof(struct internal_dev),
-				  parms->name, NET_NAME_USER, do_setup);
+	dev = alloc_netdev(sizeof(struct internal_dev),
+			   parms->name, NET_NAME_USER, do_setup);
+	vport->dev = dev;
 	if (!vport->dev) {
 		err = -ENOMEM;
 		goto error_free_vport;

@@ -207,8 +210,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
 	rtnl_lock();
 	err = register_netdevice(vport->dev);
-	if (err)
+	if (err) {
+		free_vport = false;
 		goto error_unlock;
+	}
 
 	dev_set_promiscuity(vport->dev, 1);
 	rtnl_unlock();

@@ -218,11 +223,12 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
 error_unlock:
 	rtnl_unlock();
-	free_percpu(vport->dev->tstats);
+	free_percpu(dev->tstats);
 error_free_netdev:
-	free_netdev(vport->dev);
+	free_netdev(dev);
 error_free_vport:
-	ovs_vport_free(vport);
+	if (free_vport)
+		ovs_vport_free(vport);
 error:
 	return ERR_PTR(err);
 }
@@ -2586,6 +2586,8 @@ do_addr_param:
 	case SCTP_PARAM_STATE_COOKIE:
 		asoc->peer.cookie_len =
 			ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
+		if (asoc->peer.cookie)
+			kfree(asoc->peer.cookie);
 		asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
 		if (!asoc->peer.cookie)
 			retval = 0;

@@ -2650,6 +2652,8 @@ do_addr_param:
 			goto fall_through;
 
 		/* Save peer's random parameter */
+		if (asoc->peer.peer_random)
+			kfree(asoc->peer.peer_random);
 		asoc->peer.peer_random = kmemdup(param.p,
 					    ntohs(param.p->length), gfp);
 		if (!asoc->peer.peer_random) {

@@ -2663,6 +2667,8 @@ do_addr_param:
 			goto fall_through;
 
 		/* Save peer's HMAC list */
+		if (asoc->peer.peer_hmacs)
+			kfree(asoc->peer.peer_hmacs);
 		asoc->peer.peer_hmacs = kmemdup(param.p,
 					    ntohs(param.p->length), gfp);
 		if (!asoc->peer.peer_hmacs) {

@@ -2678,6 +2684,8 @@ do_addr_param:
 		if (!ep->auth_enable)
 			goto fall_through;
 
+		if (asoc->peer.peer_chunks)
+			kfree(asoc->peer.peer_chunks);
 		asoc->peer.peer_chunks = kmemdup(param.p,
 					    ntohs(param.p->length), gfp);
 		if (!asoc->peer.peer_chunks)
@@ -376,6 +376,7 @@ enum {
 
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
 #define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
+#define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
 
 static char *driver_short_names[] = {
 	[AZX_DRIVER_ICH] = "HDA Intel",

@@ -1751,8 +1752,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
 	else
 		chip->bdl_pos_adj = bdl_pos_adj[dev];
 
-	/* Workaround for a communication error on CFL (bko#199007) */
-	if (IS_CFL(pci))
+	/* Workaround for a communication error on CFL (bko#199007) and CNL */
+	if (IS_CFL(pci) || IS_CNL(pci))
 		chip->polling_mode = 1;
 
 	err = azx_bus_init(chip, model[dev], &pci_hda_io_ops);
@@ -5,16 +5,19 @@
 #include "util.h"
 #include "machine.h"
 #include "api/fs/fs.h"
+#include "debug.h"
 
 int arch__fix_module_text_start(u64 *start, const char *name)
 {
+	u64 m_start = *start;
 	char path[PATH_MAX];
 
 	snprintf(path, PATH_MAX, "module/%.*s/sections/.text",
 		 (int)strlen(name) - 2, name + 1);
-
-	if (sysfs__read_ull(path, (unsigned long long *)start) < 0)
-		return -1;
+	if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
+		pr_debug2("Using module %s start:%#lx\n", path, m_start);
+		*start = m_start;
+	}
 
 	return 0;
 }
@@ -271,7 +271,7 @@ static int string_set_value(struct bt_ctf_field *field, const char *string)
 			if (i > 0)
 				strncpy(buffer, string, i);
 			}
-			strncat(buffer + p, numstr, 4);
+			memcpy(buffer + p, numstr, 4);
 			p += 3;
 		}
 	}
@@ -156,6 +156,9 @@ void machine__delete_threads(struct machine *machine)
 
 void machine__exit(struct machine *machine)
 {
+	if (machine == NULL)
+		return;
+
 	machine__destroy_kernel_maps(machine);
 	map_groups__exit(&machine->kmaps);
 	dsos__exit(&machine->dsos);
@@ -23,7 +23,11 @@ ip netns add ns0
 ip netns add ns1
 ip netns add ns2
 
-ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
+ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 > /dev/null 2>&1
+if [ $? -ne 0 ];then
+    echo "SKIP: No virtual ethernet pair device support in kernel"
+    exit $ksft_skip
+fi
 ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
 
 ip -net ns0 link set lo up