mirror of https://github.com/rd-stuffs/msm-4.14.git
synced 2025-02-20 11:45:48 +08:00
This is the 4.14.292 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmMVsmcACgkQONu9yGCS
aT4FUg/9FObnfhdvPAIvN+OySfvNRBFX88CMTl0b4BoxBwc6Rkp0vrE9mrakxISV
ShysLvA3tDdnHpOQgvCZSFDKt8zoAztFIfpOP6qY9GzM1rdVZ/+uThrviWjj6AOC
nFJDt53cTqAsIj7BTzJZSdRW6JtcQnjnYIIL1aIWBKbcP3Tqoot/pJIKogdtvqRJ
WS+HryL27osDXzDLU/sEcDwJy09zoJPYxVujW2bx9U6jN+EF/NtS2ZWueIvome2J
qzSbYefXMup+0V0VjJqbz525HLlRiD2To6IEx0icoRgPyHDS30Z+bgiEMEJcqcqZ
pY2sityQ26xE68jBwbMUomojl7CQVhB5JcZeMhwBdDT7GybyqghuiPbFYEQNU9Dl
21+08CvbbTf/Lh01gRgDMQQLwcaJhmtrbtk6BwmW+nxyqKmmEQeqvjnsIyskZb1Y
DZNbwsJe5vo/fXg7+PF3OhP2hiuawdcDaVQ8BHIxYIfv2G9aTyjFIGI/6V1lbAPj
WdwF/kYl2X8jJr3cAkxx9ligQJkgJpZ1SZwLZKX8/en/Gom55pO4TfTDj62Nn25D
BfB58RBzDcZSTNfGNset9D/kYqWhSZ7vSXVBnR4o3Ii2UslObvyGwc3XN7+/PYjF
+bMqDb0Mky2Wp90eZE2fTkkd08pe/jzvMW13OUg2xg8yESzmce8=
=1Med
-----END PGP SIGNATURE-----

Merge 4.14.292 into android-4.14-stable

Changes in 4.14.292
	audit: fix potential double free on error path from fsnotify_add_inode_mark
	parisc: Fix exception handler for fldw and fstw instructions
	pinctrl: amd: Don't save/restore interrupt status and wake status bits
	xfrm: fix refcount leak in __xfrm_policy_check()
	af_key: Do not call xfrm_probe_algs in parallel
	rose: check NULL rose_loopback_neigh->loopback
	bonding: 802.3ad: fix no transmission of LACPDUs
	net: ipvtap - add __init/__exit annotations to module init/exit funcs
	netfilter: ebtables: reject blobs that don't provide all entry points
	netfilter: nft_payload: report ERANGE for too long offset and length
	netfilter: nft_payload: do not truncate csum_offset and csum_type
	net: Fix data-races around weight_p and dev_weight_[rt]x_bias.
	ratelimit: Fix data-races in ___ratelimit().
	net: Fix a data-race around sysctl_tstamp_allow_data.
	net: Fix a data-race around sysctl_net_busy_poll.
	net: Fix a data-race around sysctl_net_busy_read.
	net: Fix a data-race around netdev_budget.
	net: Fix a data-race around netdev_budget_usecs.
	net: Fix a data-race around sysctl_somaxconn.
	ixgbe: stop resetting SYSTIME in ixgbe_ptp_start_cyclecounter
	btrfs: check if root is readonly while setting security xattr
	loop: Check for overflow while configuring loop
	asm-generic: sections: refactor memory_intersects
	mm/hugetlb: fix hugetlb not supporting softdirty tracking
	md: call __md_stop_writes in md_stop
	mm: Force TLB flush for PFNMAP mappings before unlink_file_vma()
	arm64: map FDT as RW for early_init_dt_scan()
	s390/mm: do not trigger write fault when vma does not allow VM_WRITE
	x86/cpu: Add Tiger Lake to Intel family
	x86/bugs: Add "unknown" reporting for MMIO Stale Data
	kbuild: Fix include path in scripts/Makefile.modpost
	Bluetooth: L2CAP: Fix build errors in some archs
	media: pvrusb2: fix memory leak in pvr_probe
	HID: hidraw: fix memory leak in hidraw_release()
	fbdev: fb_pm2fb: Avoid potential divide by zero error
	ftrace: Fix NULL pointer dereference in is_ftrace_trampoline when ftrace is dead
	mm/rmap: Fix anon_vma->degree ambiguity leading to double-reuse
	neigh: fix possible DoS due to net iface start/stop loop
	s390/hypfs: avoid error message under KVM
	netfilter: conntrack: NF_CONNTRACK_PROCFS should no longer default to y
	kprobes: don't call disarm_kprobe() for disabled kprobes
	net: neigh: don't call kfree_skb() under spin_lock_irqsave()
	Linux 4.14.292

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Icce4d9faec621a80f1945d04c7e6e0c7acac8eee
This commit is contained in: commit c934915478
@@ -230,6 +230,20 @@ The possible values in this file are:
      * - 'Mitigation: Clear CPU buffers'
        - The processor is vulnerable and the CPU buffer clearing mitigation is
          enabled.
+     * - 'Unknown: No mitigations'
+       - The processor vulnerability status is unknown because it is
+         out of Servicing period. Mitigation is not attempted.
+
+Definitions:
+------------
+
+Servicing period: The process of providing functional and security updates to
+Intel processors or platforms, utilizing the Intel Platform Update (IPU)
+process or other similar mechanisms.
+
+End of Servicing Updates (ESU): ESU is the date at which Intel will no
+longer provide Servicing, such as through IPU or other similar update
+processes. ESU dates will typically be aligned to end of quarter.
+
 If the processor is vulnerable then the following information is appended to
 the above information:
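Aside (not part of this commit): the strings documented above are exactly what the kernel exposes through sysfs, so the new "Unknown: No mitigations" state can be checked from userspace. A minimal C sketch, assuming the standard /sys/devices/system/cpu/vulnerabilities layout:

/* Illustrative only: print the MMIO Stale Data status reported by the
 * kernel, e.g. "Unknown: No mitigations" on CPUs past their servicing
 * period. */
#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/mmio_stale_data", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}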
Makefile

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 291
+SUBLEVEL = 292
 EXTRAVERSION =
 NAME = Petit Gorille
 
@@ -91,7 +91,7 @@ extern void init_mem_pgprot(void);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only);
-extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
+extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
 extern void mark_linear_text_alias_ro(void);
 
 #endif	/* !__ASSEMBLY__ */
@@ -65,9 +65,6 @@ out:
	return default_cmdline;
 }
 
-extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
-				       pgprot_t prot);
-
 /*
  * This routine will be executed with the kernel mapped at its default virtual
  * address, and if it returns successfully, the kernel will be remapped, and
@@ -96,7 +93,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
	 * attempt at mapping the FDT in setup_machine()
	 */
	early_fixmap_init();
-	fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+	fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
	if (!fdt)
		return 0;
@@ -179,9 +179,13 @@ static void __init smp_build_mpidr_hash(void)
 
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
 {
-	void *dt_virt = fixmap_remap_fdt(dt_phys);
+	int size;
+	void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
	const char *name;
 
+	if (dt_virt)
+		memblock_reserve(dt_phys, size);
+
	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
@@ -193,6 +197,9 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
			cpu_relax();
	}
 
+	/* Early fixups are done, map the FDT as read-only now */
+	fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
+
	name = of_flat_dt_get_machine_name();
	if (!name)
		return;
@@ -836,7 +836,7 @@ void __set_fixmap(enum fixed_addresses idx,
	}
 }
 
-void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
 {
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
@@ -889,19 +889,6 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
	return dt_virt;
 }
 
-void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
-{
-	void *dt_virt;
-	int size;
-
-	dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
-	if (!dt_virt)
-		return NULL;
-
-	memblock_reserve(dt_phys, size);
-	return dt_virt;
-}
-
 int __init arch_ioremap_pud_supported(void)
 {
	/*
@@ -121,7 +121,7 @@
 #define R1(i) (((i)>>21)&0x1f)
 #define R2(i) (((i)>>16)&0x1f)
 #define R3(i) ((i)&0x1f)
-#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
+#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))
 #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
 #define IM5_2(i) IM((i)>>16,5)
 #define IM5_3(i) IM((i),5)
@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
	int rc;
 
	if (diag204_probe()) {
-		pr_err("The hardware system does not support hypfs\n");
+		pr_info("The hardware system does not support hypfs\n");
		return -ENODATA;
	}
	if (diag204_info_type == DIAG204_INFO_EXT) {
@@ -494,9 +494,9 @@ fail_hypfs_vm_exit:
	hypfs_vm_exit();
 fail_hypfs_diag_exit:
	hypfs_diag_exit();
+	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 fail_dbfs_exit:
	hypfs_dbfs_exit();
-	pr_err("Initialization of hypfs failed with rc=%i\n", rc);
	return rc;
 }
 device_initcall(hypfs_init)
@@ -433,7 +433,9 @@ static inline int do_exception(struct pt_regs *regs, int access)
	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
-	if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+	if ((trans_exc_code & store_indication) == 0x400)
+		access = VM_WRITE;
+	if (access == VM_WRITE)
		flags |= FAULT_FLAG_WRITE;
	down_read(&mm->mmap_sem);
 
@@ -394,5 +394,6 @@
 #define X86_BUG_ITLB_MULTIHIT	X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 #define X86_BUG_SRBDS		X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
 #define X86_BUG_MMIO_STALE_DATA	X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+#define X86_BUG_MMIO_UNKNOWN	X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
@@ -71,6 +71,9 @@
 #define INTEL_FAM6_ALDERLAKE		0x97
 #define INTEL_FAM6_ALDERLAKE_L		0x9A
 
+#define INTEL_FAM6_TIGERLAKE_L		0x8C
+#define INTEL_FAM6_TIGERLAKE		0x8D
+
 /* "Small Core" Processors (Atom) */
 
 #define INTEL_FAM6_ATOM_BONNELL	0x1C /* Diamondville, Pineview */
@@ -396,7 +396,8 @@ static void __init mmio_select_mitigation(void)
	u64 ia32_cap;
 
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
-	    cpu_mitigations_off()) {
+	    boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
+	    cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}
@@ -501,6 +502,8 @@ out:
	pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
+	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
 }
 
 static void __init md_clear_select_mitigation(void)
@@ -1823,6 +1826,9 @@ static ssize_t tsx_async_abort_show_state(char *buf)
 
 static ssize_t mmio_stale_data_show_state(char *buf)
 {
+	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+		return sysfs_emit(buf, "Unknown: No mitigations\n");
+
	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
 
@@ -1933,6 +1939,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
		return srbds_show_state(buf);
 
	case X86_BUG_MMIO_STALE_DATA:
+	case X86_BUG_MMIO_UNKNOWN:
		return mmio_stale_data_show_state(buf);
 
	default:
@@ -1989,6 +1996,9 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
 
 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
 {
-	return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
+	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
+		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
+	else
+		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
 }
 #endif
@@ -905,6 +905,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define MSBDS_ONLY		BIT(5)
 #define NO_SWAPGS		BIT(6)
 #define NO_ITLB_MULTIHIT	BIT(7)
+#define NO_MMIO			BIT(8)
 
 #define VULNWL(_vendor, _family, _model, _whitelist)	\
	{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -922,6 +923,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	VULNWL(NSC,		5, X86_MODEL_ANY,	NO_SPECULATION),
 
	/* Intel Family 6 */
+	VULNWL_INTEL(TIGERLAKE,			NO_MMIO),
+	VULNWL_INTEL(TIGERLAKE_L,		NO_MMIO),
+	VULNWL_INTEL(ALDERLAKE,			NO_MMIO),
+	VULNWL_INTEL(ALDERLAKE_L,		NO_MMIO),
+
	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
@@ -939,9 +945,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 
	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
-	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(ATOM_GOLDMONT_X,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_INTEL(ATOM_GOLDMONT_X,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
 
	/*
	 * Technically, swapgs isn't serializing on AMD (despite it previously
@@ -954,13 +960,13 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	VULNWL_INTEL(ATOM_TREMONT_X,		NO_ITLB_MULTIHIT),
 
	/* AMD Family 0xf - 0x12 */
-	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
 
	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
-	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
	{}
 };
 
@@ -1100,10 +1106,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
	 * Affected CPU list is generally enough to enumerate the vulnerability,
	 * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
	 * not want the guest to enumerate the bug.
+	 *
+	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
+	 * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
	 */
-	if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
-	    !arch_cap_mmio_immune(ia32_cap))
-		setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+	if (!arch_cap_mmio_immune(ia32_cap)) {
+		if (cpu_matches(cpu_vuln_blacklist, MMIO))
+			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
+		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
+			setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
+	}
 
	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
		return;
@@ -1214,6 +1214,11 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
	info->lo_number = lo->lo_number;
	info->lo_offset = lo->lo_offset;
	info->lo_sizelimit = lo->lo_sizelimit;
+
+	/* loff_t vars have been assigned __u64 */
+	if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
+		return -EOVERFLOW;
+
	info->lo_flags = lo->lo_flags;
	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
@@ -354,10 +354,13 @@ static int hidraw_release(struct inode * inode, struct file * file)
	unsigned int minor = iminor(inode);
	struct hidraw_list *list = file->private_data;
+	unsigned long flags;
	int i;
 
	mutex_lock(&minors_lock);
 
+	spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
	for (i = list->tail; i < list->head; i++)
		kfree(list->buffer[i].value);
	list_del(&list->node);
+	spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
	kfree(list);
@@ -5908,6 +5908,7 @@ void md_stop(struct mddev *mddev)
	/* stop the array and free an attached data structures.
	 * This is called from dm-raid
	 */
+	__md_stop_writes(mddev);
	__md_stop(mddev);
	if (mddev->bio_set)
		bioset_free(mddev->bio_set);
@@ -2604,6 +2604,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
		del_timer_sync(&hdw->encoder_run_timer);
		del_timer_sync(&hdw->encoder_wait_timer);
		flush_work(&hdw->workpoll);
+		v4l2_device_unregister(&hdw->v4l2_dev);
		usb_free_urb(hdw->ctl_read_urb);
		usb_free_urb(hdw->ctl_write_urb);
		kfree(hdw->ctl_read_buffer);
@@ -1977,30 +1977,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
  */
 void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
 {
-	/* check that the bond is not initialized yet */
-	if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
-			       bond->dev->dev_addr)) {
-
-		BOND_AD_INFO(bond).aggregator_identifier = 0;
-
-		BOND_AD_INFO(bond).system.sys_priority =
-			bond->params.ad_actor_sys_prio;
-		if (is_zero_ether_addr(bond->params.ad_actor_system))
-			BOND_AD_INFO(bond).system.sys_mac_addr =
-			    *((struct mac_addr *)bond->dev->dev_addr);
-		else
-			BOND_AD_INFO(bond).system.sys_mac_addr =
-			    *((struct mac_addr *)bond->params.ad_actor_system);
+	BOND_AD_INFO(bond).aggregator_identifier = 0;
+	/* initialize how many times this module is called in one
+	 * second (should be about every 100ms)
+	 */
+	ad_ticks_per_sec = tick_resolution;
 
-		/* initialize how many times this module is called in one
-		 * second (should be about every 100ms)
-		 */
-		ad_ticks_per_sec = tick_resolution;
+	BOND_AD_INFO(bond).system.sys_priority =
+		bond->params.ad_actor_sys_prio;
+	if (is_zero_ether_addr(bond->params.ad_actor_system))
+		BOND_AD_INFO(bond).system.sys_mac_addr =
+		    *((struct mac_addr *)bond->dev->dev_addr);
+	else
+		BOND_AD_INFO(bond).system.sys_mac_addr =
+		    *((struct mac_addr *)bond->params.ad_actor_system);
 
-		bond_3ad_initiate_agg_selection(bond,
-						AD_AGGREGATOR_SELECTION_TIMER *
-						ad_ticks_per_sec);
-	}
+	bond_3ad_initiate_agg_selection(bond,
+					AD_AGGREGATOR_SELECTION_TIMER *
+					ad_ticks_per_sec);
 }
 
 /**
@@ -1090,7 +1090,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
	struct cyclecounter cc;
	unsigned long flags;
	u32 incval = 0;
-	u32 tsauxc = 0;
	u32 fuse0 = 0;
 
	/* For some of the boards below this mask is technically incorrect.
@@ -1125,18 +1124,6 @@
	case ixgbe_mac_x550em_a:
	case ixgbe_mac_X550:
		cc.read = ixgbe_ptp_read_X550;
-
-		/* enable SYSTIME counter */
-		IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
-		IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
-		IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
-		tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
-		IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
-				tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
-		IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
-		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
-
-		IXGBE_WRITE_FLUSH(hw);
		break;
	case ixgbe_mac_X540:
		cc.read = ixgbe_ptp_read_82599;
@@ -1168,6 +1155,50 @@
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 }
 
+/**
+ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
+ * @adapter: the ixgbe private board structure
+ *
+ * Initialize and start the SYSTIME registers.
+ */
+static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 tsauxc;
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_X550EM_x:
+	case ixgbe_mac_x550em_a:
+	case ixgbe_mac_X550:
+		tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+
+		/* Reset SYSTIME registers to 0 */
+		IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+
+		/* Reset interrupt settings */
+		IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+
+		/* Activate the SYSTIME counter */
+		IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+				tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+		break;
+	case ixgbe_mac_X540:
+	case ixgbe_mac_82599EB:
+		/* Reset SYSTIME registers to 0 */
+		IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+		break;
+	default:
+		/* Other devices aren't supported */
+		return;
+	};
+
+	IXGBE_WRITE_FLUSH(hw);
+}
+
 /**
  * ixgbe_ptp_reset
  * @adapter: the ixgbe private board structure
@@ -1194,6 +1225,8 @@
 
	ixgbe_ptp_start_cyclecounter(adapter);
 
+	ixgbe_ptp_init_systime(adapter);
+
	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
			 ktime_to_ns(ktime_get_real()));
@@ -193,7 +193,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
	.notifier_call	= ipvtap_device_event,
 };
 
-static int ipvtap_init(void)
+static int __init ipvtap_init(void)
 {
	int err;
 
@@ -227,7 +227,7 @@ out1:
 }
 module_init(ipvtap_init);
 
-static void ipvtap_exit(void)
+static void __exit ipvtap_exit(void)
 {
	rtnl_link_unregister(&ipvtap_link_ops);
	unregister_netdevice_notifier(&ipvtap_notifier_block);
@@ -753,6 +753,7 @@ int amd_gpio_suspend(struct device *dev)
	struct platform_device *pdev = to_platform_device(dev);
	struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+	unsigned long flags;
	int i;
 
	for (i = 0; i < desc->npins; i++) {
@@ -761,7 +762,9 @@ int amd_gpio_suspend(struct device *dev)
		if (!amd_gpio_should_save(gpio_dev, pin))
			continue;
 
-		gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
+		raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+		gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
+		raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
	}
 
	return 0;
@@ -772,6 +775,7 @@ int amd_gpio_resume(struct device *dev)
	struct platform_device *pdev = to_platform_device(dev);
	struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
	struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+	unsigned long flags;
	int i;
 
	for (i = 0; i < desc->npins; i++) {
@@ -780,7 +784,10 @@ int amd_gpio_resume(struct device *dev)
		if (!amd_gpio_should_save(gpio_dev, pin))
			continue;
 
-		writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
+		raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+		gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
+		writel(gpio_dev->saved_regs[i], gpio_dev->base + pin * 4);
+		raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
	}
 
	return 0;
@@ -614,6 +614,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
		return -EINVAL;
	}
 
+	if (!var->pixclock) {
+		DPRINTK("pixclock is zero\n");
+		return -EINVAL;
+	}
+
	if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
		DPRINTK("pixclock too high (%ldKHz)\n",
			PICOS2KHZ(var->pixclock));
@@ -378,6 +378,9 @@ static int btrfs_xattr_handler_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size)
 {
+	if (btrfs_root_readonly(BTRFS_I(inode)->root))
+		return -EROFS;
+
	name = xattr_full_name(handler, name);
	return __btrfs_getxattr(inode, name, buffer, size);
 }
@@ -92,7 +92,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
 /**
  * memory_intersects - checks if the region occupied by an object intersects
  *                     with another memory region
- * @begin: virtual address of the beginning of the memory regien
+ * @begin: virtual address of the beginning of the memory region
  * @end: virtual address of the end of the memory region
  * @virt: virtual address of the memory object
  * @size: size of the memory object
@@ -105,7 +105,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
 {
	void *vend = virt + size;
 
-	return (virt >= begin && virt < end) || (vend >= begin && vend < end);
+	if (virt < end && vend > begin)
+		return true;
+
+	return false;
 }
 
 /**
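Aside (not part of this commit): the old predicate above both misses an object that fully covers [begin, end) and falsely reports an object whose end merely touches begin. A small self-contained comparison of the two checks:

/* Illustrative only: with begin=100, end=200, an object at virt=40,
 * size=60 (vend=100) merely touches the region; the old check reports
 * an intersection (vend >= begin), the new one does not (vend > begin
 * is false). An object at virt=50, size=300 covers the whole region;
 * the old check misses it, the new one reports it. */
#include <stdbool.h>
#include <stdio.h>

static bool old_check(unsigned long begin, unsigned long end,
		      unsigned long virt, unsigned long size)
{
	unsigned long vend = virt + size;

	return (virt >= begin && virt < end) || (vend >= begin && vend < end);
}

static bool new_check(unsigned long begin, unsigned long end,
		      unsigned long virt, unsigned long size)
{
	unsigned long vend = virt + size;

	return virt < end && vend > begin;
}

int main(void)
{
	printf("touching: old=%d new=%d\n",
	       old_check(100, 200, 40, 60), new_check(100, 200, 40, 60));
	printf("covering: old=%d new=%d\n",
	       old_check(100, 200, 50, 300), new_check(100, 200, 50, 300));
	return 0;
}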
@@ -98,10 +98,6 @@ struct ebt_table {
	struct ebt_replace_kernel *table;
	unsigned int valid_hooks;
	rwlock_t lock;
-	/* e.g. could be the table explicitly only allows certain
-	 * matches, targets, ... 0 == let it in */
-	int (*check)(const struct ebt_table_info *info,
-		     unsigned int valid_hooks);
	/* the data used by the kernel */
	struct ebt_table_info *private;
	struct module *me;
@@ -39,12 +39,15 @@ struct anon_vma {
	atomic_t refcount;
 
	/*
-	 * Count of child anon_vmas and VMAs which points to this anon_vma.
+	 * Count of child anon_vmas. Equals to the count of all anon_vmas that
+	 * have ->parent pointing to this one, including itself.
	 *
	 * This counter is used for making decision about reusing anon_vma
	 * instead of forking new one. See comments in function anon_vma_clone.
	 */
-	unsigned degree;
+	unsigned long num_children;
+	/* Count of VMAs whose ->anon_vma pointer points to this object. */
+	unsigned long num_active_vmas;
 
	struct anon_vma *parent;	/* Parent of this anon_vma */
 
@@ -43,7 +43,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;
 
 static inline bool net_busy_loop_on(void)
 {
-	return sysctl_net_busy_poll;
+	return READ_ONCE(sysctl_net_busy_poll);
 }
 
 static inline bool sk_can_busy_loop(const struct sock *sk)
@@ -111,6 +111,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa
 
	ret = fsnotify_add_mark(&audit_mark->mark, inode, NULL, true);
	if (ret < 0) {
+		audit_mark->path = NULL;
		fsnotify_put_mark(&audit_mark->mark);
		audit_mark = ERR_PTR(ret);
	}
@@ -1687,12 +1687,14 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
	/* Try to disarm and disable this/parent probe */
	if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
		/*
-		 * If kprobes_all_disarmed is set, orig_p
-		 * should have already been disarmed, so
-		 * skip unneed disarming process.
+		 * Don't be lazy here. Even if 'kprobes_all_disarmed'
+		 * is false, 'orig_p' might not have been armed yet.
+		 * Note arm_all_kprobes() __tries__ to arm all kprobes
+		 * on the best effort basis.
		 */
-		if (!kprobes_all_disarmed)
+		if (!kprobes_all_disarmed && !kprobe_disabled(orig_p))
			disarm_kprobe(orig_p, true);
 
		orig_p->flags |= KPROBE_FLAG_DISABLED;
	}
@@ -2819,6 +2819,16 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
 
	ftrace_startup_enable(command);
 
+	/*
+	 * If ftrace is in an undefined state, we just remove ops from list
+	 * to prevent the NULL pointer, instead of totally rolling it back and
+	 * free trampoline, because those actions could cause further damage.
+	 */
+	if (unlikely(ftrace_disabled)) {
+		__unregister_ftrace_function(ops);
+		return -ENODEV;
+	}
+
	ops->flags &= ~FTRACE_OPS_FL_ADDING;
 
	return 0;
@@ -27,10 +27,16 @@
  */
 int ___ratelimit(struct ratelimit_state *rs, const char *func)
 {
+	/* Paired with WRITE_ONCE() in .proc_handler().
+	 * Changing two values seperately could be inconsistent
+	 * and some message could be lost. (See: net_ratelimit_state).
+	 */
+	int interval = READ_ONCE(rs->interval);
+	int burst = READ_ONCE(rs->burst);
	unsigned long flags;
	int ret;
 
-	if (!rs->interval)
+	if (!interval)
		return 1;
 
	/*
@@ -45,7 +51,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
	if (!rs->begin)
		rs->begin = jiffies;
 
-	if (time_is_before_jiffies(rs->begin + rs->interval)) {
+	if (time_is_before_jiffies(rs->begin + interval)) {
		if (rs->missed) {
			if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
				printk_deferred(KERN_WARNING
@@ -57,7 +63,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
		rs->begin   = jiffies;
		rs->printed = 0;
	}
-	if (rs->burst && rs->burst > rs->printed) {
+	if (burst && burst > rs->printed) {
		rs->printed++;
		ret = 1;
	} else {
mm/mmap.c

@@ -1611,8 +1611,12 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
	    pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
		return 0;
 
-	/* Do we need to track softdirty? */
-	if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
+	/*
+	 * Do we need to track softdirty? hugetlb does not support softdirty
+	 * tracking yet.
+	 */
+	if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY) &&
+	    !is_vm_hugetlb_page(vma))
		return 1;
 
	/* Specialty mapping? */
@@ -2538,6 +2542,18 @@ static void unmap_region(struct mm_struct *mm,
	tlb_gather_mmu(&tlb, mm, start, end);
	update_hiwater_rss(mm);
	unmap_vmas(&tlb, vma, start, end);
+
+	/*
+	 * Ensure we have no stale TLB entries by the time this mapping is
+	 * removed from the rmap.
+	 * Note that we don't have to worry about nested flushes here because
+	 * we're holding the mm semaphore for removing the mapping - so any
+	 * concurrent flush in this region has to be coming through the rmap,
+	 * and we synchronize against that using the rmap lock.
+	 */
+	if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0)
+		tlb_flush_mmu(&tlb);
+
	free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
				 next ? next->vm_start : USER_PGTABLES_CEILING);
	tlb_finish_mmu(&tlb, start, end);
mm/rmap.c

@@ -82,7 +82,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
-		anon_vma->degree = 1;	/* Reference for first vma */
+		anon_vma->num_children = 0;
+		anon_vma->num_active_vmas = 0;
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
@@ -190,6 +191,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
+		anon_vma->num_children++; /* self-parent link for new root */
		allocated = anon_vma;
	}
 
@@ -199,8 +201,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
-		/* vma reference or self-parent link for new root */
-		anon_vma->degree++;
+		anon_vma->num_active_vmas++;
		allocated = NULL;
		avc = NULL;
	}
@@ -279,19 +280,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
		anon_vma_chain_link(dst, avc, anon_vma);
 
		/*
-		 * Reuse existing anon_vma if its degree lower than two,
-		 * that means it has no vma and only one anon_vma child.
+		 * Reuse existing anon_vma if it has no vma and only one
+		 * anon_vma child.
		 *
-		 * Do not chose parent anon_vma, otherwise first child
-		 * will always reuse it. Root anon_vma is never reused:
+		 * Root anon_vma is never reused:
		 * it has self-parent reference and at least one child.
		 */
-		if (!dst->anon_vma && anon_vma != src->anon_vma &&
-		    anon_vma->degree < 2)
+		if (!dst->anon_vma &&
+		    anon_vma->num_children < 2 &&
+		    anon_vma->num_active_vmas == 0)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
-		dst->anon_vma->degree++;
+		dst->anon_vma->num_active_vmas++;
	unlock_anon_vma_root(root);
	return 0;
 
@@ -341,6 +342,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
+	anon_vma->num_active_vmas++;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;
@@ -361,7 +363,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
-	anon_vma->parent->degree++;
+	anon_vma->parent->num_children++;
	anon_vma_unlock_write(anon_vma);
 
	return 0;
@@ -393,7 +395,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
-			anon_vma->parent->degree--;
+			anon_vma->parent->num_children--;
			continue;
		}
 
@@ -401,7 +403,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma)
-		vma->anon_vma->degree--;
+		vma->anon_vma->num_active_vmas--;
	unlock_anon_vma_root(root);
 
	/*
@@ -412,7 +414,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;
 
-		VM_WARN_ON(anon_vma->degree);
+		VM_WARN_ON(anon_vma->num_children);
+		VM_WARN_ON(anon_vma->num_active_vmas);
		put_anon_vma(anon_vma);
 
		list_del(&avc->same_vma);
@@ -1826,11 +1826,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
		src_match = !bacmp(&c->src, src);
		dst_match = !bacmp(&c->dst, dst);
		if (src_match && dst_match) {
-			c = l2cap_chan_hold_unless_zero(c);
-			if (c) {
-				read_unlock(&chan_list_lock);
-				return c;
-			}
+			if (!l2cap_chan_hold_unless_zero(c))
+				continue;
+
+			read_unlock(&chan_list_lock);
+			return c;
		}
 
		/* Closest match */
@@ -33,18 +33,10 @@ static struct ebt_replace_kernel initial_table = {
	.entries	= (char *)&initial_chain,
 };
 
-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
-{
-	if (valid_hooks & ~(1 << NF_BR_BROUTING))
-		return -EINVAL;
-	return 0;
-}
-
 static const struct ebt_table broute_table = {
	.name		= "broute",
	.table		= &initial_table,
	.valid_hooks	= 1 << NF_BR_BROUTING,
-	.check		= check,
	.me		= THIS_MODULE,
 };
 
@@ -41,18 +41,10 @@ static struct ebt_replace_kernel initial_table = {
	.entries	= (char *)initial_chains,
 };
 
-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
-{
-	if (valid_hooks & ~FILTER_VALID_HOOKS)
-		return -EINVAL;
-	return 0;
-}
-
 static const struct ebt_table frame_filter = {
	.name		= "filter",
	.table		= &initial_table,
	.valid_hooks	= FILTER_VALID_HOOKS,
-	.check		= check,
	.me		= THIS_MODULE,
 };
 
@@ -41,18 +41,10 @@ static struct ebt_replace_kernel initial_table = {
	.entries	= (char *)initial_chains,
 };
 
-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
-{
-	if (valid_hooks & ~NAT_VALID_HOOKS)
-		return -EINVAL;
-	return 0;
-}
-
 static const struct ebt_table frame_nat = {
	.name		= "nat",
	.table		= &initial_table,
	.valid_hooks	= NAT_VALID_HOOKS,
-	.check		= check,
	.me		= THIS_MODULE,
 };
 
@@ -991,8 +991,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
		goto free_iterate;
	}
 
-	/* the table doesn't like it */
-	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
+	if (repl->valid_hooks != t->valid_hooks)
		goto free_unlock;
 
	if (repl->num_counters && repl->num_counters != t->private->nentries) {
@@ -1200,11 +1199,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
	if (ret != 0)
		goto free_chainstack;
 
-	if (table->check && table->check(newinfo, table->valid_hooks)) {
-		ret = -EINVAL;
-		goto free_chainstack;
-	}
-
	table->private = newinfo;
	rwlock_init(&table->lock);
	mutex_lock(&ebt_mutex);
@@ -5186,7 +5186,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
		net_rps_action_and_irq_enable(sd);
	}
 
-	napi->weight = dev_rx_weight;
+	napi->weight = READ_ONCE(dev_rx_weight);
	while (again) {
		struct sk_buff *skb;
 
@@ -5648,8 +5648,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
 {
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies +
-		usecs_to_jiffies(netdev_budget_usecs);
-	int budget = netdev_budget;
+		usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
+	int budget = READ_ONCE(netdev_budget);
	LIST_HEAD(list);
	LIST_HEAD(repoll);
 
@@ -222,11 +222,26 @@ static int neigh_del_timer(struct neighbour *n)
	return 0;
 }
 
-static void pneigh_queue_purge(struct sk_buff_head *list)
+static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
 {
+	struct sk_buff_head tmp;
+	unsigned long flags;
	struct sk_buff *skb;
 
-	while ((skb = skb_dequeue(list)) != NULL) {
+	skb_queue_head_init(&tmp);
+	spin_lock_irqsave(&list->lock, flags);
+	skb = skb_peek(list);
+	while (skb != NULL) {
+		struct sk_buff *skb_next = skb_peek_next(skb, list);
+		if (net == NULL || net_eq(dev_net(skb->dev), net)) {
+			__skb_unlink(skb, list);
+			__skb_queue_tail(&tmp, skb);
+		}
+		skb = skb_next;
+	}
+	spin_unlock_irqrestore(&list->lock, flags);
+
+	while ((skb = __skb_dequeue(&tmp))) {
		dev_put(skb->dev);
		kfree_skb(skb);
	}
@@ -295,9 +310,9 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
	write_lock_bh(&tbl->lock);
	neigh_flush_dev(tbl, dev);
	pneigh_ifdown_and_unlock(tbl, dev);
-
-	del_timer_sync(&tbl->proxy_timer);
-	pneigh_queue_purge(&tbl->proxy_queue);
+	pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
+	if (skb_queue_empty_lockless(&tbl->proxy_queue))
+		del_timer_sync(&tbl->proxy_timer);
	return 0;
 }
 EXPORT_SYMBOL(neigh_ifdown);
@@ -1609,7 +1624,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
	/* It is not clean... Fix it to unload IPv6 module safely */
	cancel_delayed_work_sync(&tbl->gc_work);
	del_timer_sync(&tbl->proxy_timer);
-	pneigh_queue_purge(&tbl->proxy_queue);
+	pneigh_queue_purge(&tbl->proxy_queue, NULL);
	neigh_ifdown(tbl, NULL);
	if (atomic_read(&tbl->entries))
		pr_crit("neighbour leakage\n");
@@ -4352,7 +4352,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
 {
	bool ret;
 
-	if (likely(sysctl_tstamp_allow_data || tsonly))
+	if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
		return true;
 
	read_lock_bh(&sk->sk_callback_lock);
@@ -2783,7 +2783,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
	sk->sk_napi_id		=	0;
-	sk->sk_ll_usec		=	sysctl_net_busy_read;
+	sk->sk_ll_usec		=	READ_ONCE(sysctl_net_busy_read);
 #endif
 
	sk->sk_max_pacing_rate = ~0U;
@@ -229,14 +229,17 @@ static int set_default_qdisc(struct ctl_table *table, int write,
 static int proc_do_dev_weight(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int ret;
+	static DEFINE_MUTEX(dev_weight_mutex);
+	int ret, weight;
 
+	mutex_lock(&dev_weight_mutex);
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
-	if (ret != 0)
-		return ret;
-
-	dev_rx_weight = weight_p * dev_weight_rx_bias;
-	dev_tx_weight = weight_p * dev_weight_tx_bias;
+	if (!ret && write) {
+		weight = READ_ONCE(weight_p);
+		WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
+		WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
+	}
+	mutex_unlock(&dev_weight_mutex);
 
	return ret;
 }
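Aside (not part of this commit): the READ_ONCE()/WRITE_ONCE() pairing in this hunk is the same pattern used by the other sysctl data-race fixes in this release: the writer serializes updates and publishes with WRITE_ONCE(), while lockless readers load with READ_ONCE(). A stripped-down sketch of the pattern, with all names hypothetical:

/* Illustrative sketch only, not kernel code from this commit. */
#include <linux/compiler.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);
static int example_weight = 64;

/* writer side (e.g. a .proc_handler), serialized by the mutex */
static void example_set_weight(int w)
{
	mutex_lock(&example_mutex);
	WRITE_ONCE(example_weight, w);	/* publish without store tearing */
	mutex_unlock(&example_mutex);
}

/* reader side (fast path), runs without the mutex */
static int example_get_weight(void)
{
	/* a plain read here could be torn, fused, or re-read */
	return READ_ONCE(example_weight);
}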
@@ -1707,9 +1707,12 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
		pfk->registered |= (1<<hdr->sadb_msg_satype);
	}
 
+	mutex_lock(&pfkey_mutex);
	xfrm_probe_algs();
 
	supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
+	mutex_unlock(&pfkey_mutex);
+
	if (!supp_skb) {
		if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
			pfk->registered &= ~(1<<hdr->sadb_msg_satype);
@@ -100,7 +100,6 @@ config NF_CONNTRACK_ZONES
 
 config NF_CONNTRACK_PROCFS
	bool "Supply CT list in procfs (OBSOLETE)"
-	default y
	depends on PROC_FS
	---help---
	  This option enables for the list of known conntrack entries
@@ -332,6 +332,8 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
			const struct nlattr * const tb[])
 {
	struct nft_payload_set *priv = nft_expr_priv(expr);
+	u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
+	int err;
 
	priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
@@ -339,11 +341,15 @@
	priv->sreg        = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
 
	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
-		priv->csum_type =
-			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
-	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
-		priv->csum_offset =
-			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
+		csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
+	if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
+		err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
+					  &csum_offset);
+		if (err < 0)
+			return err;
+
+		priv->csum_offset = csum_offset;
+	}
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;
 
@@ -354,13 +360,14 @@
		priv->csum_flags = flags;
	}
 
-	switch (priv->csum_type) {
+	switch (csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	default:
		return -EOPNOTSUPP;
	}
+	priv->csum_type = csum_type;
 
	return nft_validate_register_load(priv->sreg, priv->len);
 }
@@ -398,6 +405,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
 {
	enum nft_payload_bases base;
	unsigned int offset, len;
+	int err;
 
	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
@@ -423,8 +431,13 @@
	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);
 
-	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
-	len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
+	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
+	if (err < 0)
+		return ERR_PTR(err);
+
+	err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
+	if (err < 0)
+		return ERR_PTR(err);
 
	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER)
@@ -99,7 +99,8 @@ static void rose_loopback_timer(struct timer_list *unused)
	}
 
	if (frametype == ROSE_CALL_REQUEST) {
-		if (!rose_loopback_neigh->dev) {
+		if (!rose_loopback_neigh->dev &&
+		    !rose_loopback_neigh->loopback) {
			kfree_skb(skb);
			continue;
		}
@@ -251,7 +251,7 @@ static inline int qdisc_restart(struct Qdisc *q, int *packets)
 
 void __qdisc_run(struct Qdisc *q)
 {
-	int quota = dev_tx_weight;
+	int quota = READ_ONCE(dev_tx_weight);
	int packets;
 
	while (qdisc_restart(q, &packets)) {
@@ -1509,7 +1509,7 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
 
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
-		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
+		somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
		if ((unsigned int)backlog > somaxconn)
			backlog = somaxconn;
 
@@ -2400,6 +2400,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
	if (pols[1]) {
		if (IS_ERR(pols[1])) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
+			xfrm_pol_put(pols[0]);
			return 0;
		}
		pols[1]->curlft.use_time = get_seconds();
@@ -51,8 +51,7 @@ obj := $(KBUILD_EXTMOD)
 src := $(obj)
 
 # Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
-include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
-             $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
+include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile)
 endif
 
 include scripts/Makefile.lib