Mirror of https://github.com/rd-stuffs/msm-4.14.git (synced 2025-02-20 11:45:48 +08:00)
Merge android-4.14.164 (d2905c6) into msm-4.14
* refs/heads/tmp-d2905c6:
  Linux 4.14.164
  vlan: fix memory leak in vlan_dev_set_egress_priority
  net: sch_prio: When ungrafting, replace with FIFO
  vlan: vlan_changelink() should propagate errors
  vxlan: fix tos value before xmit
  tcp: fix "old stuff" D-SACK causing SACK to be treated as D-SACK
  sctp: free cmd->obj.chunk for the unprocessed SCTP_CMD_REPLY
  USB: serial: option: add Telit ME910G1 0x110a composition
  USB: core: fix check for duplicate endpoints
  pkt_sched: fq: do not accept silly TCA_FQ_QUANTUM
  net: usb: lan78xx: fix possible skb leak
  net: stmmac: dwmac-sunxi: Allow all RGMII modes
  net: stmmac: dwmac-sun8i: Allow all RGMII modes
  net: dsa: mv88e6xxx: Preserve priority when setting CPU port.
  macvlan: do not assume mac_header is set in macvlan_broadcast()
  gtp: fix bad unlock balance in gtp_encap_enable_socket
  mmc: block: propagate correct returned value in mmc_rpmb_ioctl
  mmc: core: Prevent bus reference leak in mmc_blk_init()
  mmc: block: Fix bug when removing RPMB chardev
  mmc: block: Delete mmc_access_rpmb()
  mmc: block: Convert RPMB to a character device
  PCI/switchtec: Read all 64 bits of part_event_bitmap
  bpf: Fix passing modified ctx to ld/abs/ind instruction
  bpf: reject passing modified ctx to helper functions
  hv_netvsc: Fix unwanted rx_table reset
  llc2: Fix return statement of llc_stat_ev_rx_null_dsap_xid_c (and _test_c)
  parisc: Fix compiler warnings in debug_core.c
  block: fix memleak when __blk_rq_map_user_iov() is failed
  s390/dasd: fix memleak in path handling error case
  s390/dasd/cio: Interpret ccw_device_get_mdc return value correctly
  net: stmmac: RX buffer size must be 16 byte aligned
  net: stmmac: Do not accept invalid MTU values
  fs: avoid softlockups in s_inodes iterators
  perf/x86/intel: Fix PT PMI handling
  kconfig: don't crash on NULL expressions in expr_eq()
  regulator: rn5t618: fix module aliases
  ASoC: wm8962: fix lambda value
  rfkill: Fix incorrect check to avoid NULL pointer dereference
  net: usb: lan78xx: Fix error message format specifier
  bnx2x: Fix logic to get total no. of PFs per engine
  bnx2x: Do not handle requests from VFs after parity
  powerpc: Ensure that swiotlb buffer is allocated from low memory
  samples: bpf: fix syscall_tp due to unused syscall
  samples: bpf: Replace symbol compare of trace_event
  ARM: dts: am437x-gp/epos-evm: fix panel compatible
  bpf, mips: Limit to 33 tail calls
  ARM: dts: bcm283x: Fix critical trip point
  ASoC: topology: Check return value for soc_tplg_pcm_create()
  spi: spi-cavium-thunderx: Add missing pci_release_regions()
  ARM: dts: Cygnus: Fix MDIO node address/size cells
  netfilter: nf_tables: validate NFT_SET_ELEM_INTERVAL_END
  netfilter: uapi: Avoid undefined left-shift in xt_sctp.h
  ARM: vexpress: Set-up shared OPP table instead of individual for each CPU
  efi/gop: Fix memory leak in __gop_query32/64()
  efi/gop: Return EFI_SUCCESS if a usable GOP was found
  efi/gop: Return EFI_NOT_FOUND if there are no usable GOPs
  x86/efi: Update e820 with reserved EFI boot services data to fix kexec breakage
  libtraceevent: Fix lib installation with O=
  mwifiex: Fix heap overflow in mmwifiex_process_tdls_action_frame()
  netfilter: ctnetlink: netns exit must wait for callbacks
  locking/spinlock/debug: Fix various data races
  USB: dummy-hcd: increase max number of devices to 32
  USB: dummy-hcd: use usb_urb_dir_in instead of usb_pipein
  UPSTREAM: USB: dummy-hcd: use usb_urb_dir_in instead of usb_pipein
  UPSTREAM: USB: dummy-hcd: increase max number of devices to 32
  UPSTREAM: USB: dummy-hcd: Fix failure to give back unlinked URBs
  UPSTREAM: USB: dummy-hcd: bandwidth limits for non-bulk transfers
  BACKPORT: perf_event: Add support for LSM and SELinux checks
  ANDROID: cuttlefish_defconfig: remove 80211_HWSIM

Conflicts:
    drivers/mmc/core/block.c
    drivers/mmc/core/queue.h
    drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
    kernel/events/core.c
    kernel/locking/spinlock_debug.c

Excluded the following commits, as per the mmc team's suggestion:
    mmc: block: propagate correct returned value in mmc_rpmb_ioctl
    mmc: core: Prevent bus reference leak in mmc_blk_init()
    mmc: block: Fix bug when removing RPMB chardev
    mmc: block: Delete mmc_access_rpmb()
    mmc: block: Convert RPMB to a character device

Change-Id: I1ec72ef72135c50e5bf46b6f66f1dd88b18add28
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
commit 14e46ae0c8

Makefile (+1 −1)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 163
+SUBLEVEL = 164
 EXTRAVERSION =
 NAME = Petit Gorille
 
arch/arm/boot/dts/am437x-gp-evm.dts

@@ -83,7 +83,7 @@
     };
 
     lcd0: display {
-        compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+        compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
         label = "lcd";
 
         panel-timing {
arch/arm/boot/dts/am43x-epos-evm.dts

@@ -45,7 +45,7 @@
     };
 
     lcd0: display {
-        compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+        compatible = "osddisplays,osd070t1718-19ts", "panel-dpi";
         label = "lcd";
 
         panel-timing {
arch/arm/boot/dts/bcm-cygnus.dtsi

@@ -165,8 +165,8 @@
     mdio: mdio@18002000 {
         compatible = "brcm,iproc-mdio";
         reg = <0x18002000 0x8>;
-        #size-cells = <1>;
-        #address-cells = <0>;
+        #size-cells = <0>;
+        #address-cells = <1>;
         status = "disabled";
 
         gphy0: ethernet-phy@0 {
arch/arm/boot/dts/bcm283x.dtsi

@@ -38,7 +38,7 @@
 
     trips {
         cpu-crit {
-            temperature = <80000>;
+            temperature = <90000>;
             hysteresis = <0>;
             type = "critical";
         };
arch/arm/mach-vexpress/spc.c

@@ -555,8 +555,9 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev)
 
 static int __init ve_spc_clk_init(void)
 {
-    int cpu;
+    int cpu, cluster;
     struct clk *clk;
+    bool init_opp_table[MAX_CLUSTERS] = { false };
 
     if (!info)
         return 0; /* Continue only if SPC is initialised */
@@ -582,8 +583,17 @@ static int __init ve_spc_clk_init(void)
             continue;
         }
 
+        cluster = topology_physical_package_id(cpu_dev->id);
+        if (init_opp_table[cluster])
+            continue;
+
         if (ve_init_opp_table(cpu_dev))
             pr_warn("failed to initialise cpu%d opp table\n", cpu);
+        else if (dev_pm_opp_set_sharing_cpus(cpu_dev,
+                 topology_core_cpumask(cpu_dev->id)))
+            pr_warn("failed to mark OPPs shared for cpu%d\n", cpu);
+        else
+            init_opp_table[cluster] = true;
     }
 
     platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
arch/mips/net/ebpf_jit.c

@@ -612,6 +612,7 @@ static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
 static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
 {
     int off, b_off;
+    int tcc_reg;
 
     ctx->flags |= EBPF_SEEN_TC;
     /*
@@ -624,14 +625,14 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
     b_off = b_imm(this_idx + 1, ctx);
     emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
     /*
-     * if (--TCC < 0)
+     * if (TCC-- < 0)
      *     goto out;
      */
     /* Delay slot */
-    emit_instr(ctx, daddiu, MIPS_R_T5,
-               (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4, -1);
+    tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
+    emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
     b_off = b_imm(this_idx + 1, ctx);
-    emit_instr(ctx, bltz, MIPS_R_T5, b_off);
+    emit_instr(ctx, bltz, tcc_reg, b_off);
     /*
      * prog = array->ptrs[index];
      * if (prog == NULL)
arch/parisc/include/asm/cmpxchg.h

@@ -44,8 +44,14 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
 **        if (((unsigned long)p & 0xf) == 0)
 **            return __ldcw(p);
 */
-#define xchg(ptr, x) \
-    ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+#define xchg(ptr, x)                                                   \
+({                                                                     \
+    __typeof__(*(ptr)) __ret;                                          \
+    __typeof__(*(ptr)) _x_ = (x);                                      \
+    __ret = (__typeof__(*(ptr)))                                       \
+        __xchg((unsigned long)_x_, (ptr), sizeof(*(ptr)));             \
+    __ret;                                                             \
+})
 
 /* bug catcher for when unsupported size is used - won't link */
 extern void __cmpxchg_called_with_bad_pointer(void);
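The rewritten parisc xchg() wraps the exchange in a GCC statement expression, so the macro evaluates its argument exactly once and yields a value with the pointee's own type; that is what silences the compiler warnings kgdb's debug_core.c was triggering. A minimal userspace sketch of the same pattern (hypothetical swap_once() helper, not from the kernel):

    #include <stdio.h>

    /* Statement expression: evaluate v once, return it typed like *(p). */
    #define swap_once(p, v)                                            \
    ({                                                                 \
        __typeof__(*(p)) __new = (v);   /* argument evaluated once */  \
        __typeof__(*(p)) __old = *(p);                                 \
        *(p) = __new;                                                  \
        __old;                          /* value of the ({ ... }) */   \
    })

    int main(void)
    {
        short s = 3;
        short prev = swap_once(&s, 7);  /* __old/__new are 'short' */

        printf("%d %d\n", prev, s);     /* prints: 3 7 */
        return 0;
    }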
arch/powerpc/mm/mem.c

@@ -353,6 +353,14 @@ void __init mem_init(void)
     BUILD_BUG_ON(MMU_PAGE_COUNT > 16);
 
 #ifdef CONFIG_SWIOTLB
+    /*
+     * Some platforms (e.g. 85xx) limit DMA-able memory way below
+     * 4G. We force memblock to bottom-up mode to ensure that the
+     * memory allocated in swiotlb_init() is DMA-able.
+     * As it's the last memblock allocation, no need to reset it
+     * back to to-down.
+     */
+    memblock_set_bottom_up(true);
     swiotlb_init(0);
 #endif
 
arch/powerpc/perf/core-book3s.c

@@ -95,7 +95,7 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
 {
     return 0;
 }
-static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
+static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp) { }
 static inline u32 perf_get_misc_flags(struct pt_regs *regs)
 {
     return 0;
@@ -126,7 +126,7 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
 static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
 static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
 static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
-static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
+static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {}
 static void pmao_restore_workaround(bool ebb) { }
 static bool use_ic(u64 event)
 {
@@ -174,7 +174,7 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
  * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the
  * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER.
  */
-static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
+static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp)
 {
     unsigned long mmcra = regs->dsisr;
     bool sdar_valid;
@@ -435,7 +435,7 @@ static __u64 power_pmu_bhrb_to(u64 addr)
 }
 
 /* Processing BHRB entries */
-static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
+static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw)
 {
     u64 val;
     u64 addr;
@@ -463,8 +463,7 @@ static void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
              * exporting it to userspace (avoid exposure of regions
              * where we could have speculative execution)
              */
-            if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN) &&
-                is_kernel_addr(addr))
+            if (is_kernel_addr(addr) && perf_allow_kernel(&event->attr) != 0)
                 continue;
 
             /* Branches are read most recent first (ie. mfbhrb 0 is
@@ -2077,12 +2076,12 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 
         if (event->attr.sample_type &
             (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
-            perf_get_data_addr(regs, &data.addr);
+            perf_get_data_addr(event, regs, &data.addr);
 
         if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
             struct cpu_hw_events *cpuhw;
             cpuhw = this_cpu_ptr(&cpu_hw_events);
-            power_pmu_bhrb_read(cpuhw);
+            power_pmu_bhrb_read(event, cpuhw);
             data.br_stack = &cpuhw->bhrb_stack;
         }
 
@@ -281,7 +281,6 @@ CONFIG_USB_USBNET=y
 # CONFIG_WLAN_VENDOR_TI is not set
 # CONFIG_WLAN_VENDOR_ZYDAS is not set
 # CONFIG_WLAN_VENDOR_QUANTENNA is not set
-CONFIG_MAC80211_HWSIM=y
 CONFIG_VIRT_WIFI=y
 CONFIG_INPUT_MOUSEDEV=y
 CONFIG_INPUT_EVDEV=y
arch/x86/events/core.c

@@ -375,7 +375,7 @@ int x86_add_exclusive(unsigned int what)
      * LBR and BTS are still mutually exclusive.
      */
     if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
-        return 0;
+        goto out;
 
     if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
         mutex_lock(&pmc_reserve_mutex);
@@ -387,6 +387,7 @@ int x86_add_exclusive(unsigned int what)
         mutex_unlock(&pmc_reserve_mutex);
     }
 
+out:
     atomic_inc(&active_events);
     return 0;
 
@@ -397,11 +398,15 @@ fail_unlock:
 
 void x86_del_exclusive(unsigned int what)
 {
+    atomic_dec(&active_events);
+
+    /*
+     * See the comment in x86_add_exclusive().
+     */
     if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
         return;
 
     atomic_dec(&x86_pmu.lbr_exclusive[what]);
-    atomic_dec(&active_events);
 }
 
 int x86_setup_perfctr(struct perf_event *event)
arch/x86/events/intel/bts.c

@@ -563,9 +563,11 @@ static int bts_event_init(struct perf_event *event)
      * Note that the default paranoia setting permits unprivileged
      * users to profile the kernel.
      */
-    if (event->attr.exclude_kernel && perf_paranoid_kernel() &&
-        !capable(CAP_SYS_ADMIN))
-        return -EACCES;
+    if (event->attr.exclude_kernel) {
+        ret = perf_allow_kernel(&event->attr);
+        if (ret)
+            return ret;
+    }
 
     if (x86_add_exclusive(x86_lbr_exclusive_bts))
         return -EBUSY;
arch/x86/events/intel/core.c

@@ -3087,8 +3087,9 @@ static int intel_pmu_hw_config(struct perf_event *event)
         if (x86_pmu.version < 3)
             return -EINVAL;
 
-        if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
-            return -EACCES;
+        ret = perf_allow_cpu(&event->attr);
+        if (ret)
+            return ret;
 
         event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
 
arch/x86/events/intel/p4.c

@@ -776,8 +776,9 @@ static int p4_validate_raw_event(struct perf_event *event)
      * the user needs special permissions to be able to use it
      */
     if (p4_ht_active() && p4_event_bind_map[v].shared) {
-        if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
-            return -EACCES;
+        v = perf_allow_cpu(&event->attr);
+        if (v)
+            return v;
     }
 
     /* ESCR EventMask bits may be invalid */
arch/x86/platform/efi/quirks.c

@@ -257,10 +257,6 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
         return;
     }
 
-    /* No need to reserve regions that will never be freed. */
-    if (md.attribute & EFI_MEMORY_RUNTIME)
-        return;
-
     size += addr % EFI_PAGE_SIZE;
     size = round_up(size, EFI_PAGE_SIZE);
     addr = round_down(addr, EFI_PAGE_SIZE);
@@ -290,6 +286,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
     early_memunmap(new, new_size);
 
     efi_memmap_install(new_phys, num_entries);
+    e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
+    e820__update_table(e820_table);
 }
 
 /*
block/blk-map.c

@@ -152,7 +152,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
     return 0;
 
 unmap_rq:
-    __blk_rq_unmap_user(bio);
+    blk_rq_unmap_user(bio);
 fail:
     rq->bio = NULL;
     return ret;
drivers/firmware/efi/libstub/gop.c

@@ -85,30 +85,6 @@ setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
     }
 }
 
-static efi_status_t
-__gop_query32(efi_system_table_t *sys_table_arg,
-              struct efi_graphics_output_protocol_32 *gop32,
-              struct efi_graphics_output_mode_info **info,
-              unsigned long *size, u64 *fb_base)
-{
-    struct efi_graphics_output_protocol_mode_32 *mode;
-    efi_graphics_output_protocol_query_mode query_mode;
-    efi_status_t status;
-    unsigned long m;
-
-    m = gop32->mode;
-    mode = (struct efi_graphics_output_protocol_mode_32 *)m;
-    query_mode = (void *)(unsigned long)gop32->query_mode;
-
-    status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
-                              info);
-    if (status != EFI_SUCCESS)
-        return status;
-
-    *fb_base = mode->frame_buffer_base;
-    return status;
-}
-
 static efi_status_t
 setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
             efi_guid_t *proto, unsigned long size, void **gop_handle)
@@ -121,7 +97,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
     u64 fb_base;
     struct efi_pixel_bitmask pixel_info;
     int pixel_format;
-    efi_status_t status = EFI_NOT_FOUND;
+    efi_status_t status;
     u32 *handles = (u32 *)(unsigned long)gop_handle;
     int i;
 
@@ -130,6 +106,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
     nr_gops = size / sizeof(u32);
     for (i = 0; i < nr_gops; i++) {
+        struct efi_graphics_output_protocol_mode_32 *mode;
         struct efi_graphics_output_mode_info *info = NULL;
         efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
         bool conout_found = false;
@@ -147,9 +124,11 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
         if (status == EFI_SUCCESS)
             conout_found = true;
 
-        status = __gop_query32(sys_table_arg, gop32, &info, &size,
-                               &current_fb_base);
-        if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+        mode = (void *)(unsigned long)gop32->mode;
+        info = (void *)(unsigned long)mode->info;
+        current_fb_base = mode->frame_buffer_base;
+
+        if ((!first_gop || conout_found) &&
             info->pixel_format != PIXEL_BLT_ONLY) {
             /*
              * Systems that use the UEFI Console Splitter may
@@ -177,7 +156,7 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
     /* Did we find any GOPs? */
     if (!first_gop)
-        goto out;
+        return EFI_NOT_FOUND;
 
     /* EFI framebuffer */
     si->orig_video_isVGA = VIDEO_TYPE_EFI;
@@ -199,32 +178,8 @@ setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
     si->lfb_size = si->lfb_linelength * si->lfb_height;
 
     si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
-    return status;
-}
 
-static efi_status_t
-__gop_query64(efi_system_table_t *sys_table_arg,
-              struct efi_graphics_output_protocol_64 *gop64,
-              struct efi_graphics_output_mode_info **info,
-              unsigned long *size, u64 *fb_base)
-{
-    struct efi_graphics_output_protocol_mode_64 *mode;
-    efi_graphics_output_protocol_query_mode query_mode;
-    efi_status_t status;
-    unsigned long m;
-
-    m = gop64->mode;
-    mode = (struct efi_graphics_output_protocol_mode_64 *)m;
-    query_mode = (void *)(unsigned long)gop64->query_mode;
-
-    status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
-                              info);
-    if (status != EFI_SUCCESS)
-        return status;
-
-    *fb_base = mode->frame_buffer_base;
-    return status;
+    return EFI_SUCCESS;
 }
 
 static efi_status_t
@@ -239,7 +194,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
     u64 fb_base;
     struct efi_pixel_bitmask pixel_info;
     int pixel_format;
-    efi_status_t status = EFI_NOT_FOUND;
+    efi_status_t status;
     u64 *handles = (u64 *)(unsigned long)gop_handle;
     int i;
 
@@ -248,6 +203,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
     nr_gops = size / sizeof(u64);
     for (i = 0; i < nr_gops; i++) {
+        struct efi_graphics_output_protocol_mode_64 *mode;
         struct efi_graphics_output_mode_info *info = NULL;
         efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
         bool conout_found = false;
@@ -265,9 +221,11 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
         if (status == EFI_SUCCESS)
             conout_found = true;
 
-        status = __gop_query64(sys_table_arg, gop64, &info, &size,
-                               &current_fb_base);
-        if (status == EFI_SUCCESS && (!first_gop || conout_found) &&
+        mode = (void *)(unsigned long)gop64->mode;
+        info = (void *)(unsigned long)mode->info;
+        current_fb_base = mode->frame_buffer_base;
+
+        if ((!first_gop || conout_found) &&
             info->pixel_format != PIXEL_BLT_ONLY) {
             /*
              * Systems that use the UEFI Console Splitter may
@@ -295,7 +253,7 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
 
     /* Did we find any GOPs? */
     if (!first_gop)
-        goto out;
+        return EFI_NOT_FOUND;
 
     /* EFI framebuffer */
     si->orig_video_isVGA = VIDEO_TYPE_EFI;
@@ -317,8 +275,8 @@ setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
     si->lfb_size = si->lfb_linelength * si->lfb_height;
 
     si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
-    return status;
+
+    return EFI_SUCCESS;
 }
 
 /*
drivers/net/dsa/mv88e6xxx/global1.c

@@ -313,6 +313,11 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
 {
     u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST;
 
+    /* Use the default high priority for management frames sent to
+     * the CPU.
+     */
+    port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
+
     return mv88e6390_g1_monitor_write(chip, ptr, port);
 }
 
drivers/net/dsa/mv88e6xxx/global1.h

@@ -189,6 +189,7 @@
 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
 #define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0
 #define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
 
 /* Offset 0x1C: Global Control 2 */
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h

@@ -1112,7 +1112,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
     for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
         u32 func_config =
             MF_CFG_RD(bp,
-                      func_mf_config[BP_PORT(bp) + 2 * i].
+                      func_mf_config[BP_PATH(bp) + 2 * i].
                       config);
         func_num +=
             ((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c

@@ -9995,10 +9995,18 @@ static void bnx2x_recovery_failed(struct bnx2x *bp)
  */
 static void bnx2x_parity_recover(struct bnx2x *bp)
 {
-    bool global = false;
     u32 error_recovered, error_unrecovered;
-    bool is_parity;
+    bool is_parity, global = false;
+#ifdef CONFIG_BNX2X_SRIOV
+    int vf_idx;
+
+    for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
+        struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+        if (vf)
+            vf->state = VF_LOST;
+    }
+#endif
     DP(NETIF_MSG_HW, "Handling parity\n");
     while (1) {
         switch (bp->recovery_state) {
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h

@@ -139,6 +139,7 @@ struct bnx2x_virtf {
 #define VF_ACQUIRED 1  /* VF acquired, but not initialized */
 #define VF_ENABLED  2  /* VF Enabled */
 #define VF_RESET    3  /* VF FLR'd, pending cleanup */
+#define VF_LOST     4  /* Recovery while VFs are loaded */
 
     bool flr_clnup_stage;  /* true during flr cleanup */
     bool malicious;        /* true if FW indicated so, until FLR */
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c

@@ -2112,6 +2112,18 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 {
     int i;
 
+    if (vf->state == VF_LOST) {
+        /* Just ack the FW and return if VFs are lost
+         * in case of parity error. VFs are supposed to be timedout
+         * on waiting for PF response.
+         */
+        DP(BNX2X_MSG_IOV,
+           "VF 0x%x lost, not handling the request\n", vf->abs_vfid);
+
+        storm_memset_vf_mbx_ack(bp, vf->abs_vfid);
+        return;
+    }
+
     /* check if tlv type is known */
     if (bnx2x_tlv_supported(mbx->first_tlv.tl.type)) {
         /* Lock the per vf op mutex and note the locker's identity.
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c

@@ -724,6 +724,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
         /* default */
         break;
     case PHY_INTERFACE_MODE_RGMII:
+    case PHY_INTERFACE_MODE_RGMII_ID:
+    case PHY_INTERFACE_MODE_RGMII_RXID:
+    case PHY_INTERFACE_MODE_RGMII_TXID:
         reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
         break;
     case PHY_INTERFACE_MODE_RMII:
drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c

@@ -53,7 +53,7 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
      * rate, which then uses the auto-reparenting feature of the
      * clock driver, and enabling/disabling the clock.
      */
-    if (gmac->interface == PHY_INTERFACE_MODE_RGMII) {
+    if (phy_interface_mode_is_rgmii(gmac->interface)) {
         clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
         clk_prepare_enable(gmac->tx_clk);
         gmac->clk_enabled = 1;
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c

@@ -51,8 +51,7 @@
 #include <linux/of_mdio.h>
 #include "dwmac1000.h"
 
-
-#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
+#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
 
 /* Module parameters */
@@ -3793,12 +3792,24 @@ static void stmmac_set_rx_mode(struct net_device *dev)
 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
 {
     struct stmmac_priv *priv = netdev_priv(dev);
+    int txfifosz = priv->plat->tx_fifo_size;
+
+    if (txfifosz == 0)
+        txfifosz = priv->dma_cap.tx_fifo_size;
+
+    txfifosz /= priv->plat->tx_queues_to_use;
 
     if (netif_running(dev)) {
         netdev_err(priv->dev, "must be stopped to change its MTU\n");
         return -EBUSY;
     }
 
+    new_mtu = STMMAC_ALIGN(new_mtu);
+
+    /* If condition true, FIFO is too small or MTU too large */
+    if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
+        return -EINVAL;
+
     dev->mtu = new_mtu;
 
     netdev_update_features(dev);
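Together, the two stmmac changes force every buffer size to a 16-byte multiple (a DMA requirement on this hardware) and make stmmac_change_mtu() reject MTUs whose aligned buffer could not fit the per-queue TX FIFO. A standalone sketch of the arithmetic, with assumed sizes (64-byte cache line, 4 KiB FIFO over 4 queues — illustrative values only):

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
        int cache_line = 64;    /* assumed SMP_CACHE_BYTES */
        int new_mtu = 1500;

        /* STMMAC_ALIGN(x) = ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16):
         * cache-line align first, then force a 16-byte multiple. */
        int bufsz = ALIGN_UP(ALIGN_UP(new_mtu, cache_line), 16);
        printf("MTU %d -> buffer %d\n", new_mtu, bufsz);   /* 1536 */

        int txfifosz = 4096 / 4;  /* assumed FIFO, split over 4 queues */
        if (txfifosz < bufsz)
            printf("MTU rejected: %d exceeds per-queue FIFO %d\n",
                   bufsz, txfifosz);
        return 0;
    }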
drivers/net/gtp.c

@@ -816,7 +816,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
     lock_sock(sock->sk);
     if (sock->sk->sk_user_data) {
         sk = ERR_PTR(-EBUSY);
-        goto out_sock;
+        goto out_rel_sock;
     }
 
     sk = sock->sk;
@@ -829,8 +829,9 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
 
     setup_udp_tunnel_sock(sock_net(sock->sk), sock, &tuncfg);
 
-out_sock:
+out_rel_sock:
     release_sock(sock->sk);
+out_sock:
     sockfd_put(sock);
     return sk;
 }
drivers/net/hyperv/hyperv_net.h

@@ -179,7 +179,6 @@ struct rndis_device {
 
     u8 hw_mac_adr[ETH_ALEN];
     u8 rss_key[NETVSC_HASH_KEYLEN];
-    u16 rx_table[ITAB_NUM];
 };
 
 
@@ -741,6 +740,8 @@ struct net_device_context {
 
     u32 tx_table[VRSS_SEND_TAB_SIZE];
 
+    u16 rx_table[ITAB_NUM];
+
     /* Ethtool settings */
     bool udp4_l4_hash;
     bool udp6_l4_hash;
drivers/net/hyperv/netvsc_drv.c

@@ -1528,7 +1528,7 @@ static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
     rndis_dev = ndev->extension;
     if (indir) {
         for (i = 0; i < ITAB_NUM; i++)
-            indir[i] = rndis_dev->rx_table[i];
+            indir[i] = ndc->rx_table[i];
     }
 
     if (key)
@@ -1558,7 +1558,7 @@ static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
             return -EINVAL;
 
         for (i = 0; i < ITAB_NUM; i++)
-            rndis_dev->rx_table[i] = indir[i];
+            ndc->rx_table[i] = indir[i];
     }
 
     if (!key) {
drivers/net/hyperv/rndis_filter.c

@@ -715,6 +715,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
                                    const u8 *rss_key, u16 flag)
 {
     struct net_device *ndev = rdev->ndev;
+    struct net_device_context *ndc = netdev_priv(ndev);
     struct rndis_request *request;
     struct rndis_set_request *set;
     struct rndis_set_complete *set_complete;
@@ -754,7 +755,7 @@ static int rndis_set_rss_param_msg(struct rndis_device *rdev,
     /* Set indirection table entries */
     itab = (u32 *)(rssp + 1);
     for (i = 0; i < ITAB_NUM; i++)
-        itab[i] = rdev->rx_table[i];
+        itab[i] = ndc->rx_table[i];
 
     /* Set hask key values */
     keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
@@ -1204,6 +1205,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
                                               struct netvsc_device_info *device_info)
 {
     struct net_device *net = hv_get_drvdata(dev);
+    struct net_device_context *ndc = netdev_priv(net);
     struct netvsc_device *net_device;
     struct rndis_device *rndis_device;
     struct ndis_recv_scale_cap rsscap;
@@ -1286,9 +1288,11 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
     /* We will use the given number of channels if available. */
     net_device->num_chn = min(net_device->max_chn, device_info->num_chn);
 
-    for (i = 0; i < ITAB_NUM; i++)
-        rndis_device->rx_table[i] = ethtool_rxfh_indir_default(
+    if (!netif_is_rxfh_configured(net)) {
+        for (i = 0; i < ITAB_NUM; i++)
+            ndc->rx_table[i] = ethtool_rxfh_indir_default(
                         i, net_device->num_chn);
+    }
 
     atomic_set(&net_device->open_chn, 1);
     vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
drivers/net/macvlan.c

@@ -263,7 +263,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
                               struct net_device *src,
                               enum macvlan_mode mode)
 {
-    const struct ethhdr *eth = eth_hdr(skb);
+    const struct ethhdr *eth = skb_eth_hdr(skb);
     const struct macvlan_dev *vlan;
     struct sk_buff *nskb;
     unsigned int i;
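eth_hdr() resolves the header through skb->mac_header, which is not guaranteed to be set when macvlan_broadcast() runs on the transmit path, whereas skb_eth_hdr() (added in the if_ether.h hunk further down) reads the Ethernet header directly at skb->data. A toy userspace model of the difference (hypothetical structure, not the kernel's sk_buff):

    #include <assert.h>
    #include <stdio.h>

    /* Toy model: mac_header is an offset that is only meaningful once set. */
    struct toy_skb {
        unsigned char *data;  /* packet start (TX: the Ethernet header) */
        int mac_header;       /* offset into buf; -1 here means "never set" */
        unsigned char buf[64];
    };

    static unsigned char *toy_skb_eth_hdr(struct toy_skb *skb)
    {
        /* Post-fix TX behaviour: the header is simply at skb->data. */
        return skb->data;
    }

    int main(void)
    {
        struct toy_skb skb;

        skb.mac_header = -1;       /* TX path: offset never initialised */
        skb.data = skb.buf + 16;   /* Ethernet header really lives here */

        assert(toy_skb_eth_hdr(&skb) == skb.buf + 16); /* always correct */
        /* an eth_hdr()-style lookup would instead read at the bogus
         * offset stored in mac_header */
        printf("stale mac_header offset: %d\n", skb.mac_header);
        return 0;
    }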
drivers/net/usb/lan78xx.c

@@ -497,7 +497,7 @@ static int lan78xx_read_stats(struct lan78xx_net *dev,
         }
     } else {
         netdev_warn(dev->net,
-                    "Failed to read stat ret = 0x%x", ret);
+                    "Failed to read stat ret = %d", ret);
     }
 
     kfree(stats);
@@ -2602,11 +2602,6 @@ static int lan78xx_stop(struct net_device *net)
     return 0;
 }
 
-static int lan78xx_linearize(struct sk_buff *skb)
-{
-    return skb_linearize(skb);
-}
-
 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
                                        struct sk_buff *skb, gfp_t flags)
 {
@@ -2617,8 +2612,10 @@ static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
         return NULL;
     }
 
-    if (lan78xx_linearize(skb) < 0)
+    if (skb_linearize(skb)) {
+        dev_kfree_skb_any(skb);
         return NULL;
+    }
 
     tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
 
drivers/net/vxlan.c

@@ -2216,7 +2216,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
             skb_dst_update_pmtu(skb, mtu);
         }
 
-        tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+        tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
         ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
         err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
                               vni, md, flags, udp_sum);
@@ -2257,7 +2257,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
             skb_dst_update_pmtu(skb, mtu);
         }
 
-        tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+        tos = ip_tunnel_ecn_encap(RT_TOS(tos), old_iph, skb);
         ttl = ttl ? : ip6_dst_hoplimit(ndst);
         skb_scrub_packet(skb, xnet);
         err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
drivers/net/wireless/marvell/mwifiex/tdls.c

@@ -956,59 +956,117 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
 
         switch (*pos) {
         case WLAN_EID_SUPP_RATES:
+            if (pos[1] > 32)
+                return;
             sta_ptr->tdls_cap.rates_len = pos[1];
             for (i = 0; i < pos[1]; i++)
                 sta_ptr->tdls_cap.rates[i] = pos[i + 2];
             break;
 
         case WLAN_EID_EXT_SUPP_RATES:
+            if (pos[1] > 32)
+                return;
             basic = sta_ptr->tdls_cap.rates_len;
+            if (pos[1] > 32 - basic)
+                return;
             for (i = 0; i < pos[1]; i++)
                 sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
             sta_ptr->tdls_cap.rates_len += pos[1];
             break;
         case WLAN_EID_HT_CAPABILITY:
-            memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
+            if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
+                return;
+            if (pos[1] != sizeof(struct ieee80211_ht_cap))
+                return;
+            /* copy the ie's value into ht_capb*/
+            memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
                    sizeof(struct ieee80211_ht_cap));
             sta_ptr->is_11n_enabled = 1;
             break;
         case WLAN_EID_HT_OPERATION:
-            memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
+            if (pos > end -
+                sizeof(struct ieee80211_ht_operation) - 2)
+                return;
+            if (pos[1] != sizeof(struct ieee80211_ht_operation))
+                return;
+            /* copy the ie's value into ht_oper*/
+            memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
                    sizeof(struct ieee80211_ht_operation));
             break;
         case WLAN_EID_BSS_COEX_2040:
+            if (pos > end - 3)
+                return;
+            if (pos[1] != 1)
+                return;
             sta_ptr->tdls_cap.coex_2040 = pos[2];
             break;
         case WLAN_EID_EXT_CAPABILITY:
+            if (pos > end - sizeof(struct ieee_types_header))
+                return;
+            if (pos[1] < sizeof(struct ieee_types_header))
+                return;
+            if (pos[1] > 8)
+                return;
             memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
                    sizeof(struct ieee_types_header) +
                    min_t(u8, pos[1], 8));
             break;
         case WLAN_EID_RSN:
+            if (pos > end - sizeof(struct ieee_types_header))
+                return;
+            if (pos[1] < sizeof(struct ieee_types_header))
+                return;
+            if (pos[1] > IEEE_MAX_IE_SIZE -
+                sizeof(struct ieee_types_header))
+                return;
             memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
                    sizeof(struct ieee_types_header) +
                    min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
                          sizeof(struct ieee_types_header)));
             break;
         case WLAN_EID_QOS_CAPA:
+            if (pos > end - 3)
+                return;
+            if (pos[1] != 1)
+                return;
             sta_ptr->tdls_cap.qos_info = pos[2];
             break;
         case WLAN_EID_VHT_OPERATION:
-            if (priv->adapter->is_hw_11ac_capable)
-                memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
+            if (priv->adapter->is_hw_11ac_capable) {
+                if (pos > end -
+                    sizeof(struct ieee80211_vht_operation) - 2)
+                    return;
+                if (pos[1] !=
+                    sizeof(struct ieee80211_vht_operation))
+                    return;
+                /* copy the ie's value into vhtoper*/
+                memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2,
                        sizeof(struct ieee80211_vht_operation));
+            }
             break;
         case WLAN_EID_VHT_CAPABILITY:
             if (priv->adapter->is_hw_11ac_capable) {
-                memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
+                if (pos > end -
+                    sizeof(struct ieee80211_vht_cap) - 2)
+                    return;
+                if (pos[1] != sizeof(struct ieee80211_vht_cap))
+                    return;
+                /* copy the ie's value into vhtcap*/
+                memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
                        sizeof(struct ieee80211_vht_cap));
                 sta_ptr->is_11ac_enabled = 1;
             }
             break;
         case WLAN_EID_AID:
-            if (priv->adapter->is_hw_11ac_capable)
+            if (priv->adapter->is_hw_11ac_capable) {
+                if (pos > end - 4)
+                    return;
+                if (pos[1] != 2)
+                    return;
                 sta_ptr->tdls_cap.aid =
                     get_unaligned_le16((pos + 2));
+            }
             break;
         default:
             break;
         }
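Every arm of the switch above now verifies, before any memcpy(), that the element header plus its advertised payload fits between pos and end, and that the length byte pos[1] matches (or is bounded by) the destination field — that is the whole heap-overflow fix. The recurring shape of the check, reduced to a userspace helper (hypothetical name, not from the driver):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * One TLV element: pos[0] = id, pos[1] = length, payload follows.
     * Copy exactly 'want' payload bytes, but only if the element fits in
     * [pos, end) and its advertised length matches the destination size.
     */
    static bool ie_copy_exact(void *dst, size_t want,
                              const unsigned char *pos,
                              const unsigned char *end)
    {
        if (pos + 2 + want > end)   /* header + payload must fit */
            return false;
        if (pos[1] != want)         /* length byte must match dst size */
            return false;
        memcpy(dst, pos + 2, want); /* copy payload only, never past end */
        return true;
    }

    int main(void)
    {
        unsigned char ie[] = { 0x2d, 2, 0xaa, 0xbb }; /* id, len, payload */
        unsigned char cap[2];

        printf("%s\n",
               ie_copy_exact(cap, sizeof(cap), ie, ie + sizeof(ie))
               ? "copied" : "rejected");
        return 0;
    }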
drivers/pci/switch/switchtec.c

@@ -23,7 +23,7 @@
 #include <linux/pci.h>
 #include <linux/cdev.h>
 #include <linux/wait.h>
-
+#include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/nospec.h>
 
 MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
@@ -898,7 +898,7 @@ static int ioctl_event_summary(struct switchtec_dev *stdev,
     u32 reg;
 
     s.global = ioread32(&stdev->mmio_sw_event->global_summary);
-    s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
+    s.part_bitmap = readq(&stdev->mmio_sw_event->part_event_bitmap);
     s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
 
     for (i = 0; i < stdev->partition_count; i++) {
drivers/regulator/rn5t618-regulator.c

@@ -154,6 +154,7 @@ static struct platform_driver rn5t618_regulator_driver = {
 
 module_platform_driver(rn5t618_regulator_driver);
 
+MODULE_ALIAS("platform:rn5t618-regulator");
 MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
 MODULE_DESCRIPTION("RN5T618 regulator driver");
 MODULE_LICENSE("GPL v2");
drivers/s390/block/dasd_eckd.c

@@ -1133,7 +1133,8 @@ static u32 get_fcx_max_data(struct dasd_device *device)
 {
     struct dasd_eckd_private *private = device->private;
     int fcx_in_css, fcx_in_gneq, fcx_in_features;
-    int tpm, mdc;
+    unsigned int mdc;
+    int tpm;
 
     if (dasd_nofcx)
         return 0;
@@ -1147,7 +1148,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
         return 0;
 
     mdc = ccw_device_get_mdc(device->cdev, 0);
-    if (mdc < 0) {
+    if (mdc == 0) {
         dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
         return 0;
     } else {
@@ -1158,12 +1159,12 @@ static u32 get_fcx_max_data(struct dasd_device *device)
 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
 {
     struct dasd_eckd_private *private = device->private;
-    int mdc;
+    unsigned int mdc;
     u32 fcx_max_data;
 
     if (private->fcx_max_data) {
         mdc = ccw_device_get_mdc(device->cdev, lpm);
-        if ((mdc < 0)) {
+        if (mdc == 0) {
             dev_warn(&device->cdev->dev,
                      "Detecting the maximum data size for zHPF "
                      "requests failed (rc=%d) for a new path %x\n",
@@ -1767,7 +1768,7 @@ out_err2:
     dasd_free_block(device->block);
     device->block = NULL;
 out_err1:
-    kfree(private->conf_data);
+    dasd_eckd_clear_conf_data(device);
     kfree(device->private);
     device->private = NULL;
     return rc;
@@ -1776,7 +1777,6 @@ out_err1:
 static void dasd_eckd_uncheck_device(struct dasd_device *device)
 {
     struct dasd_eckd_private *private = device->private;
-    int i;
 
     if (!private)
         return;
@@ -1786,21 +1786,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
     private->sneq = NULL;
     private->vdsneq = NULL;
     private->gneq = NULL;
-    private->conf_len = 0;
-    for (i = 0; i < 8; i++) {
-        kfree(device->path[i].conf_data);
-        if ((__u8 *)device->path[i].conf_data ==
-            private->conf_data) {
-            private->conf_data = NULL;
-            private->conf_len = 0;
-        }
-        device->path[i].conf_data = NULL;
-        device->path[i].cssid = 0;
-        device->path[i].ssid = 0;
-        device->path[i].chpid = 0;
-    }
-    kfree(private->conf_data);
-    private->conf_data = NULL;
+    dasd_eckd_clear_conf_data(device);
 }
 
 static struct dasd_ccw_req *
drivers/s390/cio/device_ops.c

@@ -595,7 +595,7 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout);
  * @mask: mask of paths to use
  *
  * Return the number of 64K-bytes blocks all paths at least support
- * for a transport command. Return values <= 0 indicate failures.
+ * for a transport command. Return value 0 indicates failure.
  */
 int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
 {
drivers/spi/spi-cavium-thunderx.c

@@ -81,6 +81,7 @@ static int thunderx_spi_probe(struct pci_dev *pdev,
 
 error:
     clk_disable_unprepare(p->clk);
+    pci_release_regions(pdev);
     spi_master_put(master);
     return ret;
 }
@@ -95,6 +96,7 @@ static void thunderx_spi_remove(struct pci_dev *pdev)
         return;
 
     clk_disable_unprepare(p->clk);
+    pci_release_regions(pdev);
     /* Put everything in a known state. */
     writeq(0, p->register_base + OCTEON_SPI_CFG(p));
 }
drivers/usb/core/config.c

@@ -203,9 +203,58 @@ static const unsigned short super_speed_maxpacket_maxes[4] = {
     [USB_ENDPOINT_XFER_INT] = 1024,
 };
 
-static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
-    int asnum, struct usb_host_interface *ifp, int num_ep,
-    unsigned char *buffer, int size)
+static bool endpoint_is_duplicate(struct usb_endpoint_descriptor *e1,
+        struct usb_endpoint_descriptor *e2)
+{
+    if (e1->bEndpointAddress == e2->bEndpointAddress)
+        return true;
+
+    if (usb_endpoint_xfer_control(e1) || usb_endpoint_xfer_control(e2)) {
+        if (usb_endpoint_num(e1) == usb_endpoint_num(e2))
+            return true;
+    }
+
+    return false;
+}
+
+/*
+ * Check for duplicate endpoint addresses in other interfaces and in the
+ * altsetting currently being parsed.
+ */
+static bool config_endpoint_is_duplicate(struct usb_host_config *config,
+        int inum, int asnum, struct usb_endpoint_descriptor *d)
+{
+    struct usb_endpoint_descriptor *epd;
+    struct usb_interface_cache *intfc;
+    struct usb_host_interface *alt;
+    int i, j, k;
+
+    for (i = 0; i < config->desc.bNumInterfaces; ++i) {
+        intfc = config->intf_cache[i];
+
+        for (j = 0; j < intfc->num_altsetting; ++j) {
+            alt = &intfc->altsetting[j];
+
+            if (alt->desc.bInterfaceNumber == inum &&
+                alt->desc.bAlternateSetting != asnum)
+                continue;
+
+            for (k = 0; k < alt->desc.bNumEndpoints; ++k) {
+                epd = &alt->endpoint[k].desc;
+
+                if (endpoint_is_duplicate(epd, d))
+                    return true;
+            }
+        }
+    }
+
+    return false;
+}
+
+static int usb_parse_endpoint(struct device *ddev, int cfgno,
+        struct usb_host_config *config, int inum, int asnum,
+        struct usb_host_interface *ifp, int num_ep,
+        unsigned char *buffer, int size)
 {
     unsigned char *buffer0 = buffer;
     struct usb_endpoint_descriptor *d;
@@ -242,13 +291,10 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
         goto skip_to_next_endpoint_or_interface_descriptor;
 
     /* Check for duplicate endpoint addresses */
-    for (i = 0; i < ifp->desc.bNumEndpoints; ++i) {
-        if (ifp->endpoint[i].desc.bEndpointAddress ==
-            d->bEndpointAddress) {
-            dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
-                cfgno, inum, asnum, d->bEndpointAddress);
-            goto skip_to_next_endpoint_or_interface_descriptor;
-        }
+    if (config_endpoint_is_duplicate(config, inum, asnum, d)) {
+        dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n",
+                cfgno, inum, asnum, d->bEndpointAddress);
+        goto skip_to_next_endpoint_or_interface_descriptor;
     }
 
     endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
@@ -522,8 +568,8 @@ static int usb_parse_interface(struct device *ddev, int cfgno,
         if (((struct usb_descriptor_header *) buffer)->bDescriptorType
              == USB_DT_INTERFACE)
             break;
-        retval = usb_parse_endpoint(ddev, cfgno, inum, asnum, alt,
-            num_ep, buffer, size);
+        retval = usb_parse_endpoint(ddev, cfgno, config, inum, asnum,
+                alt, num_ep, buffer, size);
         if (retval < 0)
             return retval;
         ++n;
drivers/usb/gadget/udc/dummy_hcd.c

@@ -980,8 +980,18 @@ static int dummy_udc_start(struct usb_gadget *g,
     struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
     struct dummy *dum = dum_hcd->dum;
 
-    if (driver->max_speed == USB_SPEED_UNKNOWN)
+    switch (g->speed) {
+    /* All the speeds we support */
+    case USB_SPEED_LOW:
+    case USB_SPEED_FULL:
+    case USB_SPEED_HIGH:
+    case USB_SPEED_SUPER:
+        break;
+    default:
+        dev_err(dummy_dev(dum_hcd), "Unsupported driver max speed %d\n",
+                driver->max_speed);
         return -EINVAL;
+    }
 
     /*
      * SLAVE side init ... the layer above hardware, which
@@ -1325,7 +1335,7 @@ static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
     u32 this_sg;
     bool next_sg;
 
-    to_host = usb_pipein(urb->pipe);
+    to_host = usb_urb_dir_in(urb);
     rbuf = req->req.buf + req->req.actual;
 
     if (!urb->num_sgs) {
@@ -1413,7 +1423,7 @@ top:
 
     /* FIXME update emulated data toggle too */
 
-    to_host = usb_pipein(urb->pipe);
+    to_host = usb_urb_dir_in(urb);
     if (unlikely(len == 0))
         is_short = 1;
     else {
@@ -1770,6 +1780,7 @@ static void dummy_timer(unsigned long _dum_hcd)
     int i;
 
     /* simplistic model for one frame's bandwidth */
+    /* FIXME: account for transaction and packet overhead */
    switch (dum->gadget.speed) {
     case USB_SPEED_LOW:
         total = 8/*bytes*/ * 12/*packets*/;
@@ -1784,9 +1795,10 @@ static void dummy_timer(unsigned long _dum_hcd)
         /* Bus speed is 500000 bytes/ms, so use a little less */
         total = 490000;
         break;
-    default:
+    default:    /* Can't happen */
         dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
-        return;
+        total = 0;
+        break;
     }
 
     /* FIXME if HZ != 1000 this will probably misbehave ... */
@@ -1814,7 +1826,6 @@ restart:
         struct dummy_request *req;
         u8 address;
         struct dummy_ep *ep = NULL;
-        int type;
         int status = -EINPROGRESS;
 
         /* stop when we reach URBs queued after the timer interrupt */
@@ -1826,18 +1837,14 @@ restart:
             goto return_urb;
         else if (dum_hcd->rh_state != DUMMY_RH_RUNNING)
             continue;
-        type = usb_pipetype(urb->pipe);
 
-        /* used up this frame's non-periodic bandwidth?
-         * FIXME there's infinite bandwidth for control and
-         * periodic transfers ... unrealistic.
-         */
-        if (total <= 0 && type == PIPE_BULK)
+        /* Used up this frame's bandwidth? */
+        if (total <= 0)
             continue;
 
         /* find the gadget's ep for this request (if configured) */
         address = usb_pipeendpoint (urb->pipe);
-        if (usb_pipein(urb->pipe))
+        if (usb_urb_dir_in(urb))
             address |= USB_DIR_IN;
         ep = find_endpoint(dum, address);
         if (!ep) {
@@ -2390,7 +2397,7 @@ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
             s = "?";
             break;
          } s; }),
-        ep, ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "",
+        ep, ep ? (usb_urb_dir_in(urb) ? "in" : "out") : "",
         ({ char *s; \
         switch (usb_pipetype(urb->pipe)) { \
         case PIPE_CONTROL: \
@@ -2734,7 +2741,7 @@ static struct platform_driver dummy_hcd_driver = {
 };
 
 /*-------------------------------------------------------------------------*/
-#define MAX_NUM_UDC 2
+#define MAX_NUM_UDC 32
 static struct platform_device *the_udc_pdev[MAX_NUM_UDC];
 static struct platform_device *the_hcd_pdev[MAX_NUM_UDC];
drivers/usb/serial/option.c

@@ -1175,6 +1175,8 @@ static const struct usb_device_id option_ids[] = {
       .driver_info = NCTRL(0) | RSVD(3) },
     { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
       .driver_info = NCTRL(0) },
+    { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff), /* Telit ME910G1 */
+      .driver_info = NCTRL(0) | RSVD(3) },
     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
       .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
     { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
fs/drop_caches.c

@@ -35,11 +35,11 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
         spin_unlock(&inode->i_lock);
         spin_unlock(&sb->s_inode_list_lock);
 
-        cond_resched();
         invalidate_mapping_pages(inode->i_mapping, 0, -1);
         iput(toput_inode);
         toput_inode = inode;
 
+        cond_resched();
         spin_lock(&sb->s_inode_list_lock);
     }
     spin_unlock(&sb->s_inode_list_lock);
fs/inode.c

@@ -656,6 +656,7 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
     struct inode *inode, *next;
     LIST_HEAD(dispose);
 
+again:
     spin_lock(&sb->s_inode_list_lock);
     list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
         spin_lock(&inode->i_lock);
@@ -678,6 +679,12 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
         inode_lru_list_del(inode);
         spin_unlock(&inode->i_lock);
         list_add(&inode->i_lru, &dispose);
+        if (need_resched()) {
+            spin_unlock(&sb->s_inode_list_lock);
+            cond_resched();
+            dispose_list(&dispose);
+            goto again;
+        }
     }
     spin_unlock(&sb->s_inode_list_lock);
 
fs/notify/fsnotify.c

@@ -90,6 +90,7 @@ void fsnotify_unmount_inodes(struct super_block *sb)
 
         iput_inode = inode;
 
+        cond_resched();
         spin_lock(&sb->s_inode_list_lock);
     }
     spin_unlock(&sb->s_inode_list_lock);
fs/quota/dquot.c

@@ -976,6 +976,7 @@ static int add_dquot_ref(struct super_block *sb, int type)
          * later.
          */
         old_inode = inode;
+        cond_resched();
         spin_lock(&sb->s_inode_list_lock);
     }
     spin_unlock(&sb->s_inode_list_lock);
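The four s_inodes walkers patched above ("fs: avoid softlockups in s_inodes iterators") converge on one shape: pin the current inode with a reference, drop s_inode_list_lock around the expensive work, call cond_resched(), and only then retake the lock, so a walk over millions of inodes can no longer trip the soft-lockup watchdog. A userspace analogue of the pattern, with a pthread mutex standing in for the spinlock and sched_yield() for cond_resched() (sketch only):

    #include <pthread.h>
    #include <sched.h>

    struct node { struct node *next; };

    /*
     * Walk a long shared list without hogging the lock: the body runs
     * unlocked, yields, then relocks before following the next pointer.
     * Assumes nodes are pinned while unlocked, as the kernel code pins
     * inodes by holding a reference across the unlocked region.
     */
    static void walk(struct node *head, pthread_mutex_t *lock,
                     void (*work)(struct node *))
    {
        pthread_mutex_lock(lock);
        for (struct node *n = head; n; n = n->next) {
            pthread_mutex_unlock(lock);  /* heavy work runs unlocked */
            work(n);
            sched_yield();               /* kernel: cond_resched() */
            pthread_mutex_lock(lock);    /* relock before n->next */
        }
        pthread_mutex_unlock(lock);
    }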
include/linux/if_ether.h

@@ -28,6 +28,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
     return (struct ethhdr *)skb_mac_header(skb);
 }
 
+/* Prefer this version in TX path, instead of
+ * skb_reset_mac_header() + eth_hdr()
+ */
+static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
+{
+    return (struct ethhdr *)skb->data;
+}
+
 static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
 {
     return (struct ethhdr *)skb_inner_mac_header(skb);
include/linux/lsm_hooks.h

@@ -1729,6 +1729,14 @@ union security_list_options {
     int (*bpf_prog_alloc_security)(struct bpf_prog_aux *aux);
     void (*bpf_prog_free_security)(struct bpf_prog_aux *aux);
 #endif /* CONFIG_BPF_SYSCALL */
+#ifdef CONFIG_PERF_EVENTS
+    int (*perf_event_open)(struct perf_event_attr *attr, int type);
+    int (*perf_event_alloc)(struct perf_event *event);
+    void (*perf_event_free)(struct perf_event *event);
+    int (*perf_event_read)(struct perf_event *event);
+    int (*perf_event_write)(struct perf_event *event);
+
+#endif
 };
 
 struct security_hook_heads {
@@ -1958,6 +1966,13 @@ struct security_hook_heads {
     struct list_head bpf_prog_alloc_security;
     struct list_head bpf_prog_free_security;
 #endif /* CONFIG_BPF_SYSCALL */
+#ifdef CONFIG_PERF_EVENTS
+    struct list_head perf_event_open;
+    struct list_head perf_event_alloc;
+    struct list_head perf_event_free;
+    struct list_head perf_event_read;
+    struct list_head perf_event_write;
+#endif
 } __randomize_layout;
 
 /*
include/linux/perf_event.h

@@ -54,6 +54,7 @@ struct perf_guest_info_callbacks {
 #include <linux/perf_regs.h>
 #include <linux/workqueue.h>
 #include <linux/cgroup.h>
+#include <linux/security.h>
 #include <asm/local.h>
 
 struct perf_callchain_entry {
@@ -722,6 +723,9 @@ struct perf_event {
     int cgrp_defer_enabled;
 #endif
 
+#ifdef CONFIG_SECURITY
+    void *security;
+#endif
     struct list_head sb_list;
 
     /*
@@ -1195,24 +1199,46 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
 int perf_event_max_stack_handler(struct ctl_table *table, int write,
                                  void __user *buffer, size_t *lenp, loff_t *ppos);
 
+/* Access to perf_event_open(2) syscall. */
+#define PERF_SECURITY_OPEN 0
+
+/* Finer grained perf_event_open(2) access control. */
+#define PERF_SECURITY_CPU 1
+#define PERF_SECURITY_KERNEL 2
+#define PERF_SECURITY_TRACEPOINT 3
+
 static inline bool perf_paranoid_any(void)
 {
     return sysctl_perf_event_paranoid > 2;
 }
 
-static inline bool perf_paranoid_tracepoint_raw(void)
+static inline int perf_is_paranoid(void)
 {
     return sysctl_perf_event_paranoid > -1;
 }
 
-static inline bool perf_paranoid_cpu(void)
+static inline int perf_allow_kernel(struct perf_event_attr *attr)
 {
-    return sysctl_perf_event_paranoid > 0;
+    if (sysctl_perf_event_paranoid > 1 && !capable(CAP_SYS_ADMIN))
+        return -EACCES;
+
+    return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
 }
 
-static inline bool perf_paranoid_kernel(void)
+static inline int perf_allow_cpu(struct perf_event_attr *attr)
 {
-    return sysctl_perf_event_paranoid > 1;
+    if (sysctl_perf_event_paranoid > 0 && !capable(CAP_SYS_ADMIN))
+        return -EACCES;
+
+    return security_perf_event_open(attr, PERF_SECURITY_CPU);
+}
+
+static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
+{
+    if (sysctl_perf_event_paranoid > -1 && !capable(CAP_SYS_ADMIN))
+        return -EPERM;
+
+    return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
 }
 
 extern void perf_event_init(void);
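The three perf_allow_*() helpers replace the old perf_paranoid_*() predicates: each combines the sysctl threshold, the CAP_SYS_ADMIN override, and the new security_perf_event_open() LSM hook, and callers now propagate the returned error instead of hard-coding -EACCES. The thresholds come from the long-standing kernel.perf_event_paranoid sysctl, which can be inspected from userspace:

    #include <stdio.h>

    /*
     * kernel.perf_event_paranoid gates perf_event_open(2):
     *   > 1  -> no unprivileged kernel profiling   (perf_allow_kernel)
     *   > 0  -> no unprivileged CPU-wide events    (perf_allow_cpu)
     *   > -1 -> no unprivileged raw tracepoints    (perf_allow_tracepoint)
     * With the LSM hook wired in, an LSM may additionally deny access.
     */
    int main(void)
    {
        FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
        int level;

        if (!f)
            return 1;
        if (fscanf(f, "%d", &level) == 1)
            printf("perf_event_paranoid = %d\n", level);
        fclose(f);
        return 0;
    }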
include/linux/security.h

@@ -1811,5 +1811,42 @@ static inline void free_secdata(void *secdata)
 { }
 #endif /* CONFIG_SECURITY */
 
-#endif /* ! __LINUX_SECURITY_H */
+#ifdef CONFIG_PERF_EVENTS
+struct perf_event_attr;
+struct perf_event;
+
+#ifdef CONFIG_SECURITY
+extern int security_perf_event_open(struct perf_event_attr *attr, int type);
+extern int security_perf_event_alloc(struct perf_event *event);
+extern void security_perf_event_free(struct perf_event *event);
+extern int security_perf_event_read(struct perf_event *event);
+extern int security_perf_event_write(struct perf_event *event);
+#else
+static inline int security_perf_event_open(struct perf_event_attr *attr,
+                                           int type)
+{
+    return 0;
+}
+
+static inline int security_perf_event_alloc(struct perf_event *event)
+{
+    return 0;
+}
+
+static inline void security_perf_event_free(struct perf_event *event)
+{
+}
+
+static inline int security_perf_event_read(struct perf_event *event)
+{
+    return 0;
+}
+
+static inline int security_perf_event_write(struct perf_event *event)
+{
+    return 0;
+}
+#endif /* CONFIG_SECURITY */
+#endif /* CONFIG_PERF_EVENTS */
+
+#endif /* ! __LINUX_SECURITY_H */
include/uapi/linux/netfilter/xt_sctp.h

@@ -41,19 +41,19 @@ struct xt_sctp_info {
 #define SCTP_CHUNKMAP_SET(chunkmap, type) \
     do { \
         (chunkmap)[type / bytes(__u32)] |= \
-            1 << (type % bytes(__u32)); \
+            1u << (type % bytes(__u32)); \
     } while (0)
 
 #define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
     do { \
         (chunkmap)[type / bytes(__u32)] &= \
-            ~(1 << (type % bytes(__u32))); \
+            ~(1u << (type % bytes(__u32))); \
     } while (0)
 
 #define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
 ({ \
     ((chunkmap)[type / bytes (__u32)] & \
-        (1 << (type % bytes (__u32)))) ? 1: 0; \
+        (1u << (type % bytes (__u32)))) ? 1: 0; \
 })
 
 #define SCTP_CHUNKMAP_RESET(chunkmap) \
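The chunk-type bitmaps are arrays of 32-bit words, so any chunk type whose bit index is 31 made the old macros compute 1 << 31 on a signed int — a left shift into the sign bit, which is undefined behaviour in C and exactly what UBSAN reports. Making the constant unsigned is the entire fix; a compact illustration (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
        int type = 0xff;  /* a chunk type whose bit index is 31 */

        /* Before: (chunkmap)[type / 32] |= 1 << (type % 32);
         * type % 32 == 31 shifts a 1 into the sign bit of a signed int,
         * which C leaves undefined (and UBSAN flags at runtime). */
        unsigned int bit = 1u << (type % 32);  /* unsigned: well defined */

        printf("bit mask for type %d: %#x\n", type, bit); /* 0x80000000 */
        return 0;
    }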
@ -1251,6 +1251,30 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
|
||||
return check_generic_ptr_alignment(reg, pointer_desc, off, size, strict);
|
||||
}
|
||||
|
||||
static int check_ctx_reg(struct bpf_verifier_env *env,
|
||||
const struct bpf_reg_state *reg, int regno)
|
||||
{
|
||||
/* Access to ctx or passing it to a helper is only allowed in
|
||||
* its original, unmodified form.
|
||||
*/
|
||||
|
||||
if (reg->off) {
|
||||
verbose("dereference of modified ctx ptr R%d off=%d disallowed\n",
|
||||
regno, reg->off);
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
|
||||
char tn_buf[48];
|
||||
|
||||
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
|
||||
verbose("variable ctx access var_off=%s disallowed\n", tn_buf);
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* truncate register to smaller size (in bytes)
|
||||
* must be called with size < BPF_REG_SIZE
|
||||
*/
|
||||
@@ -1320,22 +1344,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
			verbose("R%d leaks addr into ctx\n", value_regno);
			return -EACCES;
		}
		/* ctx accesses must be at a fixed offset, so that we can
		 * determine what type of data were returned.
		 */
		if (reg->off) {
			verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n",
				regno, reg->off, off - reg->off);
			return -EACCES;
		}
		if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
			char tn_buf[48];
		err = check_ctx_reg(env, reg, regno);
		if (err < 0)
			return err;

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose("variable ctx access var_off=%s off=%d size=%d",
				tn_buf, off, size);
			return -EACCES;
		}
		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type);
		if (!err && t == BPF_READ && value_regno >= 0) {
			/* ctx access returns either a scalar, or a
@@ -1573,6 +1585,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
		expected_type = PTR_TO_CTX;
		if (type != expected_type)
			goto err_type;
		err = check_ctx_reg(env, reg, regno);
		if (err < 0)
			return err;
	} else if (arg_type == ARG_PTR_TO_MEM ||
		   arg_type == ARG_PTR_TO_UNINIT_MEM) {
		expected_type = PTR_TO_STACK;
@@ -3442,6 +3457,7 @@ static bool may_access_skb(enum bpf_prog_type type)
static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
	struct bpf_reg_state *regs = cur_regs(env);
	static const int ctx_reg = BPF_REG_6;
	u8 mode = BPF_MODE(insn->code);
	int i, err;

@@ -3458,11 +3474,11 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
	}

	/* check whether implicit source operand (register R6) is readable */
	err = check_reg_arg(env, BPF_REG_6, SRC_OP);
	err = check_reg_arg(env, ctx_reg, SRC_OP);
	if (err)
		return err;

	if (regs[BPF_REG_6].type != PTR_TO_CTX) {
	if (regs[ctx_reg].type != PTR_TO_CTX) {
		verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
		return -EINVAL;
	}
@@ -3474,6 +3490,10 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
			return err;
	}

	err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
	if (err < 0)
		return err;

	/* reset caller saved regs to unreadable */
	for (i = 0; i < CALLER_SAVED_REGS; i++) {
		mark_reg_not_init(regs, caller_saved[i]);
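Seen from the program author's side, these verifier hunks reject any helper call, LD_ABS/IND, or memory access made through a pointer derived from ctx. A hypothetical restricted-C fragment (names illustrative; whether clang materialises the offset pointer before the call depends on optimisation, so the insn-level selftests near the end of this merge are the authoritative reproducers):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int pass_modified_ctx(struct __sk_buff *skb)
{
	void *ctx = (void *)skb + 4;	/* ctx + 4: a modified ctx pointer */

	/* Rejected by the verifier with
	 * "dereference of modified ctx ptr" once it sees off=4.
	 */
	return bpf_get_socket_cookie(ctx) ? 1 : 0;
}

char _license[] SEC("license") = "GPL";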
@@ -4029,9 +4029,11 @@ find_get_context(struct pmu *pmu, struct task_struct *task,

	if (!task) {
		/* Must be root to operate on a CPU event: */
		if (!is_kernel_event(event) && perf_paranoid_cpu() &&
		    !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);
		if (!is_kernel_event(event)) {
			err = perf_allow_cpu(&event->attr);
			if (err)
				return ERR_PTR(err);
		}

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		ctx = &cpuctx->ctx;
@@ -4363,6 +4365,8 @@ static void _free_event(struct perf_event *event)

	unaccount_event(event);

	security_perf_event_free(event);

	if (event->rb) {
		/*
		 * Can happen when we close an event with re-directed output.
@@ -4823,6 +4827,10 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
	spin_unlock(&dormant_event_list_lock);
#endif

	ret = security_perf_event_read(event);
	if (ret)
		return ret;

	ctx = perf_event_ctx_lock(event);
	ret = __perf_read(event, buf, count);
	perf_event_ctx_unlock(event, ctx);
@@ -5068,6 +5076,11 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	struct perf_event_context *ctx;
	long ret;

	/* Treat ioctl like writes as it is likely a mutating operation. */
	ret = security_perf_event_write(event);
	if (ret)
		return ret;

	ctx = perf_event_ctx_lock(event);
	ret = _perf_ioctl(event, cmd, arg);
	perf_event_ctx_unlock(event, ctx);
@@ -5529,6 +5542,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	ret = security_perf_event_read(event);
	if (ret)
		return ret;

	vma_size = vma->vm_end - vma->vm_start;

	if (vma->vm_pgoff == 0) {
@@ -5642,7 +5659,7 @@ accounting:
	lock_limit >>= PAGE_SHIFT;
	locked = vma->vm_mm->pinned_vm + extra;

	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
	if ((locked > lock_limit) && perf_is_paranoid() &&
	    !capable(CAP_IPC_LOCK)) {
		ret = -EPERM;
		goto unlock;
@@ -10004,11 +10021,20 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
		}
	}

	err = security_perf_event_alloc(event);
	if (err)
		goto err_callchain_buffer;

	/* symmetric to unaccount_event() in _free_event() */
	account_event(event);

	return event;

err_callchain_buffer:
	if (!event->parent) {
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
			put_callchain_buffers();
	}
err_addr_filters:
	kfree(event->addr_filter_ranges);
@@ -10126,9 +10152,11 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
			attr->branch_sample_type = mask;
		}
		/* privileged levels capture (kernel, hv): check permissions */
		if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
		    && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
		if (mask & PERF_SAMPLE_BRANCH_PERM_PLM) {
			ret = perf_allow_kernel(attr);
			if (ret)
				return ret;
		}
	}

	if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
@@ -10366,13 +10394,19 @@ SYSCALL_DEFINE5(perf_event_open,
	if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* Do we allow access to perf_event_open(2) ? */
	err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
	if (err)
		return err;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
		err = perf_allow_kernel(&attr);
		if (err)
			return err;
	}

	if (attr.namespaces) {
@@ -10389,9 +10423,11 @@ SYSCALL_DEFINE5(perf_event_open,
	}

	/* Only privileged users can get physical addresses */
	if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR) &&
	    perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
		return -EACCES;
	if ((attr.sample_type & PERF_SAMPLE_PHYS_ADDR)) {
		err = perf_allow_kernel(&attr);
		if (err)
			return err;
	}

	if (!attr.sample_max_stack)
		attr.sample_max_stack = sysctl_perf_event_max_stack;
@@ -53,19 +53,19 @@ EXPORT_SYMBOL(__rwlock_init);

static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
	struct task_struct *owner = NULL;
	struct task_struct *owner = READ_ONCE(lock->owner);

	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
		owner = lock->owner;
	if (owner == SPINLOCK_OWNER_INIT)
		owner = NULL;
	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
		msg, raw_smp_processor_id(),
		current->comm, task_pid_nr(current));
	printk(KERN_EMERG " lock: %pS, .magic: %08x, .owner: %s/%d, "
			".owner_cpu: %d\n",
		lock, lock->magic,
		lock, READ_ONCE(lock->magic),
		owner ? owner->comm : "<none>",
		owner ? task_pid_nr(owner) : -1,
		lock->owner_cpu);
		READ_ONCE(lock->owner_cpu));
#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
	msm_trigger_wdog_bite();
#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
@@ -87,16 +87,16 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
static inline void
debug_spin_lock_before(raw_spinlock_t *lock)
{
	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
	SPIN_BUG_ON(READ_ONCE(lock->magic) != SPINLOCK_MAGIC, lock, "bad magic");
	SPIN_BUG_ON(READ_ONCE(lock->owner) == current, lock, "recursion");
	SPIN_BUG_ON(READ_ONCE(lock->owner_cpu) == raw_smp_processor_id(),
							lock, "cpu recursion");
}

static inline void debug_spin_lock_after(raw_spinlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

static inline void debug_spin_unlock(raw_spinlock_t *lock)
@@ -106,8 +106,8 @@ static inline void debug_spin_unlock(raw_spinlock_t *lock)
	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}

/*
@@ -195,8 +195,8 @@ static inline void debug_write_lock_before(rwlock_t *lock)

static inline void debug_write_lock_after(rwlock_t *lock)
{
	lock->owner_cpu = raw_smp_processor_id();
	lock->owner = current;
	WRITE_ONCE(lock->owner_cpu, raw_smp_processor_id());
	WRITE_ONCE(lock->owner, current);
}

static inline void debug_write_unlock(rwlock_t *lock)
@@ -205,8 +205,8 @@ static inline void debug_write_unlock(rwlock_t *lock)
	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
							lock, "wrong CPU");
	lock->owner = SPINLOCK_OWNER_INIT;
	lock->owner_cpu = -1;
	WRITE_ONCE(lock->owner, SPINLOCK_OWNER_INIT);
	WRITE_ONCE(lock->owner_cpu, -1);
}

void do_raw_write_lock(rwlock_t *lock)
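The pattern in all of these hunks is the same: debug fields that another CPU may read while the lock is contended must go through READ_ONCE()/WRITE_ONCE() so the compiler cannot tear, fuse, or re-load the accesses. A self-contained user-space sketch with simplified stand-ins for the kernel macros (the real ones live in include/linux/compiler.h and do extra checking):

#include <stdio.h>

/* Simplified stand-ins: a volatile access forces exactly one untorn
 * load/store and stops the compiler from caching the value.
 */
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

struct dbg_lock {
	void *owner;
	int owner_cpu;
};

/* Writer (lock acquirer): publish each debug field exactly once. */
static void debug_lock_after(struct dbg_lock *l, void *task, int cpu)
{
	WRITE_ONCE(l->owner_cpu, cpu);
	WRITE_ONCE(l->owner, task);
}

/* Reader (think spin_dump() on another CPU): sample each field once
 * instead of racing on plain loads.
 */
static void dump_lock(struct dbg_lock *l)
{
	printf("owner=%p cpu=%d\n",
	       READ_ONCE(l->owner), READ_ONCE(l->owner_cpu));
}

int main(void)
{
	struct dbg_lock l = { 0 };
	int self = 0;

	debug_lock_after(&l, &self, 0);
	dump_lock(&l);
	return 0;
}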
@@ -7,6 +7,7 @@

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/security.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
@@ -24,8 +25,10 @@ static int total_ref_count;
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}
@@ -44,8 +47,9 @@ static int perf_trace_event_perm(struct trace_event_call *tp_event,

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = perf_allow_tracepoint(&p_event->attr);
		if (ret)
			return ret;

		if (!is_sampling_event(p_event))
			return 0;
@@ -80,8 +84,9 @@ static int perf_trace_event_perm(struct trace_event_call *tp_event,
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	ret = perf_allow_tracepoint(&p_event->attr);
	if (ret)
		return ret;

	return 0;
}
@@ -110,6 +110,7 @@ int vlan_check_real_dev(struct net_device *real_dev,
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
void vlan_dev_uninit(struct net_device *dev);
bool vlan_dev_inherit_address(struct net_device *dev,
			      struct net_device *real_dev);

@@ -610,7 +610,8 @@ static int vlan_dev_init(struct net_device *dev)
	return 0;
}

static void vlan_dev_uninit(struct net_device *dev)
/* Note: this function might be called multiple times for the same device. */
void vlan_dev_uninit(struct net_device *dev)
{
	struct vlan_priority_tci_mapping *pm;
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);

@@ -95,11 +95,13 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
	struct ifla_vlan_flags *flags;
	struct ifla_vlan_qos_mapping *m;
	struct nlattr *attr;
	int rem;
	int rem, err;

	if (data[IFLA_VLAN_FLAGS]) {
		flags = nla_data(data[IFLA_VLAN_FLAGS]);
		vlan_dev_change_flags(dev, flags->flags, flags->mask);
		err = vlan_dev_change_flags(dev, flags->flags, flags->mask);
		if (err)
			return err;
	}
	if (data[IFLA_VLAN_INGRESS_QOS]) {
		nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
@@ -110,7 +112,9 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
	if (data[IFLA_VLAN_EGRESS_QOS]) {
		nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
			m = nla_data(attr);
			vlan_dev_set_egress_priority(dev, m->from, m->to);
			err = vlan_dev_set_egress_priority(dev, m->from, m->to);
			if (err)
				return err;
		}
	}
	return 0;
@@ -157,10 +161,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
		return -EINVAL;

	err = vlan_changelink(dev, tb, data, extack);
	if (err < 0)
		return err;

	return register_vlan_dev(dev);
	if (!err)
		err = register_vlan_dev(dev);
	if (err)
		vlan_dev_uninit(dev);
	return err;
}

static inline size_t vlan_qos_map_size(unsigned int n)
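(Taken together these three hunks close the leak: vlan_dev_set_egress_priority() failures are no longer ignored, and vlan_dev_uninit() is exported and made safe to call repeatedly so vlan_newlink() can free any egress-priority mappings a partially successful changelink already allocated.)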
@@ -1751,8 +1751,11 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
		}

		/* Ignore very old stuff early */
		if (!after(sp[used_sacks].end_seq, prior_snd_una))
		if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
			if (i == 0)
				first_sack_index = -1;
			continue;
		}

		used_sacks++;
	}
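(Per the upstream changelog: when the first SACK block is stale it is skipped here, but first_sack_index previously kept pointing at it, so tcp_check_dsack() could later misread an ordinary SACK as a D-SACK; invalidating the index avoids that.)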
@@ -32,7 +32,7 @@ static int llc_stat_ev_rx_null_dsap_xid_c(struct sk_buff *skb)
	return LLC_PDU_IS_CMD(pdu) &&			/* command PDU */
	       LLC_PDU_TYPE_IS_U(pdu) &&		/* U type PDU */
	       LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_XID &&
	       !pdu->dsap ? 0 : 1;			/* NULL DSAP value */
	       !pdu->dsap;				/* NULL DSAP value */
}

static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
@@ -42,7 +42,7 @@ static int llc_stat_ev_rx_null_dsap_test_c(struct sk_buff *skb)
	return LLC_PDU_IS_CMD(pdu) &&			/* command PDU */
	       LLC_PDU_TYPE_IS_U(pdu) &&		/* U type PDU */
	       LLC_U_PDU_CMD(pdu) == LLC_1_PDU_CMD_TEST &&
	       !pdu->dsap ? 0 : 1;			/* NULL DSAP */
	       !pdu->dsap;				/* NULL DSAP */
}

static int llc_station_ac_send_xid_r(struct sk_buff *skb)
@@ -3437,6 +3437,9 @@ static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)

	list_for_each_entry(net, net_exit_list, exit_list)
		ctnetlink_net_exit(net);

	/* wait for other cpus until they are done with ctnl_notifiers */
	synchronize_rcu();
}

static struct pernet_operations ctnetlink_net_ops = {

@@ -3917,14 +3917,20 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
		if (nla[NFTA_SET_ELEM_DATA] == NULL &&
		    !(flags & NFT_SET_ELEM_INTERVAL_END))
			return -EINVAL;
		if (nla[NFTA_SET_ELEM_DATA] != NULL &&
		    flags & NFT_SET_ELEM_INTERVAL_END)
			return -EINVAL;
	} else {
		if (nla[NFTA_SET_ELEM_DATA] != NULL)
			return -EINVAL;
	}

	if ((flags & NFT_SET_ELEM_INTERVAL_END) &&
	    (nla[NFTA_SET_ELEM_DATA] ||
	     nla[NFTA_SET_ELEM_OBJREF] ||
	     nla[NFTA_SET_ELEM_TIMEOUT] ||
	     nla[NFTA_SET_ELEM_EXPIRATION] ||
	     nla[NFTA_SET_ELEM_USERDATA] ||
	     nla[NFTA_SET_ELEM_EXPR]))
		return -EINVAL;

	timeout = 0;
	if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) {
		if (!(set->flags & NFT_SET_TIMEOUT))
@@ -993,10 +993,13 @@ static void rfkill_sync_work(struct work_struct *work)
int __must_check rfkill_register(struct rfkill *rfkill)
{
	static unsigned long rfkill_no;
	struct device *dev = &rfkill->dev;
	struct device *dev;
	int error;

	BUG_ON(!rfkill);
	if (!rfkill)
		return -EINVAL;

	dev = &rfkill->dev;

	mutex_lock(&rfkill_global_mutex);
@@ -734,7 +734,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
	if (tb[TCA_FQ_QUANTUM]) {
		u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

		if (quantum > 0)
		if (quantum > 0 && quantum <= (1 << 20))
			q->quantum = quantum;
		else
			err = -EINVAL;
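(The new 1 << 20 upper bound mirrors the upstream sanity check: quantum drives the per-flow credit arithmetic in fq's dequeue path, and an absurdly large TCA_FQ_QUANTUM supplied over netlink could break that accounting.)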
@@ -265,8 +265,14 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned long band = arg - 1;

	if (new == NULL)
		new = &noop_qdisc;
	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					TC_H_MAKE(sch->handle, arg));
		if (!new)
			new = &noop_qdisc;
		else
			qdisc_hash_add(new, true);
	}

	*old = qdisc_replace(sch, new, &q->queues[band]);
	return 0;
@@ -1359,8 +1359,10 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
			/* Generate an INIT ACK chunk. */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj)
				goto nomem;
			if (!new_obj) {
				error = -ENOMEM;
				break;
			}

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
@@ -1382,7 +1384,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
			if (!new_obj) {
				if (cmd->obj.chunk)
					sctp_chunk_free(cmd->obj.chunk);
				goto nomem;
				error = -ENOMEM;
				break;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
@@ -1429,8 +1432,10 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,

			/* Generate a SHUTDOWN chunk. */
			new_obj = sctp_make_shutdown(asoc, chunk);
			if (!new_obj)
				goto nomem;
			if (!new_obj) {
				error = -ENOMEM;
				break;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;
@@ -1760,11 +1765,17 @@ static int sctp_cmd_interpreter(enum sctp_event event_type,
			break;
		}

		if (error)
		if (error) {
			cmd = sctp_next_cmd(commands);
			while (cmd) {
				if (cmd->verb == SCTP_CMD_REPLY)
					sctp_chunk_free(cmd->obj.chunk);
				cmd = sctp_next_cmd(commands);
			}
			break;
		}
	}

out:
	/* If this is in response to a received chunk, wait until
	 * we are done with the packet to open the queue so that we don't
	 * send multiple packets in response to a single request.
@@ -1779,8 +1790,5 @@ out:
		sp->data_ready_signalled = 0;

	return error;
nomem:
	error = -ENOMEM;
	goto out;
}
@@ -50,13 +50,27 @@ static __always_inline void count(void *map)
SEC("tracepoint/syscalls/sys_enter_open")
int trace_enter_open(struct syscalls_enter_open_args *ctx)
{
	count((void *)&enter_open_map);
	count(&enter_open_map);
	return 0;
}

SEC("tracepoint/syscalls/sys_enter_openat")
int trace_enter_open_at(struct syscalls_enter_open_args *ctx)
{
	count(&enter_open_map);
	return 0;
}

SEC("tracepoint/syscalls/sys_exit_open")
int trace_enter_exit(struct syscalls_exit_open_args *ctx)
{
	count((void *)&exit_open_map);
	count(&exit_open_map);
	return 0;
}

SEC("tracepoint/syscalls/sys_exit_openat")
int trace_enter_exit_at(struct syscalls_exit_open_args *ctx)
{
	count(&exit_open_map);
	return 0;
}
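(Rationale from the upstream sample fix: current libc typically routes open(2) through openat(2), so a probe on sys_enter_open alone may never fire; tracking both entry/exit pairs keeps the sample counting, and the (void *) casts were simply unnecessary.)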
@@ -34,9 +34,9 @@ static void print_ksym(__u64 addr)
		return;
	sym = ksym_search(addr);
	printf("%s;", sym->name);
	if (!strcmp(sym->name, "sys_read"))
	if (!strstr(sym->name, "sys_read"))
		sys_read_seen = true;
	else if (!strcmp(sym->name, "sys_write"))
	else if (!strstr(sym->name, "sys_write"))
		sys_write_seen = true;
}
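(The move away from strcmp() follows the upstream sample fix: since the syscall-wrapper rework these symbols carry arch prefixes such as __x64_sys_read, which an exact compare never matches.)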
@@ -201,6 +201,13 @@ static int expr_eq(struct expr *e1, struct expr *e2)
{
	int res, old_count;

	/*
	 * A NULL expr is taken to be yes, but there's also a different way to
	 * represent yes. expr_is_yes() checks for either representation.
	 */
	if (!e1 || !e2)
		return expr_is_yes(e1) && expr_is_yes(e2);

	if (e1->type != e2->type)
		return 0;
	switch (e1->type) {
@@ -1753,3 +1753,30 @@ void security_bpf_prog_free(struct bpf_prog_aux *aux)
	call_void_hook(bpf_prog_free_security, aux);
}
#endif /* CONFIG_BPF_SYSCALL */

#ifdef CONFIG_PERF_EVENTS
int security_perf_event_open(struct perf_event_attr *attr, int type)
{
	return call_int_hook(perf_event_open, 0, attr, type);
}

int security_perf_event_alloc(struct perf_event *event)
{
	return call_int_hook(perf_event_alloc, 0, event);
}

void security_perf_event_free(struct perf_event *event)
{
	call_void_hook(perf_event_free, event);
}

int security_perf_event_read(struct perf_event *event)
{
	return call_int_hook(perf_event_read, 0, event);
}

int security_perf_event_write(struct perf_event *event)
{
	return call_int_hook(perf_event_write, 0, event);
}
#endif /* CONFIG_PERF_EVENTS */
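For readers unfamiliar with call_int_hook(), a simplified sketch of its semantics (the real macro walks security_hook_heads with list_for_each_entry; the types below are illustrative stand-ins, not kernel code):

struct perf_event_attr;	/* opaque here; illustrative only */

struct perf_hooks {
	int (*perf_event_open)(struct perf_event_attr *attr, int type);
	struct perf_hooks *next;
};

/* First registered LSM to return non-zero vetoes the operation; with
 * no hooks registered, the supplied default (0 = allow) is returned.
 */
static int call_perf_event_open_hooks(struct perf_hooks *head,
				      struct perf_event_attr *attr, int type)
{
	int rc = 0;

	for (; head; head = head->next) {
		if (!head->perf_event_open)
			continue;
		rc = head->perf_event_open(attr, type);
		if (rc)
			break;
	}
	return rc;
}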
@@ -6620,6 +6620,68 @@ static void selinux_bpf_prog_free(struct bpf_prog_aux *aux)
}
#endif

#ifdef CONFIG_PERF_EVENTS
static int selinux_perf_event_open(struct perf_event_attr *attr, int type)
{
	u32 requested, sid = current_sid();

	if (type == PERF_SECURITY_OPEN)
		requested = PERF_EVENT__OPEN;
	else if (type == PERF_SECURITY_CPU)
		requested = PERF_EVENT__CPU;
	else if (type == PERF_SECURITY_KERNEL)
		requested = PERF_EVENT__KERNEL;
	else if (type == PERF_SECURITY_TRACEPOINT)
		requested = PERF_EVENT__TRACEPOINT;
	else
		return -EINVAL;

	return avc_has_perm(&selinux_state, sid, sid, SECCLASS_PERF_EVENT,
			    requested, NULL);
}

static int selinux_perf_event_alloc(struct perf_event *event)
{
	struct perf_event_security_struct *perfsec;

	perfsec = kzalloc(sizeof(*perfsec), GFP_KERNEL);
	if (!perfsec)
		return -ENOMEM;

	perfsec->sid = current_sid();
	event->security = perfsec;

	return 0;
}

static void selinux_perf_event_free(struct perf_event *event)
{
	struct perf_event_security_struct *perfsec = event->security;

	event->security = NULL;
	kfree(perfsec);
}

static int selinux_perf_event_read(struct perf_event *event)
{
	struct perf_event_security_struct *perfsec = event->security;
	u32 sid = current_sid();

	return avc_has_perm(&selinux_state, sid, perfsec->sid,
			    SECCLASS_PERF_EVENT, PERF_EVENT__READ, NULL);
}

static int selinux_perf_event_write(struct perf_event *event)
{
	struct perf_event_security_struct *perfsec = event->security;
	u32 sid = current_sid();

	return avc_has_perm(&selinux_state, sid, perfsec->sid,
			    SECCLASS_PERF_EVENT, PERF_EVENT__WRITE, NULL);
}
#endif

static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(binder_set_context_mgr, selinux_binder_set_context_mgr),
	LSM_HOOK_INIT(binder_transaction, selinux_binder_transaction),
@@ -6849,6 +6911,14 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(bpf_map_free_security, selinux_bpf_map_free),
	LSM_HOOK_INIT(bpf_prog_free_security, selinux_bpf_prog_free),
#endif

#ifdef CONFIG_PERF_EVENTS
	LSM_HOOK_INIT(perf_event_open, selinux_perf_event_open),
	LSM_HOOK_INIT(perf_event_alloc, selinux_perf_event_alloc),
	LSM_HOOK_INIT(perf_event_free, selinux_perf_event_free),
	LSM_HOOK_INIT(perf_event_read, selinux_perf_event_read),
	LSM_HOOK_INIT(perf_event_write, selinux_perf_event_write),
#endif
};

static __init int selinux_init(void)
@@ -241,6 +241,8 @@ struct security_class_mapping secclass_map[] = {
	  { "manage_subnet", NULL } },
	{ "bpf",
	  {"map_create", "map_read", "map_write", "prog_load", "prog_run"} },
	{ "perf_event",
	  {"open", "cpu", "kernel", "tracepoint", "read", "write"} },
	{ NULL }
};
@@ -154,7 +154,11 @@ struct pkey_security_struct {
};

struct bpf_security_struct {
	u32 sid;  /*SID of bpf obj creater*/
	u32 sid;  /* SID of bpf obj creator */
};

struct perf_event_security_struct {
	u32 sid;  /* SID of perf_event obj creator */
};

#endif /* _SELINUX_OBJSEC_H_ */
@@ -2791,7 +2791,7 @@ static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,

	if (target % Fref == 0) {
		fll_div->theta = 0;
		fll_div->lambda = 0;
		fll_div->lambda = 1;
	} else {
		gcd_fll = gcd(target, fratio * Fref);

@@ -2861,7 +2861,7 @@ static int wm8962_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
		return -EINVAL;
	}

	if (fll_div.theta || fll_div.lambda)
	if (fll_div.theta)
		fll1 |= WM8962_FLL_FRAC;

	/* Stop the FLL while we reconfigure */
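(Per the upstream fix: the WM8962 datasheet requires FLL_LAMBDA to remain non-zero even in integer-N mode, so the integer path now encodes lambda = 1 with theta = 0, and FLL_FRAC is keyed off theta alone.)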
@@ -1921,6 +1921,7 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
	int count = hdr->count;
	int i;
	bool abi_match;
	int ret;

	if (tplg->pass != SOC_TPLG_PASS_PCM_DAI)
		return 0;
@@ -1957,7 +1958,12 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
		}

		/* create the FE DAIs and DAI links */
		soc_tplg_pcm_create(tplg, _pcm);
		ret = soc_tplg_pcm_create(tplg, _pcm);
		if (ret < 0) {
			if (!abi_match)
				kfree(_pcm);
			return ret;
		}

		/* offset by version-specific struct size and
		 * real priv data size
@@ -115,6 +115,7 @@ EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)

LIB_TARGET = libtraceevent.a libtraceevent.so.$(EVENT_PARSE_VERSION)
LIB_INSTALL = libtraceevent.a libtraceevent.so*
LIB_INSTALL := $(addprefix $(OUTPUT),$(LIB_INSTALL))

INCLUDES = -I. -I $(srctree)/tools/include $(CONFIG_INCLUDES)
@@ -7281,7 +7281,7 @@ static struct bpf_test tests[] = {
				    offsetof(struct __sk_buff, mark)),
			BPF_EXIT_INSN(),
		},
		.errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not",
		.errstr = "dereference of modified ctx ptr",
		.result = REJECT,
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
	},
@@ -7944,6 +7944,62 @@ static struct bpf_test tests[] = {
		.errstr = "BPF_XADD stores into R2 packet",
		.prog_type = BPF_PROG_TYPE_XDP,
	},
	{
		"pass unmodified ctx pointer to helper",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_csum_update),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = ACCEPT,
	},
	{
		"pass modified ctx pointer to helper, 1",
		.insns = {
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_csum_update),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.errstr = "dereference of modified ctx ptr",
	},
	{
		"pass modified ctx pointer to helper, 2",
		.insns = {
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_get_socket_cookie),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.result_unpriv = REJECT,
		.result = REJECT,
		.errstr_unpriv = "dereference of modified ctx ptr",
		.errstr = "dereference of modified ctx ptr",
	},
	{
		"pass modified ctx pointer to helper, 3",
		.insns = {
			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
			BPF_MOV64_IMM(BPF_REG_2, 0),
			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
				     BPF_FUNC_csum_update),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.errstr = "variable ctx access var_off=(0x0; 0x4)",
	},
};

static int probe_filter_length(const struct bpf_insn *fp)