mirror of https://github.com/rd-stuffs/msm-4.14.git
synced 2025-02-20 11:45:48 +08:00

Merge "Merge android-4.14-p.87 (11d6531a) into msm-4.14"

This commit is contained in commit 091230177c.
@@ -4003,9 +4003,13 @@
 	spectre_v2=	[X86] Control mitigation of Spectre variant 2
 			(indirect branch speculation) vulnerability.
+			The default operation protects the kernel from
+			user space attacks.
 
-			on   - unconditionally enable
-			off  - unconditionally disable
+			on   - unconditionally enable, implies
+			       spectre_v2_user=on
+			off  - unconditionally disable, implies
+			       spectre_v2_user=off
 			auto - kernel detects whether your CPU model is
 			       vulnerable
@@ -4015,6 +4019,12 @@
 			CONFIG_RETPOLINE configuration option, and the
 			compiler with which the kernel was built.
 
+			Selecting 'on' will also enable the mitigation
+			against user space to user space task attacks.
+
+			Selecting 'off' will disable both the kernel and
+			the user space protections.
+
 			Specific mitigations can also be selected manually:
 
 			retpoline - replace indirect branches
@@ -4024,6 +4034,48 @@
 			Not specifying this option is equivalent to
 			spectre_v2=auto.
 
+	spectre_v2_user=
+			[X86] Control mitigation of Spectre variant 2
+			(indirect branch speculation) vulnerability between
+			user space tasks
+
+			on	- Unconditionally enable mitigations. Is
+				  enforced by spectre_v2=on
+
+			off	- Unconditionally disable mitigations. Is
+				  enforced by spectre_v2=off
+
+			prctl	- Indirect branch speculation is enabled,
+				  but mitigation can be enabled via prctl
+				  per thread. The mitigation control state
+				  is inherited on fork.
+
+			prctl,ibpb
+				- Like "prctl" above, but only STIBP is
+				  controlled per thread. IBPB is issued
+				  always when switching between different user
+				  space processes.
+
+			seccomp
+				- Same as "prctl" above, but all seccomp
+				  threads will enable the mitigation unless
+				  they explicitly opt out.
+
+			seccomp,ibpb
+				- Like "seccomp" above, but only STIBP is
+				  controlled per thread. IBPB is issued
+				  always when switching between different
+				  user space processes.
+
+			auto	- Kernel selects the mitigation depending on
+				  the available CPU features and vulnerability.
+
+			Default mitigation:
+			If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl"
+
+			Not specifying this option is equivalent to
+			spectre_v2_user=auto.
+
 	spec_store_bypass_disable=
 			[HW] Control Speculative Store Bypass (SSB) Disable mitigation
 			(Speculative Store Bypass vulnerability)
@@ -92,3 +92,12 @@ Speculation misfeature controls
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
+
+- PR_SPEC_INDIRECT_BRANCH: Indirect Branch Speculation in User Processes
+  (Mitigate Spectre V2 style attacks against user processes)
+
+  Invocations:
+   * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0);
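The hunk above only documents the new prctl interface. Below is a minimal user-space sketch (not part of this merge) showing how a task would drive it; the PR_* constants are the upstream <linux/prctl.h> values, and the fallback define is only there in case the example is built against older headers.

/*
 * Hedged sketch: query and then opt in to the per-task indirect branch
 * speculation mitigation via prctl(). Requires a kernel with this
 * merge applied (spectre_v2_user=prctl or seccomp mode).
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

#ifndef PR_SPEC_INDIRECT_BRANCH
#define PR_SPEC_INDIRECT_BRANCH	1	/* fallback for pre-4.20 uapi headers */
#endif

int main(void)
{
	long state;

	/* Query the indirect branch speculation state of this task. */
	state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
	if (state < 0) {
		perror("PR_GET_SPECULATION_CTRL");	/* older kernel: -EINVAL/-ENODEV */
		return 1;
	}
	printf("indirect branch speculation state: 0x%lx\n", state);

	/* Ask for the STIBP mitigation for this thread (and children via fork). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_DISABLE, 0, 0) < 0)
		perror("PR_SET_SPECULATION_CTRL");	/* -EPERM in strict/none modes */

	return 0;
}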
5	Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 85
+SUBLEVEL = 87
 EXTRAVERSION =
 NAME = Petit Gorille
 
@@ -897,6 +897,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
 # disable pointer signed / unsigned warnings in gcc 4.0
 KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
 
+# disable stringop warnings in gcc 8+
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
+
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
 
@@ -109,7 +109,7 @@ endmenu
 
 choice
 	prompt "ARC Instruction Set"
-	default ISA_ARCOMPACT
+	default ISA_ARCV2
 
 config ISA_ARCOMPACT
 	bool "ARCompact ISA"
@@ -6,7 +6,7 @@
 # published by the Free Software Foundation.
 #
 
-KBUILD_DEFCONFIG := nsim_700_defconfig
+KBUILD_DEFCONFIG := nsim_hs_defconfig
 
 cflags-y	+= -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT)	+= -mA7
@@ -15,6 +15,7 @@ CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
@@ -98,6 +99,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set

@@ -97,6 +97,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set

@@ -100,6 +100,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set

@@ -66,6 +66,7 @@ CONFIG_EXT3_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set

@@ -15,6 +15,7 @@ CONFIG_SYSCTL_SYSCALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
@@ -74,6 +75,7 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_ROOT_NFS=y
 CONFIG_DEBUG_INFO=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set

@@ -16,6 +16,7 @@ CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_LBDAF is not set

@@ -16,6 +16,7 @@ CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_LBDAF is not set
@@ -69,5 +70,6 @@ CONFIG_EXT2_FS_XATTR=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set

@@ -68,5 +68,6 @@ CONFIG_EXT2_FS_XATTR=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set

@@ -79,6 +79,7 @@ CONFIG_EXT2_FS_XATTR=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FTRACE=y

@@ -19,6 +19,7 @@ CONFIG_KALLSYMS_ALL=y
 # CONFIG_AIO is not set
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y

@@ -88,6 +88,7 @@ CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set

@@ -92,6 +92,7 @@ CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
@@ -47,7 +47,11 @@
 #include "rk3288.dtsi"
 
 / {
-	memory@0 {
+	/*
+	 * The default coreboot on veyron devices ignores memory@0 nodes
+	 * and would instead create another memory node.
+	 */
+	memory {
 		device_type = "memory";
 		reg = <0x0 0x0 0x0 0x80000000>;
 	};
@@ -130,7 +130,7 @@
 };
 
 &pcie0 {
-	ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_LOW>;
+	ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_HIGH>;
 	num-lanes = <4>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pcie_clkreqn_cpm>;
@@ -51,7 +51,7 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
 #ifdef CONFIG_64BIT
 	case 4: case 5: case 6: case 7:
 #ifdef CONFIG_MIPS32_O32
-		if (test_thread_flag(TIF_32BIT_REGS))
+		if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
 			return get_user(*arg, (int *)usp + n);
 		else
 #endif
@@ -84,7 +84,7 @@ static struct rt2880_pmx_func pcie_rst_grp[] = {
 };
 static struct rt2880_pmx_func nd_sd_grp[] = {
 	FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
-	FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15)
+	FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13)
 };
 
 static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
@@ -440,10 +440,6 @@ config RETPOLINE
 	  branches. Requires a compiler with -mindirect-branch=thunk-extern
 	  support for full protection. The kernel may run slower.
 
-	  Without compiler support, at least indirect branches in assembler
-	  code are eliminated. Since this includes the syscall entry path,
-	  it is not entirely pointless.
-
 config INTEL_RDT
 	bool "Intel Resource Director Technology support"
 	default n
@@ -959,13 +955,7 @@ config NR_CPUS
 	  approximately eight kilobytes to the kernel image.
 
 config SCHED_SMT
-	bool "SMT (Hyperthreading) scheduler support"
-	depends on SMP
-	---help---
-	  SMT scheduler support improves the CPU scheduler's decision making
-	  when dealing with Intel Pentium 4 chips with HyperThreading at a
-	  cost of slightly increased overhead in some places. If unsure say
-	  N here.
+	def_bool y if SMP
 
 config SCHED_MC
 	def_bool y
@@ -244,9 +244,10 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 
 # Avoid indirect branches in kernel to deal with Spectre
 ifdef CONFIG_RETPOLINE
-ifneq ($(RETPOLINE_CFLAGS),)
-  KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
+ifeq ($(RETPOLINE_CFLAGS),)
+  $(error You are building kernel with non-retpoline compiler, please update your compiler.)
 endif
+KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
 endif
 
 archscripts: scripts_basic
@@ -438,26 +438,6 @@ int x86_setup_perfctr(struct perf_event *event)
 	if (config == -1LL)
 		return -EINVAL;
 
-	/*
-	 * Branch tracing:
-	 */
-	if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
-	    !attr->freq && hwc->sample_period == 1) {
-		/* BTS is not supported by this architecture. */
-		if (!x86_pmu.bts_active)
-			return -EOPNOTSUPP;
-
-		/* BTS is currently only allowed for user-mode. */
-		if (!attr->exclude_kernel)
-			return -EOPNOTSUPP;
-
-		/* disallow bts if conflicting events are present */
-		if (x86_add_exclusive(x86_lbr_exclusive_lbr))
-			return -EBUSY;
-
-		event->destroy = hw_perf_lbr_event_destroy;
-	}
-
 	hwc->config |= config;
 
 	return 0;
@@ -2345,16 +2345,7 @@ done:
 static struct event_constraint *
 intel_bts_constraints(struct perf_event *event)
 {
-	struct hw_perf_event *hwc = &event->hw;
-	unsigned int hw_event, bts_event;
-
-	if (event->attr.freq)
-		return NULL;
-
-	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
-	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
-
-	if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
+	if (unlikely(intel_pmu_has_bts(event)))
 		return &bts_constraint;
 
 	return NULL;
@@ -2973,10 +2964,47 @@ static unsigned long intel_pmu_free_running_flags(struct perf_event *event)
 	return flags;
 }
 
+static int intel_pmu_bts_config(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+
+	if (unlikely(intel_pmu_has_bts(event))) {
+		/* BTS is not supported by this architecture. */
+		if (!x86_pmu.bts_active)
+			return -EOPNOTSUPP;
+
+		/* BTS is currently only allowed for user-mode. */
+		if (!attr->exclude_kernel)
+			return -EOPNOTSUPP;
+
+		/* disallow bts if conflicting events are present */
+		if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+			return -EBUSY;
+
+		event->destroy = hw_perf_lbr_event_destroy;
+	}
+
+	return 0;
+}
+
+static int core_pmu_hw_config(struct perf_event *event)
+{
+	int ret = x86_pmu_hw_config(event);
+
+	if (ret)
+		return ret;
+
+	return intel_pmu_bts_config(event);
+}
+
 static int intel_pmu_hw_config(struct perf_event *event)
 {
 	int ret = x86_pmu_hw_config(event);
 
 	if (ret)
 		return ret;
 
+	ret = intel_pmu_bts_config(event);
+	if (ret)
+		return ret;
+
@@ -2999,7 +3027,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
 	/*
	 * BTS is set up earlier in this path, so don't account twice
	 */
-	if (!intel_pmu_has_bts(event)) {
+	if (!unlikely(intel_pmu_has_bts(event))) {
 		/* disallow lbr if conflicting events are present */
 		if (x86_add_exclusive(x86_lbr_exclusive_lbr))
 			return -EBUSY;
@@ -3462,7 +3490,7 @@ static __initconst const struct x86_pmu core_pmu = {
 	.enable_all		= core_pmu_enable_all,
 	.enable			= core_pmu_enable_event,
 	.disable		= x86_pmu_disable_event,
-	.hw_config		= x86_pmu_hw_config,
+	.hw_config		= core_pmu_hw_config,
 	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
 	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,

@@ -850,11 +850,16 @@ static inline int amd_pmu_init(void)
 
 static inline bool intel_pmu_has_bts(struct perf_event *event)
 {
-	if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
-	    !event->attr.freq && event->hw.sample_period == 1)
-		return true;
+	struct hw_perf_event *hwc = &event->hw;
+	unsigned int hw_event, bts_event;
 
-	return false;
+	if (event->attr.freq)
+		return false;
+
+	hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+	bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+
+	return hw_event == bts_event && hwc->sample_period == 1;
 }
 
 int intel_pmu_save_and_restart(struct perf_event *event);
@@ -284,7 +284,9 @@
 #define X86_FEATURE_AMD_IBPB		(13*32+12) /* "" Indirect Branch Prediction Barrier */
 #define X86_FEATURE_AMD_IBRS		(13*32+14) /* "" Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP		(13*32+15) /* "" Single Thread Indirect Branch Predictors */
 #define X86_FEATURE_AMD_SSBD		(13*32+24) /* "" Speculative Store Bypass Disable */
 #define X86_FEATURE_VIRT_SSBD		(13*32+25) /* Virtualized Speculative Store Bypass Disable */
+#define X86_FEATURE_AMD_SSB_NO		(13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 
 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
 #define X86_FEATURE_DTHERM		(14*32+ 0) /* Digital Thermal Sensor */
@@ -41,9 +41,10 @@
 
 #define MSR_IA32_SPEC_CTRL		0x00000048 /* Speculation Control */
 #define SPEC_CTRL_IBRS			(1 << 0)   /* Indirect Branch Restricted Speculation */
-#define SPEC_CTRL_STIBP			(1 << 1)   /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_STIBP_SHIFT		1	   /* Single Thread Indirect Branch Predictor (STIBP) bit */
+#define SPEC_CTRL_STIBP			(1 << SPEC_CTRL_STIBP_SHIFT)	/* STIBP mask */
 #define SPEC_CTRL_SSBD_SHIFT		2	   /* Speculative Store Bypass Disable bit */
-#define SPEC_CTRL_SSBD			(1 << SPEC_CTRL_SSBD_SHIFT)   /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_SSBD			(1 << SPEC_CTRL_SSBD_SHIFT)	/* Speculative Store Bypass Disable */
 
 #define MSR_IA32_PRED_CMD		0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB			(1 << 0)   /* Indirect Branch Prediction Barrier */
@@ -3,6 +3,8 @@
 #ifndef _ASM_X86_NOSPEC_BRANCH_H_
 #define _ASM_X86_NOSPEC_BRANCH_H_
 
+#include <linux/static_key.h>
+
 #include <asm/alternative.h>
 #include <asm/alternative-asm.h>
 #include <asm/cpufeatures.h>
@@ -162,29 +164,35 @@
 	_ASM_PTR " 999b\n\t"					\
 	".popsection\n\t"
 
-#if defined(CONFIG_X86_64) && defined(RETPOLINE)
+#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_X86_64
 
 /*
- * Since the inline asm uses the %V modifier which is only in newer GCC,
- * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
+ * Inline asm uses the %V modifier which is only in newer GCC
+ * which is ensured when CONFIG_RETPOLINE is defined.
 */
 # define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
-	ALTERNATIVE(						\
+	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",	\
-	X86_FEATURE_RETPOLINE)
+	X86_FEATURE_RETPOLINE,					\
+	"lfence;\n"						\
+	ANNOTATE_RETPOLINE_SAFE					\
+	"call *%[thunk_target]\n",				\
+	X86_FEATURE_RETPOLINE_AMD)
+
 # define THUNK_TARGET(addr) [thunk_target] "r" (addr)
 
-#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
+#else /* CONFIG_X86_32 */
 /*
  * For i386 we use the original ret-equivalent retpoline, because
  * otherwise we'll run out of registers. We don't care about CET
  * here, anyway.
  */
 # define CALL_NOSPEC						\
-	ALTERNATIVE(						\
+	ANNOTATE_NOSPEC_ALTERNATIVE				\
+	ALTERNATIVE_2(						\
 	ANNOTATE_RETPOLINE_SAFE					\
 	"call *%[thunk_target]\n",				\
 	" jmp 904f;\n"						\
@@ -199,9 +207,14 @@
 	" ret;\n"						\
 	" .align 16\n"						\
 	"904: call 901b;\n",					\
-	X86_FEATURE_RETPOLINE)
+	X86_FEATURE_RETPOLINE,					\
+	"lfence;\n"						\
+	ANNOTATE_RETPOLINE_SAFE					\
+	"call *%[thunk_target]\n",				\
+	X86_FEATURE_RETPOLINE_AMD)
 
 # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#endif
 #else /* No retpoline for C / inline asm */
 # define CALL_NOSPEC "call *%[thunk_target]\n"
 # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
@@ -210,14 +223,19 @@
 /* The Spectre V2 mitigation variants */
 enum spectre_v2_mitigation {
 	SPECTRE_V2_NONE,
-	SPECTRE_V2_RETPOLINE_MINIMAL,
-	SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
 	SPECTRE_V2_RETPOLINE_GENERIC,
 	SPECTRE_V2_RETPOLINE_AMD,
-	SPECTRE_V2_IBRS,
 	SPECTRE_V2_IBRS_ENHANCED,
 };
 
+/* The indirect branch speculation control variants */
+enum spectre_v2_user_mitigation {
+	SPECTRE_V2_USER_NONE,
+	SPECTRE_V2_USER_STRICT,
+	SPECTRE_V2_USER_PRCTL,
+	SPECTRE_V2_USER_SECCOMP,
+};
+
 /* The Speculative Store Bypass disable variants */
 enum ssb_mitigation {
 	SPEC_STORE_BYPASS_NONE,
@@ -295,6 +313,10 @@ do {								\
 	preempt_enable();					\
 } while (0)
 
+DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
 #endif /* __ASSEMBLY__ */
 
 /*
@@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
 	return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
 }
 
+static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
+{
+	BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+	return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
 static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
 {
 	BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
 	return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
 }
 
+static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+	BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+	return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
 static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
 {
 	return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
@@ -70,11 +82,7 @@ extern void speculative_store_bypass_ht_init(void);
 static inline void speculative_store_bypass_ht_init(void) { }
 #endif
 
-extern void speculative_store_bypass_update(unsigned long tif);
-
-static inline void speculative_store_bypass_update_current(void)
-{
-	speculative_store_bypass_update(current_thread_info()->flags);
-}
+extern void speculation_ctrl_update(unsigned long tif);
+extern void speculation_ctrl_update_current(void);
 
 #endif
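The bit arithmetic in stibp_tif_to_spec_ctrl() above can be checked in isolation. The following stand-alone sketch (not part of the merge) uses the shift values set elsewhere in this diff: TIF_SPEC_IB = 9 from the thread_info.h hunk and SPEC_CTRL_STIBP_SHIFT = 1 from the msr-index.h hunk, so the thread flag moves down by 9 - 1 = 8 bit positions into the MSR image.

/* Stand-alone check of the TIF_SPEC_IB -> SPEC_CTRL_STIBP mapping. */
#include <assert.h>
#include <stdint.h>

#define TIF_SPEC_IB		9			/* from thread_info.h hunk above */
#define _TIF_SPEC_IB		(1UL << TIF_SPEC_IB)
#define SPEC_CTRL_STIBP_SHIFT	1			/* from msr-index.h hunk above */
#define SPEC_CTRL_STIBP		(1ULL << SPEC_CTRL_STIBP_SHIFT)

static uint64_t stibp_tif_to_spec_ctrl(uint64_t tifn)
{
	/* Shift thread-flag bit 9 down to MSR bit 1 (right by 8). */
	return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
}

int main(void)
{
	/* A task with TIF_SPEC_IB set contributes exactly the STIBP bit. */
	assert(stibp_tif_to_spec_ctrl(_TIF_SPEC_IB) == SPEC_CTRL_STIBP);
	assert(stibp_tif_to_spec_ctrl(0) == 0);
	return 0;
}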
@@ -11,9 +11,6 @@ struct task_struct *__switch_to_asm(struct task_struct *prev,
 
 __visible struct task_struct *__switch_to(struct task_struct *prev,
 					  struct task_struct *next);
-struct tss_struct;
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-		      struct tss_struct *tss);
 
 /* This runs runs on the previous thread's stack. */
 static inline void prepare_switch_to(struct task_struct *prev,
@@ -81,10 +81,12 @@ struct thread_info {
 #define TIF_SIGPENDING		2	/* signal pending */
 #define TIF_NEED_RESCHED	3	/* rescheduling necessary */
 #define TIF_SINGLESTEP		4	/* reenable singlestep on user return*/
-#define TIF_SSBD		5	/* Reduced data speculation */
+#define TIF_SSBD		5	/* Speculative store bypass disable */
 #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
+#define TIF_SPEC_IB		9	/* Indirect branch speculation mitigation */
+#define TIF_SPEC_FORCE_UPDATE	10	/* Force speculation MSR update in context switch */
 #define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
 #define TIF_UPROBE		12	/* breakpointed or singlestepping */
 #define TIF_PATCH_PENDING	13	/* pending live patching update */
@@ -112,6 +114,8 @@ struct thread_info {
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_SPEC_IB		(1 << TIF_SPEC_IB)
+#define _TIF_SPEC_FORCE_UPDATE	(1 << TIF_SPEC_FORCE_UPDATE)
 #define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_PATCH_PENDING	(1 << TIF_PATCH_PENDING)
@@ -147,8 +151,18 @@ struct thread_info {
 	 _TIF_FSCHECK)
 
 /* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW							\
-	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
+#define _TIF_WORK_CTXSW_BASE						\
+	(_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|	\
+	 _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
+
+/*
+ * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
+ */
+#ifdef CONFIG_SMP
+# define _TIF_WORK_CTXSW	(_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB)
+#else
+# define _TIF_WORK_CTXSW	(_TIF_WORK_CTXSW_BASE)
+#endif
 
 #define _TIF_WORK_CTXSW_PREV	(_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT	(_TIF_WORK_CTXSW)
@@ -185,10 +185,14 @@ struct tlb_state {
 
 #define LOADED_MM_SWITCHING ((struct mm_struct *)1)
 
+	/* Last user mm for optimizing IBPB */
+	union {
+		struct mm_struct	*last_user_mm;
+		unsigned long		last_user_mm_ibpb;
+	};
+
 	u16 loaded_mm_asid;
 	u16 next_asid;
-	/* last user mm's ctx id */
-	u64 last_ctx_id;
 
 	/*
	 * We can be in one of several states:
@@ -554,7 +554,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 		nodes_per_socket = ((value >> 3) & 7) + 1;
 	}
 
-	if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
+	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
+	    c->x86 >= 0x15 && c->x86 <= 0x17) {
 		unsigned int bit;
 
 		switch (c->x86) {
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/nospec.h>
 #include <linux/prctl.h>
+#include <linux/sched/smt.h>
 
 #include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
@@ -34,12 +35,10 @@ static void __init spectre_v2_select_mitigation(void);
 static void __init ssb_select_mitigation(void);
 static void __init l1tf_select_mitigation(void);
 
-/*
- * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
- * writes to SPEC_CTRL contain whatever reserved bits have been set.
- */
-u64 __ro_after_init x86_spec_ctrl_base;
+/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
+u64 x86_spec_ctrl_base;
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+static DEFINE_MUTEX(spec_ctrl_mutex);
 
 /*
  * The vendor and possibly platform specific bits which can be modified in
@@ -54,6 +53,13 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
 u64 __ro_after_init x86_amd_ls_cfg_base;
 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
 
+/* Control conditional STIPB in switch_to() */
+DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+/* Control conditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+/* Control unconditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
 void __init check_bugs(void)
 {
 	identify_boot_cpu();
@@ -124,31 +130,6 @@ void __init check_bugs(void)
 #endif
 }
 
-/* The kernel command line selection */
-enum spectre_v2_mitigation_cmd {
-	SPECTRE_V2_CMD_NONE,
-	SPECTRE_V2_CMD_AUTO,
-	SPECTRE_V2_CMD_FORCE,
-	SPECTRE_V2_CMD_RETPOLINE,
-	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
-	SPECTRE_V2_CMD_RETPOLINE_AMD,
-};
-
-static const char *spectre_v2_strings[] = {
-	[SPECTRE_V2_NONE]			= "Vulnerable",
-	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
-	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
-	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
-	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
-	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
-};
-
-#undef pr_fmt
-#define pr_fmt(fmt)     "Spectre V2 : " fmt
-
-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
-	SPECTRE_V2_NONE;
-
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
@@ -166,9 +147,14 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
 
 		/* SSBD controlled in MSR_SPEC_CTRL */
-		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+		    static_cpu_has(X86_FEATURE_AMD_SSBD))
 			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
+		/* Conditional STIBP enabled? */
+		if (static_branch_unlikely(&switch_to_cond_stibp))
+			hostval |= stibp_tif_to_spec_ctrl(ti->flags);
+
 		if (hostval != guestval) {
 			msrval = setguest ? guestval : hostval;
 			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -202,7 +188,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 	tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
 			 ssbd_spec_ctrl_to_tif(hostval);
 
-	speculative_store_bypass_update(tif);
+	speculation_ctrl_update(tif);
 }
 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
@@ -217,6 +203,15 @@ static void x86_amd_ssb_disable(void)
 		wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V2 : " fmt
+
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+	SPECTRE_V2_NONE;
+
+static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+	SPECTRE_V2_USER_NONE;
+
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
 
@@ -238,23 +233,6 @@ static inline const char *spectre_v2_module_string(void)
 static inline const char *spectre_v2_module_string(void) { return ""; }
 #endif
 
-static void __init spec2_print_if_insecure(const char *reason)
-{
-	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-		pr_info("%s selected on command line.\n", reason);
-}
-
-static void __init spec2_print_if_secure(const char *reason)
-{
-	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-		pr_info("%s selected on command line.\n", reason);
-}
-
-static inline bool retp_compiler(void)
-{
-	return __is_defined(RETPOLINE);
-}
-
 static inline bool match_option(const char *arg, int arglen, const char *opt)
 {
 	int len = strlen(opt);
@@ -262,43 +240,210 @@ static inline bool match_option(const char *arg, int arglen, const char *opt)
 	return len == arglen && !strncmp(arg, opt, len);
 }
 
+/* The kernel command line selection for spectre v2 */
+enum spectre_v2_mitigation_cmd {
+	SPECTRE_V2_CMD_NONE,
+	SPECTRE_V2_CMD_AUTO,
+	SPECTRE_V2_CMD_FORCE,
+	SPECTRE_V2_CMD_RETPOLINE,
+	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+	SPECTRE_V2_CMD_RETPOLINE_AMD,
+};
+
+enum spectre_v2_user_cmd {
+	SPECTRE_V2_USER_CMD_NONE,
+	SPECTRE_V2_USER_CMD_AUTO,
+	SPECTRE_V2_USER_CMD_FORCE,
+	SPECTRE_V2_USER_CMD_PRCTL,
+	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
+	SPECTRE_V2_USER_CMD_SECCOMP,
+	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
+};
+
+static const char * const spectre_v2_user_strings[] = {
+	[SPECTRE_V2_USER_NONE]		= "User space: Vulnerable",
+	[SPECTRE_V2_USER_STRICT]	= "User space: Mitigation: STIBP protection",
+	[SPECTRE_V2_USER_PRCTL]		= "User space: Mitigation: STIBP via prctl",
+	[SPECTRE_V2_USER_SECCOMP]	= "User space: Mitigation: STIBP via seccomp and prctl",
+};
+
+static const struct {
+	const char			*option;
+	enum spectre_v2_user_cmd	cmd;
+	bool				secure;
+} v2_user_options[] __initdata = {
+	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
+	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
+	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
+	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
+	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
+	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
+	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
+};
+
+static void __init spec_v2_user_print_cond(const char *reason, bool secure)
+{
+	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
+}
+
+static enum spectre_v2_user_cmd __init
+spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
+{
+	char arg[20];
+	int ret, i;
+
+	switch (v2_cmd) {
+	case SPECTRE_V2_CMD_NONE:
+		return SPECTRE_V2_USER_CMD_NONE;
+	case SPECTRE_V2_CMD_FORCE:
+		return SPECTRE_V2_USER_CMD_FORCE;
+	default:
+		break;
+	}
+
+	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
+				  arg, sizeof(arg));
+	if (ret < 0)
+		return SPECTRE_V2_USER_CMD_AUTO;
+
+	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
+		if (match_option(arg, ret, v2_user_options[i].option)) {
+			spec_v2_user_print_cond(v2_user_options[i].option,
+						v2_user_options[i].secure);
+			return v2_user_options[i].cmd;
+		}
+	}
+
+	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
+	return SPECTRE_V2_USER_CMD_AUTO;
+}
+
+static void __init
+spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
+{
+	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
+	bool smt_possible = IS_ENABLED(CONFIG_SMP);
+	enum spectre_v2_user_cmd cmd;
+
+	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
+		return;
+
+	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
+	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+		smt_possible = false;
+
+	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
+	switch (cmd) {
+	case SPECTRE_V2_USER_CMD_NONE:
+		goto set_mode;
+	case SPECTRE_V2_USER_CMD_FORCE:
+		mode = SPECTRE_V2_USER_STRICT;
+		break;
+	case SPECTRE_V2_USER_CMD_PRCTL:
+	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+		mode = SPECTRE_V2_USER_PRCTL;
+		break;
+	case SPECTRE_V2_USER_CMD_AUTO:
+	case SPECTRE_V2_USER_CMD_SECCOMP:
+	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+		if (IS_ENABLED(CONFIG_SECCOMP))
+			mode = SPECTRE_V2_USER_SECCOMP;
+		else
+			mode = SPECTRE_V2_USER_PRCTL;
+		break;
+	}
+
+	/* Initialize Indirect Branch Prediction Barrier */
+	if (boot_cpu_has(X86_FEATURE_IBPB)) {
+		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+
+		switch (cmd) {
+		case SPECTRE_V2_USER_CMD_FORCE:
+		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+			static_branch_enable(&switch_mm_always_ibpb);
+			break;
+		case SPECTRE_V2_USER_CMD_PRCTL:
+		case SPECTRE_V2_USER_CMD_AUTO:
+		case SPECTRE_V2_USER_CMD_SECCOMP:
+			static_branch_enable(&switch_mm_cond_ibpb);
+			break;
+		default:
+			break;
+		}
+
+		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+			static_key_enabled(&switch_mm_always_ibpb) ?
+			"always-on" : "conditional");
+	}
+
+	/* If enhanced IBRS is enabled no STIPB required */
+	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+		return;
+
+	/*
+	 * If SMT is not possible or STIBP is not available clear the STIPB
+	 * mode.
+	 */
+	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+		mode = SPECTRE_V2_USER_NONE;
+set_mode:
+	spectre_v2_user = mode;
+	/* Only print the STIBP mode when SMT possible */
+	if (smt_possible)
+		pr_info("%s\n", spectre_v2_user_strings[mode]);
+}
+
+static const char * const spectre_v2_strings[] = {
+	[SPECTRE_V2_NONE]			= "Vulnerable",
+	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
+	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
+	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
+};
+
 static const struct {
 	const char *option;
 	enum spectre_v2_mitigation_cmd cmd;
 	bool secure;
-} mitigation_options[] = {
-	{ "off",               SPECTRE_V2_CMD_NONE,              false },
-	{ "on",                SPECTRE_V2_CMD_FORCE,             true },
-	{ "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
-	{ "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
-	{ "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
-	{ "auto",              SPECTRE_V2_CMD_AUTO,              false },
+} mitigation_options[] __initdata = {
+	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
+	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
+	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
+	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
+	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
+	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
 };
 
+static void __init spec_v2_print_cond(const char *reason, bool secure)
+{
+	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+		pr_info("%s selected on command line.\n", reason);
+}
+
 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 {
+	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
 	char arg[20];
 	int ret, i;
-	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
 
 	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
 		return SPECTRE_V2_CMD_NONE;
-	else {
-		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
-		if (ret < 0)
-			return SPECTRE_V2_CMD_AUTO;
 
-		for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
-			if (!match_option(arg, ret, mitigation_options[i].option))
-				continue;
-			cmd = mitigation_options[i].cmd;
-			break;
-		}
+	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
+	if (ret < 0)
+		return SPECTRE_V2_CMD_AUTO;
 
-		if (i >= ARRAY_SIZE(mitigation_options)) {
-			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
-			return SPECTRE_V2_CMD_AUTO;
-		}
+	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
+		if (!match_option(arg, ret, mitigation_options[i].option))
+			continue;
+		cmd = mitigation_options[i].cmd;
+		break;
+	}
+
+	if (i >= ARRAY_SIZE(mitigation_options)) {
+		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+		return SPECTRE_V2_CMD_AUTO;
 	}
 
 	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
@@ -315,11 +460,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 		return SPECTRE_V2_CMD_AUTO;
 	}
 
-	if (mitigation_options[i].secure)
-		spec2_print_if_secure(mitigation_options[i].option);
-	else
-		spec2_print_if_insecure(mitigation_options[i].option);
-
+	spec_v2_print_cond(mitigation_options[i].option,
+			   mitigation_options[i].secure);
 	return cmd;
 }
 
@@ -375,14 +517,12 @@ retpoline_auto:
 			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
 			goto retpoline_generic;
 		}
-		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
-					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
+		mode = SPECTRE_V2_RETPOLINE_AMD;
 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
 	} else {
 	retpoline_generic:
-		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
-					 SPECTRE_V2_RETPOLINE_MINIMAL;
+		mode = SPECTRE_V2_RETPOLINE_GENERIC;
 		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
 	}
 
@@ -401,12 +541,6 @@ specv2_set_mode:
 	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
-	/* Initialize Indirect Branch Prediction Barrier if supported */
-	if (boot_cpu_has(X86_FEATURE_IBPB)) {
-		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
-		pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
-	}
-
 	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
@@ -422,6 +556,66 @@ specv2_set_mode:
 		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
 		pr_info("Enabling Restricted Speculation for firmware calls\n");
 	}
+
+	/* Set up IBPB and STIBP depending on the general spectre V2 command */
+	spectre_v2_user_select_mitigation(cmd);
+
+	/* Enable STIBP if appropriate */
+	arch_smt_update();
 }
+
+static void update_stibp_msr(void * __unused)
+{
+	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+
+/* Update x86_spec_ctrl_base in case SMT state changed. */
+static void update_stibp_strict(void)
+{
+	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+
+	if (sched_smt_active())
+		mask |= SPEC_CTRL_STIBP;
+
+	if (mask == x86_spec_ctrl_base)
+		return;
+
+	pr_info("Update user space SMT mitigation: STIBP %s\n",
+		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
+	x86_spec_ctrl_base = mask;
+	on_each_cpu(update_stibp_msr, NULL, 1);
+}
+
+/* Update the static key controlling the evaluation of TIF_SPEC_IB */
+static void update_indir_branch_cond(void)
+{
+	if (sched_smt_active())
+		static_branch_enable(&switch_to_cond_stibp);
+	else
+		static_branch_disable(&switch_to_cond_stibp);
+}
+
+void arch_smt_update(void)
+{
+	/* Enhanced IBRS implies STIBP. No update required. */
+	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+		return;
+
+	mutex_lock(&spec_ctrl_mutex);
+
+	switch (spectre_v2_user) {
+	case SPECTRE_V2_USER_NONE:
+		break;
+	case SPECTRE_V2_USER_STRICT:
+		update_stibp_strict();
+		break;
+	case SPECTRE_V2_USER_PRCTL:
+	case SPECTRE_V2_USER_SECCOMP:
+		update_indir_branch_cond();
+		break;
+	}
+
+	mutex_unlock(&spec_ctrl_mutex);
+}
 
 #undef pr_fmt
@@ -438,7 +632,7 @@ enum ssb_mitigation_cmd {
 	SPEC_STORE_BYPASS_CMD_SECCOMP,
 };
 
-static const char *ssb_strings[] = {
+static const char * const ssb_strings[] = {
 	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
 	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
 	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
@@ -448,7 +642,7 @@ static const char *ssb_strings[] = {
 static const struct {
 	const char *option;
 	enum ssb_mitigation_cmd cmd;
-} ssb_mitigation_options[] = {
+} ssb_mitigation_options[] __initdata = {
 	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
 	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
 	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
@@ -532,18 +726,16 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
 	if (mode == SPEC_STORE_BYPASS_DISABLE) {
 		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
 		/*
-		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
-		 * a completely different MSR and bit dependent on family.
+		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
+		 * use a completely different MSR and bit dependent on family.
 		 */
-		switch (boot_cpu_data.x86_vendor) {
-		case X86_VENDOR_INTEL:
+		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+			x86_amd_ssb_disable();
+		} else {
 			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
 			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
 			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-			break;
-		case X86_VENDOR_AMD:
-			x86_amd_ssb_disable();
-			break;
 		}
 	}
 
@@ -561,10 +753,25 @@ static void ssb_select_mitigation(void)
 #undef pr_fmt
 #define pr_fmt(fmt)     "Speculation prctl: " fmt
 
+static void task_update_spec_tif(struct task_struct *tsk)
+{
+	/* Force the update of the real TIF bits */
+	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
+
+	/*
+	 * Immediately update the speculation control MSRs for the current
+	 * task, but for a non-current task delay setting the CPU
+	 * mitigation until it is scheduled next.
+	 *
+	 * This can only happen for SECCOMP mitigation. For PRCTL it's
+	 * always the current task.
+	 */
+	if (tsk == current)
+		speculation_ctrl_update_current();
+}
+
 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
 {
-	bool update;
-
 	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
 	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
 		return -ENXIO;
@@ -575,28 +782,56 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
 		if (task_spec_ssb_force_disable(task))
 			return -EPERM;
 		task_clear_spec_ssb_disable(task);
-		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+		task_update_spec_tif(task);
 		break;
 	case PR_SPEC_DISABLE:
 		task_set_spec_ssb_disable(task);
-		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+		task_update_spec_tif(task);
 		break;
 	case PR_SPEC_FORCE_DISABLE:
 		task_set_spec_ssb_disable(task);
 		task_set_spec_ssb_force_disable(task);
-		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+		task_update_spec_tif(task);
 		break;
 	default:
 		return -ERANGE;
 	}
+	return 0;
+}
 
-	/*
-	 * If being set on non-current task, delay setting the CPU
-	 * mitigation until it is next scheduled.
-	 */
-	if (task == current && update)
-		speculative_store_bypass_update_current();
-
+static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+	switch (ctrl) {
+	case PR_SPEC_ENABLE:
+		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+			return 0;
+		/*
+		 * Indirect branch speculation is always disabled in strict
+		 * mode.
+		 */
+		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
+			return -EPERM;
+		task_clear_spec_ib_disable(task);
+		task_update_spec_tif(task);
+		break;
+	case PR_SPEC_DISABLE:
+	case PR_SPEC_FORCE_DISABLE:
+		/*
+		 * Indirect branch speculation is always allowed when
+		 * mitigation is force disabled.
+		 */
+		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+			return -EPERM;
+		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
+			return 0;
+		task_set_spec_ib_disable(task);
+		if (ctrl == PR_SPEC_FORCE_DISABLE)
+			task_set_spec_ib_force_disable(task);
+		task_update_spec_tif(task);
+		break;
+	default:
+		return -ERANGE;
+	}
 	return 0;
 }
@@ -606,6 +841,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
 	switch (which) {
 	case PR_SPEC_STORE_BYPASS:
 		return ssb_prctl_set(task, ctrl);
+	case PR_SPEC_INDIRECT_BRANCH:
+		return ib_prctl_set(task, ctrl);
 	default:
 		return -ENODEV;
 	}
@@ -616,6 +853,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
 {
 	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
 		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
 }
 #endif
 
@@ -638,11 +877,35 @@ static int ssb_prctl_get(struct task_struct *task)
 	}
 }
 
+static int ib_prctl_get(struct task_struct *task)
+{
+	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+		return PR_SPEC_NOT_AFFECTED;
+
+	switch (spectre_v2_user) {
+	case SPECTRE_V2_USER_NONE:
+		return PR_SPEC_ENABLE;
+	case SPECTRE_V2_USER_PRCTL:
+	case SPECTRE_V2_USER_SECCOMP:
+		if (task_spec_ib_force_disable(task))
+			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+		if (task_spec_ib_disable(task))
+			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+	case SPECTRE_V2_USER_STRICT:
+		return PR_SPEC_DISABLE;
+	default:
+		return PR_SPEC_NOT_AFFECTED;
+	}
+}
+
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 {
 	switch (which) {
 	case PR_SPEC_STORE_BYPASS:
 		return ssb_prctl_get(task);
+	case PR_SPEC_INDIRECT_BRANCH:
+		return ib_prctl_get(task);
 	default:
 		return -ENODEV;
 	}
@@ -780,7 +1043,7 @@ early_param("l1tf", l1tf_cmdline);
 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
 
 #if IS_ENABLED(CONFIG_KVM_INTEL)
-static const char *l1tf_vmx_states[] = {
+static const char * const l1tf_vmx_states[] = {
 	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
 	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
 	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
@@ -796,13 +1059,14 @@ static ssize_t l1tf_show_state(char *buf)
 
 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
 	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
-	     cpu_smt_control == CPU_SMT_ENABLED))
+	     sched_smt_active())) {
 		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
 			       l1tf_vmx_states[l1tf_vmx_mitigation]);
+	}
 
 	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
 		       l1tf_vmx_states[l1tf_vmx_mitigation],
-		       cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
+		       sched_smt_active() ? "vulnerable" : "disabled");
 }
 #else
 static ssize_t l1tf_show_state(char *buf)
@@ -811,6 +1075,36 @@ static ssize_t l1tf_show_state(char *buf)
 }
 #endif
 
+static char *stibp_state(void)
+{
+	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+		return "";
+
+	switch (spectre_v2_user) {
+	case SPECTRE_V2_USER_NONE:
+		return ", STIBP: disabled";
+	case SPECTRE_V2_USER_STRICT:
+		return ", STIBP: forced";
+	case SPECTRE_V2_USER_PRCTL:
+	case SPECTRE_V2_USER_SECCOMP:
+		if (static_key_enabled(&switch_to_cond_stibp))
+			return ", STIBP: conditional";
+	}
+	return "";
+}
+
+static char *ibpb_state(void)
+{
+	if (boot_cpu_has(X86_FEATURE_IBPB)) {
+		if (static_key_enabled(&switch_mm_always_ibpb))
+			return ", IBPB: always-on";
+		if (static_key_enabled(&switch_mm_cond_ibpb))
+			return ", IBPB: conditional";
+		return ", IBPB: disabled";
+	}
+	return "";
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
 			       char *buf, unsigned int bug)
 {
@@ -828,9 +1122,11 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
 		return sprintf(buf, "Mitigation: __user pointer sanitization\n");
 
 	case X86_BUG_SPECTRE_V2:
-		return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+			       ibpb_state(),
 			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+			       stibp_state(),
 			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
 			       spectre_v2_module_string());
 
 	case X86_BUG_SPEC_STORE_BYPASS:
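The strings assembled by stibp_state() and ibpb_state() above end up in the standard sysfs vulnerability files. A small user-space sketch (not part of the merge) that prints them:

/* Dump the mitigation strings produced by cpu_show_common() above. */
#include <stdio.h>

int main(void)
{
	const char *files[] = {
		"/sys/devices/system/cpu/vulnerabilities/spectre_v2",
		"/sys/devices/system/cpu/vulnerabilities/spec_store_bypass",
	};
	char line[256];

	for (unsigned i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
		FILE *f = fopen(files[i], "r");

		if (!f)
			continue;	/* file absent on unaffected or older kernels */
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", files[i], line);	/* e.g. "..., STIBP: conditional, ..." */
		fclose(f);
	}
	return 0;
}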
@@ -760,6 +760,12 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_STIBP);
 		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
 	}
+
+	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
+		set_cpu_cap(c, X86_FEATURE_SSBD);
+		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
+	}
 }
 
 void get_cpu_cap(struct cpuinfo_x86 *c)
@@ -958,7 +964,8 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
 
 	if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
-	    !(ia32_cap & ARCH_CAP_SSB_NO))
+	    !(ia32_cap & ARCH_CAP_SSB_NO) &&
+	    !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
 		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
 	if (x86_match_cpu(cpu_no_speculation))
@@ -56,7 +56,7 @@
 /* Threshold LVT offset is at MSR0xC0000410[15:12] */
 #define SMCA_THR_LVT_OFF	0xF000
 
-static bool thresholding_en;
+static bool thresholding_irq_en;
 
 static const char * const th_names[] = {
 	"load_store",
@@ -533,9 +533,8 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
 
 set_offset:
 	offset = setup_APIC_mce_threshold(offset, new);
-
-	if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
-		mce_threshold_vector = amd_threshold_interrupt;
+	if (offset == new)
+		thresholding_irq_en = true;
 
 done:
 	mce_threshold_block_init(&b, offset);
@@ -1356,9 +1355,6 @@ int mce_threshold_remove_device(unsigned int cpu)
 {
 	unsigned int bank;
 
-	if (!thresholding_en)
-		return 0;
-
 	for (bank = 0; bank < mca_cfg.banks; ++bank) {
 		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
 			continue;
@@ -1376,9 +1372,6 @@ int mce_threshold_create_device(unsigned int cpu)
 	struct threshold_bank **bp;
 	int err = 0;
 
-	if (!thresholding_en)
-		return 0;
-
 	bp = per_cpu(threshold_banks, cpu);
 	if (bp)
 		return 0;
@@ -1407,9 +1400,6 @@ static __init int threshold_init_device(void)
 {
 	unsigned lcpu = 0;
 
-	if (mce_threshold_vector == amd_threshold_interrupt)
-		thresholding_en = true;
-
 	/* to hit CPUs online before the notifier is up */
 	for_each_online_cpu(lcpu) {
 		int err = mce_threshold_create_device(lcpu);
@@ -1418,6 +1408,9 @@ static __init int threshold_init_device(void)
 			return err;
 	}
 
+	if (thresholding_irq_en)
+		mce_threshold_vector = amd_threshold_interrupt;
+
 	return 0;
 }
 /*
@@ -344,10 +344,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 		sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
 	}
 
+	local_bh_disable();
 	fpu->initialized = 1;
-	preempt_disable();
 	fpu__restore(fpu);
-	preempt_enable();
+	local_bh_enable();
 
 	return err;
 } else {
@ -41,6 +41,8 @@
|
||||
#include <asm/prctl.h>
|
||||
#include <asm/spec-ctrl.h>
|
||||
|
||||
#include "process.h"
|
||||
|
||||
/*
|
||||
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
|
||||
* no more per-task TSS's. The TSS size is kept cacheline-aligned
|
||||
@ -255,11 +257,12 @@ void arch_setup_new_exec(void)
|
||||
enable_cpuid();
|
||||
}
|
||||
|
||||
static inline void switch_to_bitmap(struct tss_struct *tss,
|
||||
struct thread_struct *prev,
|
||||
static inline void switch_to_bitmap(struct thread_struct *prev,
|
||||
struct thread_struct *next,
|
||||
unsigned long tifp, unsigned long tifn)
|
||||
{
|
||||
struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
|
||||
|
||||
if (tifn & _TIF_IO_BITMAP) {
|
||||
/*
|
||||
* Copy the relevant range of the IO bitmap.
|
||||
@ -398,32 +401,85 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

static __always_inline void intel_set_ssb_state(unsigned long tifn)
/*
 * Update the MSRs managing speculation control, during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						      unsigned long tifn)
{
	u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
	unsigned long tif_diff = tifp ^ tifn;
	u64 msr = x86_spec_ctrl_base;
	bool updmsr = false;

	wrmsrl(MSR_IA32_SPEC_CTRL, msr);
	/*
	 * If TIF_SSBD is different, select the proper mitigation
	 * method. Note that if SSBD mitigation is disabled or permanently
	 * enabled this branch can't be taken because nothing can set
	 * TIF_SSBD.
	 */
	if (tif_diff & _TIF_SSBD) {
		if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
			amd_set_ssb_virt_state(tifn);
		} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
			amd_set_core_ssb_state(tifn);
		} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
			   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			msr |= ssbd_tif_to_spec_ctrl(tifn);
			updmsr = true;
		}
	}

	/*
	 * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
	 * otherwise avoid the MSR write.
	 */
	if (IS_ENABLED(CONFIG_SMP) &&
	    static_branch_unlikely(&switch_to_cond_stibp)) {
		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
		msr |= stibp_tif_to_spec_ctrl(tifn);
	}

	if (updmsr)
		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}

static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
		amd_set_ssb_virt_state(tifn);
	else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		amd_set_core_ssb_state(tifn);
	else
		intel_set_ssb_state(tifn);
	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
		if (task_spec_ssb_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SSBD);
		else
			clear_tsk_thread_flag(tsk, TIF_SSBD);

		if (task_spec_ib_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
		else
			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
	}
	/* Return the updated threadinfo flags */
	return task_thread_info(tsk)->flags;
}

void speculative_store_bypass_update(unsigned long tif)
void speculation_ctrl_update(unsigned long tif)
{
	/* Forced update. Make sure all relevant TIF flags are different */
	preempt_disable();
	__speculative_store_bypass_update(tif);
	__speculation_ctrl_update(~tif, tif);
	preempt_enable();
}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev, *next;
	unsigned long tifp, tifn;
@ -433,7 +489,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,

	tifn = READ_ONCE(task_thread_info(next_p)->flags);
	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
	switch_to_bitmap(tss, prev, next, tifp, tifn);
	switch_to_bitmap(prev, next, tifp, tifn);

	propagate_user_return_notify(prev_p, next_p);

@ -454,8 +510,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if ((tifp ^ tifn) & _TIF_SSBD)
		__speculative_store_bypass_update(tifn);
	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
		__speculation_ctrl_update(tifp, tifn);
	} else {
		speculation_ctrl_update_tif(prev_p);
		tifn = speculation_ctrl_update_tif(next_p);

		/* Enforce MSR update to ensure consistent state */
		__speculation_ctrl_update(~tifn, tifn);
	}
}

/*
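The core idea in __speculation_ctrl_update() above is to XOR the previous and next tasks' flag words and touch the expensive SPEC_CTRL MSR only when a bit that actually matters changed. A minimal user-space sketch of that gating (illustrative C, not kernel code; the TIF values and msr_writes counter stand in for the real flags and wrmsrl()):

#include <stdio.h>
#include <stdbool.h>

#define TIF_SSBD	(1UL << 0)
#define TIF_SPEC_IB	(1UL << 1)

static unsigned long msr_writes;

static void update(unsigned long tifp, unsigned long tifn, bool cond_stibp)
{
	unsigned long tif_diff = tifp ^ tifn;
	bool updmsr = false;

	if (tif_diff & TIF_SSBD)
		updmsr = true;
	if (cond_stibp && (tif_diff & TIF_SPEC_IB))
		updmsr = true;
	if (updmsr)
		msr_writes++;
}

int main(void)
{
	update(0, TIF_SSBD, false);		/* SSBD toggled: 1 write  */
	update(TIF_SSBD, TIF_SSBD, false);	/* no change:    0 writes */
	update(0, TIF_SPEC_IB, false);		/* STIBP off:    0 writes */
	update(0, TIF_SPEC_IB, true);		/* STIBP cond:   1 write  */
	printf("MSR writes: %lu (expect 2)\n", msr_writes);
	return 0;
}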
39	arch/x86/kernel/process.h	Normal file
@ -0,0 +1,39 @@
// SPDX-License-Identifier: GPL-2.0
//
// Code shared between 32 and 64 bit

#include <asm/spec-ctrl.h>

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);

/*
 * This needs to be inline to optimize for the common case where no extra
 * work needs to be done.
 */
static inline void switch_to_extra(struct task_struct *prev,
				   struct task_struct *next)
{
	unsigned long next_tif = task_thread_info(next)->flags;
	unsigned long prev_tif = task_thread_info(prev)->flags;

	if (IS_ENABLED(CONFIG_SMP)) {
		/*
		 * Avoid __switch_to_xtra() invocation when conditional
		 * STIBP is disabled and the only different bit is
		 * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
		 * in the TIF_WORK_CTXSW masks.
		 */
		if (!static_branch_likely(&switch_to_cond_stibp)) {
			prev_tif &= ~_TIF_SPEC_IB;
			next_tif &= ~_TIF_SPEC_IB;
		}
	}

	/*
	 * __switch_to_xtra() handles debug registers, i/o bitmaps,
	 * speculation mitigations etc.
	 */
	if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT ||
		     prev_tif & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev, next);
}
@ -59,6 +59,8 @@
#include <asm/intel_rdt_sched.h>
#include <asm/proto.h>

#include "process.h"

void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
@ -234,7 +236,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

@ -266,12 +267,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);
	switch_to_extra(prev_p, next_p);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
@ -59,6 +59,8 @@
#include <asm/unistd_32_ia32.h>
#endif

#include "process.h"

__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Prints also some state that isn't saved in the pt_regs */
@ -400,7 +402,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(irq_count) != -1);
@ -467,12 +468,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
	/* Reload sp0. */
	update_sp0(next_p);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);
	__switch_to_xtra(prev_p, next_p);

#ifdef CONFIG_XEN_PV
	/*
@ -367,7 +367,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,

	/* cpuid 0x80000008.ebx */
	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
		F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
		F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_cpuid_C000_0001_edx_x86_features =
@ -649,7 +650,12 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			entry->ebx |= F(VIRT_SSBD);
		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		/*
		 * The preference is to use SPEC CTRL MSR instead of the
		 * VIRT_SPEC MSR.
		 */
		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
		    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
			entry->ebx |= F(VIRT_SSBD);
		break;
	}
@ -4734,9 +4734,9 @@ static bool need_remote_flush(u64 old, u64 new)
}

static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
				    const u8 *new, int *bytes)
				    int *bytes)
{
	u64 gentry;
	u64 gentry = 0;
	int r;

	/*
@ -4748,22 +4748,12 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		*gpa &= ~(gpa_t)7;
		*bytes = 8;
		r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
		if (r)
			gentry = 0;
		new = (const u8 *)&gentry;
	}

	switch (*bytes) {
	case 4:
		gentry = *(const u32 *)new;
		break;
	case 8:
		gentry = *(const u64 *)new;
		break;
	default:
		gentry = 0;
		break;
	if (*bytes == 4 || *bytes == 8) {
		r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
		if (r)
			gentry = 0;
	}

	return gentry;
@ -4876,8 +4866,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);

	/*
	 * No need to care whether allocation memory is successful
	 * or not since pte prefetch is skipped if it does not have
@ -4886,6 +4874,9 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);

	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);

	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
@ -1399,20 +1399,23 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
static int avic_init_access_page(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;
	int ret = 0;

	mutex_lock(&kvm->slots_lock);
	if (kvm->arch.apic_access_page_done)
		return 0;
		goto out;

	ret = x86_set_memory_region(kvm,
				    APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
				    APIC_DEFAULT_PHYS_BASE,
				    PAGE_SIZE);
	ret = __x86_set_memory_region(kvm,
				      APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
				      APIC_DEFAULT_PHYS_BASE,
				      PAGE_SIZE);
	if (ret)
		return ret;
		goto out;

	kvm->arch.apic_access_page_done = true;
	return 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return ret;
}

static int avic_init_backing_page(struct kvm_vcpu *vcpu)
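The avic hunk above converts early returns that leaked slots_lock into a single unlock path. A generic user-space sketch of that goto-out pattern (pthreads, purely illustrative; no KVM APIs involved):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int page_done;

static int init_once(int fail)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (page_done)
		goto out;	/* already set up: unlock and return 0 */

	if (fail) {
		ret = -1;	/* error path also funnels through out */
		goto out;
	}

	page_done = 1;
out:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	/* prints "0 0 0": the third call short-circuits on page_done */
	printf("%d %d %d\n", init_once(0), init_once(0), init_once(1));
	return 0;
}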
@ -1733,21 +1736,31 @@ out:
	return ERR_PTR(err);
}

static void svm_clear_current_vmcb(struct vmcb *vmcb)
{
	int i;

	for_each_online_cpu(i)
		cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/*
	 * The vmcb page can be recycled, causing a false negative in
	 * svm_vcpu_load(). So, ensure that no logical CPU has this
	 * vmcb page recorded as its current vmcb.
	 */
	svm_clear_current_vmcb(svm->vmcb);

	__free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->nested.hsave));
	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
	/*
	 * The vmcb page can be recycled, causing a false negative in
	 * svm_vcpu_load(). So do a full IBPB now.
	 */
	indirect_branch_prediction_barrier();
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@ -3644,7 +3657,8 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr_info->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
			return 1;

		msr_info->data = svm->spec_ctrl;
@ -3749,11 +3763,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
			return 1;

		/* The STIBP bit doesn't fault even if it's not advertised */
		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
		if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
			return 1;

		svm->spec_ctrl = data;
@ -6378,6 +6378,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
	clock_pairing.nsec = ts.tv_nsec;
	clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
	clock_pairing.flags = 0;
	memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));

	ret = 0;
	if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
@ -6884,7 +6885,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
	else {
		if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
			kvm_x86_ops->sync_pir_to_irr(vcpu);
		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
		if (ioapic_in_kernel(vcpu->kvm))
			kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
	}
	bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
		  vcpu_to_synic(vcpu)->vec_bitmap, 256);
@ -29,6 +29,12 @@
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

/*
 * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
 * stored in cpu_tlb_state.last_user_mm_ibpb.
 */
#define LAST_USER_MM_IBPB	0x1UL

/*
 * We get here when we do something requiring a TLB invalidation
 * but could not go invalidate all of the contexts. We do the
@ -180,6 +186,89 @@ static void sync_current_stack_to_mm(struct mm_struct *mm)
	}
}

static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
{
	unsigned long next_tif = task_thread_info(next)->flags;
	unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;

	return (unsigned long)next->mm | ibpb;
}

static void cond_ibpb(struct task_struct *next)
{
	if (!next || !next->mm)
		return;

	/*
	 * Both the conditional and the always IBPB mode use the mm
	 * pointer to avoid the IBPB when switching between tasks of the
	 * same process. Using the mm pointer instead of mm->context.ctx_id
	 * opens a hypothetical hole vs. mm_struct reuse, which is more or
	 * less impossible to control by an attacker. Aside from that it
	 * would only affect the first schedule so the theoretically
	 * exposed data is not really interesting.
	 */
	if (static_branch_likely(&switch_mm_cond_ibpb)) {
		unsigned long prev_mm, next_mm;

		/*
		 * This is a bit more complex than the always mode because
		 * it has to handle two cases:
		 *
		 * 1) Switch from a user space task (potential attacker)
		 *    which has TIF_SPEC_IB set to a user space task
		 *    (potential victim) which has TIF_SPEC_IB not set.
		 *
		 * 2) Switch from a user space task (potential attacker)
		 *    which has TIF_SPEC_IB not set to a user space task
		 *    (potential victim) which has TIF_SPEC_IB set.
		 *
		 * This could be done by unconditionally issuing IBPB when
		 * a task which has TIF_SPEC_IB set is either scheduled in
		 * or out. Though that results in two flushes when:
		 *
		 * - the same user space task is scheduled out and later
		 *   scheduled in again and only a kernel thread ran in
		 *   between.
		 *
		 * - a user space task belonging to the same process is
		 *   scheduled in after a kernel thread ran in between
		 *
		 * - a user space task belonging to the same process is
		 *   scheduled in immediately.
		 *
		 * Optimize this with reasonably small overhead for the
		 * above cases. Mangle the TIF_SPEC_IB bit into the mm
		 * pointer of the incoming task which is stored in
		 * cpu_tlbstate.last_user_mm_ibpb for comparison.
		 */
		next_mm = mm_mangle_tif_spec_ib(next);
		prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);

		/*
		 * Issue IBPB only if the mm's are different and one or
		 * both have the IBPB bit set.
		 */
		if (next_mm != prev_mm &&
		    (next_mm | prev_mm) & LAST_USER_MM_IBPB)
			indirect_branch_prediction_barrier();

		this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
	}

	if (static_branch_unlikely(&switch_mm_always_ibpb)) {
		/*
		 * Only flush when switching to a user space task with a
		 * different context than the user space task which ran
		 * last on this CPU.
		 */
		if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
			indirect_branch_prediction_barrier();
			this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
		}
	}
}
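The mangling trick above works because mm pointers are at least word aligned, so bit 0 is free to carry the task's IBPB state. A runnable user-space model of mm_mangle_tif_spec_ib()/cond_ibpb() (plain C sketch; ibpb_flushes stands in for indirect_branch_prediction_barrier()):

#include <stdio.h>

#define LAST_USER_MM_IBPB 0x1UL

struct mm { int dummy; };

static unsigned long last_user_mm_ibpb;
static unsigned long ibpb_flushes;

static unsigned long mangle(struct mm *mm, int tif_spec_ib)
{
	return (unsigned long)mm | (tif_spec_ib ? LAST_USER_MM_IBPB : 0);
}

static void cond_ibpb(struct mm *next, int tif_spec_ib)
{
	unsigned long next_mm = mangle(next, tif_spec_ib);
	unsigned long prev_mm = last_user_mm_ibpb;

	/* flush only if the mm changed and either side is protected */
	if (next_mm != prev_mm && ((next_mm | prev_mm) & LAST_USER_MM_IBPB))
		ibpb_flushes++;
	last_user_mm_ibpb = next_mm;
}

int main(void)
{
	struct mm a, b;

	cond_ibpb(&a, 1);	/* switch to a protected task: flush     */
	cond_ibpb(&a, 1);	/* same mm again: no flush               */
	cond_ibpb(&b, 0);	/* leaving a protected task: flush       */
	cond_ibpb(&a, 0);	/* neither side protected: no flush      */
	printf("IBPB flushes: %lu (expect 2)\n", ibpb_flushes);
	return 0;
}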
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
@ -248,27 +337,13 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
	} else {
		u16 new_asid;
		bool need_flush;
		u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);

		/*
		 * Avoid user/user BTB poisoning by flushing the branch
		 * predictor when switching between processes. This stops
		 * one process from doing Spectre-v2 attacks on another.
		 *
		 * As an optimization, flush indirect branches only when
		 * switching into processes that disable dumping. This
		 * protects high value processes like gpg, without having
		 * too high performance overhead. IBPB is *expensive*!
		 *
		 * This will not flush branches when switching into kernel
		 * threads. It will also not flush if we switch to idle
		 * thread and back to the same process. It will flush if we
		 * switch to a different non-dumpable process.
		 */
		if (tsk && tsk->mm &&
		    tsk->mm->context.ctx_id != last_ctx_id &&
		    get_dumpable(tsk->mm) != SUID_DUMP_USER)
			indirect_branch_prediction_barrier();
		cond_ibpb(tsk);

		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
			/*
@ -318,14 +393,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
	}

	/*
	 * Record last user mm's context id, so we can avoid
	 * flushing branch buffer with IBPB if we switch back
	 * to the same user.
	 */
	if (next != &init_mm)
		this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);

	/* Make sure we write CR3 before loaded_mm. */
	barrier();

@ -406,7 +473,7 @@ void initialize_tlbstate_and_flush(void)
	write_cr3(build_cr3(mm->pgd, 0));

	/* Reinitialize tlbstate. */
	this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id);
	this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
	this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
	this_cpu_write(cpu_tlbstate.next_asid, 1);
	this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
@ -91,14 +91,14 @@ int main(void)
	DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
	DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
#if XTENSA_HAVE_COPROCESSORS
	DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
	DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
	DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
	DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
	DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
	DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
	DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
	DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
#endif
	DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
	DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
@ -88,18 +88,21 @@ void coprocessor_release_all(struct thread_info *ti)

void coprocessor_flush_all(struct thread_info *ti)
{
	unsigned long cpenable;
	unsigned long cpenable, old_cpenable;
	int i;

	preempt_disable();

	RSR_CPENABLE(old_cpenable);
	cpenable = ti->cpenable;
	WSR_CPENABLE(cpenable);

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
			coprocessor_flush(ti, i);
		cpenable >>= 1;
	}
	WSR_CPENABLE(old_cpenable);

	preempt_enable();
}
@ -127,12 +127,37 @@ static int ptrace_setregs(struct task_struct *child, void __user *uregs)
}


#if XTENSA_HAVE_COPROCESSORS
#define CP_OFFSETS(cp)							\
	{								\
		.elf_xtregs_offset = offsetof(elf_xtregs_t, cp),	\
		.ti_offset = offsetof(struct thread_info, xtregs_cp.cp), \
		.sz = sizeof(xtregs_ ## cp ## _t),			\
	}

static const struct {
	size_t elf_xtregs_offset;
	size_t ti_offset;
	size_t sz;
} cp_offsets[] = {
	CP_OFFSETS(cp0),
	CP_OFFSETS(cp1),
	CP_OFFSETS(cp2),
	CP_OFFSETS(cp3),
	CP_OFFSETS(cp4),
	CP_OFFSETS(cp5),
	CP_OFFSETS(cp6),
	CP_OFFSETS(cp7),
};
#endif

static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
{
	struct pt_regs *regs = task_pt_regs(child);
	struct thread_info *ti = task_thread_info(child);
	elf_xtregs_t __user *xtregs = uregs;
	int ret = 0;
	int i __maybe_unused;

	if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
		return -EIO;
@ -140,8 +165,13 @@ static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
#if XTENSA_HAVE_COPROCESSORS
	/* Flush all coprocessor registers to memory. */
	coprocessor_flush_all(ti);
	ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp,
			      sizeof(xtregs_coprocessor_t));

	for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
		ret |= __copy_to_user((char __user *)xtregs +
				      cp_offsets[i].elf_xtregs_offset,
				      (const char *)ti +
				      cp_offsets[i].ti_offset,
				      cp_offsets[i].sz);
#endif
	ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
			      sizeof(xtregs->opt));
@ -157,6 +187,7 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
	struct pt_regs *regs = task_pt_regs(child);
	elf_xtregs_t *xtregs = uregs;
	int ret = 0;
	int i __maybe_unused;

	if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t)))
		return -EFAULT;
@ -166,8 +197,11 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
	coprocessor_flush_all(ti);
	coprocessor_release_all(ti);

	ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
				sizeof(xtregs_coprocessor_t));
	for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
		ret |= __copy_from_user((char *)ti + cp_offsets[i].ti_offset,
					(const char __user *)xtregs +
					cp_offsets[i].elf_xtregs_offset,
					cp_offsets[i].sz);
#endif
	ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
				sizeof(xtregs->opt));
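The CP_OFFSETS table above lets one loop move each coprocessor area between two structs that lay the fields out differently, instead of a single bulk memcpy that assumed identical layout. A runnable sketch of the same table-driven-offsetof idea (the structs are invented stand-ins for elf_xtregs_t and struct thread_info):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct src { int pad; int cp0; long cp1; };
struct dst { long cp1; int cp0; };	/* different field order */

#define CP_OFFSETS(f) \
	{ offsetof(struct src, f), offsetof(struct dst, f), \
	  sizeof(((struct src *)0)->f) }

static const struct { size_t s_off, d_off, sz; } cp_offsets[] = {
	CP_OFFSETS(cp0),
	CP_OFFSETS(cp1),
};

int main(void)
{
	struct src s = { 0, 42, 99L };
	struct dst d;
	size_t i;

	/* one loop copies every described field, whatever the layouts */
	for (i = 0; i < sizeof(cp_offsets) / sizeof(cp_offsets[0]); i++)
		memcpy((char *)&d + cp_offsets[i].d_off,
		       (const char *)&s + cp_offsets[i].s_off,
		       cp_offsets[i].sz);

	printf("cp0=%d cp1=%ld\n", d.cp0, d.cp1);	/* cp0=42 cp1=99 */
	return 0;
}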
@ -3136,7 +3136,6 @@ static void binder_transaction(struct binder_proc *proc,
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
@ -3632,14 +3631,18 @@ static int binder_thread_write(struct binder_proc *proc,

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
@ -149,14 +149,12 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
	else {
		/*
		 * Guard against user threads attempting to
		 * free the buffer twice
		 * free the buffer when in use by kernel or
		 * after it's already been freed.
		 */
		if (buffer->free_in_progress) {
			pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
			       alloc->pid, current->pid, (u64)user_ptr);
			return NULL;
		}
		buffer->free_in_progress = 1;
		if (!buffer->allow_user_free)
			return ERR_PTR(-EPERM);
		buffer->allow_user_free = 0;
		return buffer;
	}
}
@ -490,7 +488,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->free_in_progress = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		   "%d: binder_alloc_buf size %zd got %pK\n",
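The binder change relies on the kernel's ERR_PTR convention: a small negative errno is encoded in an otherwise invalid pointer value, so one return slot can carry either a buffer or the reason no buffer was returned. A user-space re-implementation as a sketch (the real macros live in include/linux/err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *prepare_to_free(int allow_user_free)
{
	static char buffer[16];

	if (!allow_user_free)
		return ERR_PTR(-EPERM);	/* busy: caller must not free it */
	return buffer;
}

int main(void)
{
	void *buf = prepare_to_free(0);

	if (IS_ERR_OR_NULL(buf)) {
		if (PTR_ERR(buf) == -EPERM)
			puts("matched unreturned or currently freeing buffer");
		else
			puts("no match");
	}
	return 0;
}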
@ -50,8 +50,7 @@ struct binder_buffer {
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned free_in_progress:1;
	unsigned debug_id:28;
	unsigned debug_id:29;

	struct binder_transaction *transaction;
@ -1641,6 +1641,12 @@ static void atc_free_chan_resources(struct dma_chan *chan)
	atchan->descs_allocated = 0;
	atchan->status = 0;

	/*
	 * Free atslave allocated in at_dma_xlate()
	 */
	kfree(chan->private);
	chan->private = NULL;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}

@ -1675,7 +1681,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
	atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
	if (!atslave)
		return NULL;

@ -2000,6 +2006,8 @@ static int at_dma_remove(struct platform_device *pdev)
	struct resource *io;

	at_dma_off(atdma);
	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->memset_pool);
@ -583,7 +583,8 @@ void ast_driver_unload(struct drm_device *dev)
	drm_mode_config_cleanup(dev);

	ast_mm_fini(ast);
	pci_iounmap(dev->pdev, ast->ioregs);
	if (ast->ioregs != ast->regs + AST_IO_MM_OFFSET)
		pci_iounmap(dev->pdev, ast->ioregs);
	pci_iounmap(dev->pdev, ast->regs);
	kfree(ast);
}
@ -133,6 +133,7 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)

	lockdep_assert_held_once(&dev->master_mutex);

	WARN_ON(fpriv->is_master);
	old_master = fpriv->master;
	fpriv->master = drm_master_create(dev);
	if (!fpriv->master) {
@ -161,6 +162,7 @@ out_err:
	/* drop references and restore old master on failure */
	drm_master_put(&fpriv->master);
	fpriv->master = old_master;
	fpriv->is_master = 0;

	return ret;
}
@ -99,7 +99,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
	/* Wait for the pipe enable to take effect. */
	for (count = 0; count < COUNT_MAX; count++) {
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_PIPE_STATE) == 1)
		if (temp & PIPEACONF_PIPE_STATE)
			break;
	}
}
@ -697,6 +697,7 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
	.reg_read = meson_dw_hdmi_reg_read,
	.reg_write = meson_dw_hdmi_reg_write,
	.max_register = 0x10000,
	.fast_io = true,
};

static bool meson_hdmi_connector_is_available(struct device *dev)
@ -184,18 +184,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
	if (lut_sel == VIU_LUT_OSD_OETF) {
		writel(0, priv->io_base + _REG(addr_port));

		for (i = 0; i < 20; i++)
		for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
			writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
				priv->io_base + _REG(data_port));

		writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16),
			priv->io_base + _REG(data_port));

		for (i = 0; i < 20; i++)
		for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
			writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
				priv->io_base + _REG(data_port));

		for (i = 0; i < 20; i++)
		for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
			writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
				priv->io_base + _REG(data_port));

@ -211,18 +211,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
	} else if (lut_sel == VIU_LUT_OSD_EOTF) {
		writel(0, priv->io_base + _REG(addr_port));

		for (i = 0; i < 20; i++)
		for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
			writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
				priv->io_base + _REG(data_port));

		writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16),
			priv->io_base + _REG(data_port));

		for (i = 0; i < 20; i++)
		for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
			writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
				priv->io_base + _REG(data_port));

		for (i = 0; i < 20; i++)
		for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
			writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
				priv->io_base + _REG(data_port));

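The LUT loops above pack two 16-bit entries into each 32-bit data-port write, so a table of N entries takes N/2 writes; deriving the bound from the table size instead of a hard-coded 20 is the whole fix. A small illustrative model (printing the packed word in place of writel()):

#include <stdint.h>
#include <stdio.h>

#define LUT_SIZE 8	/* stand-in for OSD_OETF_LUT_SIZE */

int main(void)
{
	uint16_t map[LUT_SIZE] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	int i;

	for (i = 0; i < LUT_SIZE / 2; i++) {
		/* low half = even entry, high half = odd entry */
		uint32_t word = map[i * 2] | ((uint32_t)map[i * 2 + 1] << 16);

		printf("write 0x%08x\n", word);
	}
	return 0;
}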
@ -223,8 +223,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
	struct device_node *child, *node;
	int ret;

	node = of_find_compatible_node(dev->of_node, NULL,
		"qcom,gpu-pwrlevels");
	node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
	if (!node) {
		dev_err(dev, "Could not find the GPU powerlevels\n");
		return -ENXIO;
@ -245,6 +244,8 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
		dev_pm_opp_add(dev, val, 0);
	}

	of_node_put(node);

	return 0;
}
@ -454,6 +454,14 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
	}
	wait_for_completion(&msginfo->waitevent);

	if (msginfo->response.gpadl_created.creation_status != 0) {
		pr_err("Failed to establish GPADL: err = 0x%x\n",
		       msginfo->response.gpadl_created.creation_status);

		ret = -EDQUOT;
		goto cleanup;
	}

	if (channel->rescind) {
		ret = -ENODEV;
		goto cleanup;
@ -30,11 +30,6 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state)
	return st_sensors_set_dataready_irq(indio_dev, state);
}

static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
{
	return st_sensors_set_enable(indio_dev, true);
}

static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
{
	int err;
@ -50,7 +45,7 @@ static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
	if (err < 0)
		goto st_magn_buffer_postenable_error;

	return err;
	return st_sensors_set_enable(indio_dev, true);

st_magn_buffer_postenable_error:
	kfree(mdata->buffer_data);
@ -63,11 +58,11 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev)
	int err;
	struct st_sensor_data *mdata = iio_priv(indio_dev);

	err = iio_triggered_buffer_predisable(indio_dev);
	err = st_sensors_set_enable(indio_dev, false);
	if (err < 0)
		goto st_magn_buffer_predisable_error;

	err = st_sensors_set_enable(indio_dev, false);
	err = iio_triggered_buffer_predisable(indio_dev);

st_magn_buffer_predisable_error:
	kfree(mdata->buffer_data);
@ -75,7 +70,6 @@ st_magn_buffer_predisable_error:
}

static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
	.preenable = &st_magn_buffer_preenable,
	.postenable = &st_magn_buffer_postenable,
	.predisable = &st_magn_buffer_predisable,
};
@ -869,31 +869,26 @@ enum mlx5_ib_width {
	MLX5_IB_WIDTH_12X	= 1 << 4
};

static int translate_active_width(struct ib_device *ibdev, u8 active_width,
static void translate_active_width(struct ib_device *ibdev, u8 active_width,
				  u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int err = 0;

	if (active_width & MLX5_IB_WIDTH_1X) {
	if (active_width & MLX5_IB_WIDTH_1X)
		*ib_width = IB_WIDTH_1X;
	} else if (active_width & MLX5_IB_WIDTH_2X) {
		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
			    (int)active_width);
		err = -EINVAL;
	} else if (active_width & MLX5_IB_WIDTH_4X) {
	else if (active_width & MLX5_IB_WIDTH_4X)
		*ib_width = IB_WIDTH_4X;
	} else if (active_width & MLX5_IB_WIDTH_8X) {
	else if (active_width & MLX5_IB_WIDTH_8X)
		*ib_width = IB_WIDTH_8X;
	} else if (active_width & MLX5_IB_WIDTH_12X) {
	else if (active_width & MLX5_IB_WIDTH_12X)
		*ib_width = IB_WIDTH_12X;
	} else {
		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
	else {
		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
			    (int)active_width);
		err = -EINVAL;
		*ib_width = IB_WIDTH_4X;
	}

	return err;
	return;
}

static int mlx5_mtu_to_ib_mtu(int mtu)
@ -1001,10 +996,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
	if (err)
		goto out;

	err = translate_active_width(ibdev, ib_link_width_oper,
				     &props->active_width);
	if (err)
		goto out;
	translate_active_width(ibdev, ib_link_width_oper, &props->active_width);

	err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
	if (err)
		goto out;
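The shape of the translate_active_width() fix: an unrecognized hardware width no longer fails the whole port query; it logs and falls back to a sane default. A small sketch of that control flow with hypothetical enum values (only the structure mirrors the driver):

#include <stdio.h>

enum width { W_1X = 1, W_4X = 4, W_8X = 8, W_12X = 12 };

static enum width translate(unsigned int active)
{
	if (active & (1 << 0))
		return W_1X;
	if (active & (1 << 2))
		return W_4X;
	if (active & (1 << 3))
		return W_8X;
	if (active & (1 << 4))
		return W_12X;
	fprintf(stderr, "invalid width 0x%x, defaulting to 4x\n", active);
	return W_4X;	/* default instead of returning -EINVAL */
}

int main(void)
{
	/* prints "8 4": bit 1 is the unsupported 2X case, so it defaults */
	printf("%d %d\n", translate(1 << 3), translate(1 << 1));
	return 0;
}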
@ -1108,7 +1108,9 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
				     IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		pr_err("ib_check_mr_status failed, ret %d\n", ret);
		goto err;
		/* Not a lot we can do, return ambiguous guard error */
		*sector = 0;
		return 0x1;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
@ -1136,9 +1138,6 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}

void iser_err_comp(struct ib_wc *wc, const char *type)
@ -483,18 +483,18 @@ static const u8 xboxone_hori_init[] = {
};

/*
 * This packet is required for some of the PDP pads to start
 * This packet is required for most (all?) of the PDP pads to start
 * sending input reports. These pads include: (0x0e6f:0x02ab),
 * (0x0e6f:0x02a4).
 * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
 */
static const u8 xboxone_pdp_init1[] = {
	0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
};

/*
 * This packet is required for some of the PDP pads to start
 * This packet is required for most (all?) of the PDP pads to start
 * sending input reports. These pads include: (0x0e6f:0x02ab),
 * (0x0e6f:0x02a4).
 * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
 */
static const u8 xboxone_pdp_init2[] = {
	0x06, 0x20, 0x00, 0x02, 0x01, 0x00
@ -530,12 +530,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
	XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
	XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
	XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
	XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
	XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
	XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
	XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
	XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
	XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
	XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
	XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
	XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
	XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
	XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
@ -506,7 +506,8 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev)
	for (i = 0; i < ARRAY_SIZE(cros_ec_keyb_bs); i++) {
		const struct cros_ec_bs_map *map = &cros_ec_keyb_bs[i];

		if (buttons & BIT(map->bit))
		if ((map->ev_type == EV_KEY && (buttons & BIT(map->bit))) ||
		    (map->ev_type == EV_SW && (switches & BIT(map->bit))))
			input_set_capability(idev, map->ev_type, map->code);
	}

@ -407,7 +407,7 @@ matrix_keypad_parse_dt(struct device *dev)
	struct matrix_keypad_platform_data *pdata;
	struct device_node *np = dev->of_node;
	unsigned int *gpios;
	int i, nrow, ncol;
	int ret, i, nrow, ncol;

	if (!np) {
		dev_err(dev, "device lacks DT data\n");
@ -452,12 +452,19 @@ matrix_keypad_parse_dt(struct device *dev)
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < pdata->num_row_gpios; i++)
		gpios[i] = of_get_named_gpio(np, "row-gpios", i);
	for (i = 0; i < nrow; i++) {
		ret = of_get_named_gpio(np, "row-gpios", i);
		if (ret < 0)
			return ERR_PTR(ret);
		gpios[i] = ret;
	}

	for (i = 0; i < pdata->num_col_gpios; i++)
		gpios[pdata->num_row_gpios + i] =
			of_get_named_gpio(np, "col-gpios", i);
	for (i = 0; i < ncol; i++) {
		ret = of_get_named_gpio(np, "col-gpios", i);
		if (ret < 0)
			return ERR_PTR(ret);
		gpios[nrow + i] = ret;
	}

	pdata->row_gpios = gpios;
	pdata->col_gpios = &gpios[pdata->num_row_gpios];
@ -484,10 +491,8 @@ static int matrix_keypad_probe(struct platform_device *pdev)
	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		pdata = matrix_keypad_parse_dt(&pdev->dev);
		if (IS_ERR(pdata)) {
			dev_err(&pdev->dev, "no platform data defined\n");
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		}
	} else if (!pdata->keymap_data) {
		dev_err(&pdev->dev, "no keymap data defined\n");
		return -EINVAL;
@ -1264,6 +1264,9 @@ static const struct acpi_device_id elan_acpi_id[] = {
	{ "ELAN0618", 0 },
	{ "ELAN061C", 0 },
	{ "ELAN061D", 0 },
	{ "ELAN061E", 0 },
	{ "ELAN0620", 0 },
	{ "ELAN0621", 0 },
	{ "ELAN0622", 0 },
	{ "ELAN1000", 0 },
	{ }
@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = {
	"LEN0048", /* X1 Carbon 3 */
	"LEN0046", /* X250 */
	"LEN004a", /* W541 */
	"LEN005b", /* P50 */
	"LEN0071", /* T480 */
	"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
	"LEN0073", /* X1 Carbon G5 (Elantech) */
@ -2105,6 +2105,8 @@ static int em28xx_dvb_fini(struct em28xx *dev)
		}
	}

	em28xx_unregister_dvb(dvb);

	/* remove I2C SEC */
	client = dvb->i2c_client_sec;
	if (client) {
@ -2126,7 +2128,6 @@ static int em28xx_dvb_fini(struct em28xx *dev)
		i2c_unregister_device(client);
	}

	em28xx_unregister_dvb(dvb);
	kfree(dvb);
	dev->dvb = NULL;
	kref_put(&dev->ref, em28xx_free_device);
@ -417,7 +417,7 @@ static int scif_create_remote_lookup(struct scif_dev *remote_dev,
		if (err)
			goto error_window;
		err = scif_map_page(&window->num_pages_lookup.lookup[j],
				    vmalloc_dma_phys ?
				    vmalloc_num_pages ?
				    vmalloc_to_page(&window->num_pages[i]) :
				    virt_to_page(&window->num_pages[i]),
				    remote_dev);
@ -578,6 +578,16 @@ static int init_volumes(struct ubi_device *ubi,
		vol->ubi = ubi;
		reserved_pebs += vol->reserved_pebs;

		/*
		 * We use ubi->peb_count and not vol->reserved_pebs because
		 * we want to keep the code simple. Otherwise we'd have to
		 * resize/check the bitmap upon volume resize too.
		 * Allocating a few bytes more does not hurt.
		 */
		err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
		if (err)
			return err;

		/*
		 * In case of dynamic volume UBI knows nothing about how much
		 * data is stored there. So assume the whole volume is used.
@ -620,16 +630,6 @@ static int init_volumes(struct ubi_device *ubi,
			(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
		vol->used_bytes += av->last_data_size;
		vol->last_eb_bytes = av->last_data_size;

		/*
		 * We use ubi->peb_count and not vol->reserved_pebs because
		 * we want to keep the code simple. Otherwise we'd have to
		 * resize/check the bitmap upon volume resize too.
		 * Allocating a few bytes more does not hurt.
		 */
		err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
		if (err)
			return err;
	}

	/* And add the layout volume */
@ -1691,6 +1691,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
	bool if_up = netif_running(nic->netdev);
	struct bpf_prog *old_prog;
	bool bpf_attached = false;
	int ret = 0;

	/* For now just support only the usual MTU sized frames */
	if (prog && (dev->mtu > 1500)) {
@ -1724,8 +1725,12 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
	if (nic->xdp_prog) {
		/* Attach BPF program */
		nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
		if (!IS_ERR(nic->xdp_prog))
		if (!IS_ERR(nic->xdp_prog)) {
			bpf_attached = true;
		} else {
			ret = PTR_ERR(nic->xdp_prog);
			nic->xdp_prog = NULL;
		}
	}

	/* Calculate Tx queues needed for XDP and network stack */
@ -1737,7 +1742,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
		netif_trans_update(nic->netdev);
	}

	return 0;
	return ret;
}

static int nicvf_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
@ -585,10 +585,12 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
	if (!sq->dmem.base)
		return;

	if (sq->tso_hdrs)
	if (sq->tso_hdrs) {
		dma_free_coherent(&nic->pdev->dev,
				  sq->dmem.q_len * TSO_HEADER_SIZE,
				  sq->tso_hdrs, sq->tso_hdrs_phys);
		sq->tso_hdrs = NULL;
	}

	/* Free pending skbs in the queue */
	smp_rmb();
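Clearing sq->tso_hdrs right after freeing it is what makes a repeated teardown pass harmless: the NULL check short-circuits instead of double-freeing. The same idiom in plain C, as a runnable sketch:

#include <stdlib.h>
#include <stdio.h>

struct queue { char *tso_hdrs; };

static void free_queue(struct queue *q)
{
	if (q->tso_hdrs) {
		free(q->tso_hdrs);
		q->tso_hdrs = NULL;	/* makes the teardown idempotent */
	}
}

int main(void)
{
	struct queue q = { .tso_hdrs = malloc(64) };

	free_queue(&q);
	free_queue(&q);		/* safe: pointer already NULL */
	puts("double teardown ok");
	return 0;
}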
@ -3590,10 +3590,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
	total_blocks = big_ram->num_of_blocks[dev_data->chip_id];
	ram_size = total_blocks * BIG_RAM_BLOCK_SIZE_DWORDS;

	strncpy(type_name, big_ram->instance_name,
		strlen(big_ram->instance_name));
	strncpy(mem_name, big_ram->instance_name,
		strlen(big_ram->instance_name));
	strscpy(type_name, big_ram->instance_name, sizeof(type_name));
	strscpy(mem_name, big_ram->instance_name, sizeof(mem_name));

	/* Dump memory header */
	offset += qed_grc_dump_mem_hdr(p_hwfn,
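The removed strncpy() calls were doubly broken: bounding the copy by strlen(src) ignores the destination size, and strncpy() never writes a NUL terminator when the bound equals the source length. A short demo of the hazard, using snprintf() as a portable stand-in for the kernel-only strscpy():

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *src = "a-rather-long-instance-name";
	char dst[8];

	/*
	 * The old pattern, strncpy(dst, src, strlen(src)), takes its
	 * bound from the *source*: with a long source it overflows
	 * dst, and even when it fits it copies no terminating NUL.
	 *
	 * Bounded by the destination and always NUL-terminated:
	 */
	snprintf(dst, sizeof(dst), "%s", src);
	printf("truncated copy: \"%s\"\n", dst);	/* "a-rathe" */
	return 0;
}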
@ -216,9 +216,9 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
			 * it just reports sending a packet to the target
			 * (without actual packet transfer).
			 */
			dev_kfree_skb_any(skb);
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
			dev_kfree_skb_any(skb);
		}
	}

@ -140,7 +140,6 @@ struct ipheth_device {
	struct usb_device *udev;
	struct usb_interface *intf;
	struct net_device *net;
	struct sk_buff *tx_skb;
	struct urb *tx_urb;
	struct urb *rx_urb;
	unsigned char *tx_buf;
@ -229,6 +228,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -EPROTO:
		return;
	case 0:
		break;
@ -280,7 +280,6 @@ static void ipheth_sndbulk_callback(struct urb *urb)
		dev_err(&dev->intf->dev, "%s: urb status: %d\n",
			__func__, status);

	dev_kfree_skb_irq(dev->tx_skb);
	netif_wake_queue(dev->net);
}

@ -410,7 +409,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
	if (skb->len > IPHETH_BUF_SIZE) {
		WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
		dev->net->stats.tx_dropped++;
		dev_kfree_skb_irq(skb);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

@ -430,12 +429,11 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
		dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
			__func__, retval);
		dev->net->stats.tx_errors++;
		dev_kfree_skb_irq(skb);
		dev_kfree_skb_any(skb);
	} else {
		dev->tx_skb = skb;

		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);
		netif_stop_queue(net);
	}

@ -61,7 +61,8 @@ static const unsigned long guest_offloads[] = {
	VIRTIO_NET_F_GUEST_TSO4,
	VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN,
	VIRTIO_NET_F_GUEST_UFO
	VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_GUEST_CSUM
};

struct virtnet_stats {
@ -1939,9 +1940,6 @@ static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
	if (!vi->guest_offloads)
		return 0;

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
		offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;

	return virtnet_set_guest_offloads(vi, offloads);
}

@ -1951,8 +1949,6 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)

	if (!vi->guest_offloads)
		return 0;
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
		offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;

	return virtnet_set_guest_offloads(vi, offloads);
}
@ -1970,8 +1966,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
	    && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) {
		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first");
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
		virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
		NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
		return -EOPNOTSUPP;
	}

@ -35,7 +35,6 @@
#include "wl12xx_80211.h"
#include "cmd.h"
#include "event.h"
#include "ps.h"
#include "tx.h"
#include "hw_ops.h"

@ -192,10 +191,6 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,

	timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		return ret;

	do {
		if (time_after(jiffies, timeout_time)) {
			wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
@ -227,7 +222,6 @@ int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
	} while (!event);

out:
	wl1271_ps_elp_sleep(wl);
	kfree(events_vector);
	return ret;
}
@ -89,7 +89,7 @@ static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
	int i;

	for (i = 0; i < PCIE_IATU_NUM; i++)
		dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i);
		dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
}

static int ls1021_pcie_link_up(struct dw_pcie *pci)
@ -566,17 +566,18 @@ EXPORT_SYMBOL_GPL(__devm_reset_control_get);
 * device_reset - find reset controller associated with the device
 *                and perform reset
 * @dev: device to be reset by the controller
 * @optional: whether it is optional to reset the device
 *
 * Convenience wrapper for reset_control_get() and reset_control_reset().
 * Convenience wrapper for __reset_control_get() and reset_control_reset().
 * This is useful for the common case of devices with single, dedicated reset
 * lines.
 */
int device_reset(struct device *dev)
int __device_reset(struct device *dev, bool optional)
{
	struct reset_control *rstc;
	int ret;

	rstc = reset_control_get(dev, NULL);
	rstc = __reset_control_get(dev, NULL, 0, 0, optional);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

@ -586,7 +587,7 @@ int device_reset(struct device *dev)

	return ret;
}
EXPORT_SYMBOL_GPL(device_reset);
EXPORT_SYMBOL_GPL(__device_reset);

/**
 * APIs to manage an array of reset controls.
@ -4545,8 +4545,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
{
	struct qeth_ipa_cmd *cmd;
	struct qeth_arp_query_info *qinfo;
	struct qeth_snmp_cmd *snmp;
	unsigned char *data;
	void *snmp_data;
	__u16 data_len;

	QETH_CARD_TEXT(card, 3, "snpcmdcb");
@ -4554,7 +4554,6 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
	cmd = (struct qeth_ipa_cmd *) sdata;
	data = (unsigned char *)((char *)cmd - reply->offset);
	qinfo = (struct qeth_arp_query_info *) reply->param;
	snmp = &cmd->data.setadapterparms.data.snmp;

	if (cmd->hdr.return_code) {
		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
@ -4567,10 +4566,15 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
		return 0;
	}
	data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
	if (cmd->data.setadapterparms.hdr.seq_no == 1)
		data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
	else
		data_len -= (__u16)((char *)&snmp->request - (char *)cmd);
	if (cmd->data.setadapterparms.hdr.seq_no == 1) {
		snmp_data = &cmd->data.setadapterparms.data.snmp;
		data_len -= offsetof(struct qeth_ipa_cmd,
				     data.setadapterparms.data.snmp);
	} else {
		snmp_data = &cmd->data.setadapterparms.data.snmp.request;
		data_len -= offsetof(struct qeth_ipa_cmd,
				     data.setadapterparms.data.snmp.request);
	}

	/* check if there is enough room in userspace */
	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
@ -4583,16 +4587,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
	QETH_CARD_TEXT_(card, 4, "sseqn%i",
		cmd->data.setadapterparms.hdr.seq_no);
	/* copy entries to user buffer */
	if (cmd->data.setadapterparms.hdr.seq_no == 1) {
		memcpy(qinfo->udata + qinfo->udata_offset,
		       (char *)snmp,
		       data_len + offsetof(struct qeth_snmp_cmd, data));
		qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
	} else {
		memcpy(qinfo->udata + qinfo->udata_offset,
		       (char *)&snmp->request, data_len);
	}
	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
	qinfo->udata_offset += data_len;

	/* check if all replies received ... */
	QETH_CARD_TEXT_(card, 4, "srtot%i",
		cmd->data.setadapterparms.hdr.used_total);
@ -1250,8 +1250,8 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
|
||||
memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
|
||||
|
||||
rspnid->dap = s_id;
|
||||
rspnid->spn_len = (u8) strlen((char *)name);
|
||||
strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len);
|
||||
strlcpy(rspnid->spn, name, sizeof(rspnid->spn));
|
||||
rspnid->spn_len = (u8) strlen(rspnid->spn);
|
||||
|
||||
return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
|
||||
}
|
||||
@ -1271,8 +1271,8 @@ fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
|
||||
memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
|
||||
|
||||
rsnn_nn->node_name = node_name;
|
||||
rsnn_nn->snn_len = (u8) strlen((char *)name);
|
||||
strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
|
||||
strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
|
||||
rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn);
|
||||
|
||||
return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
|
||||
}
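
Both hunks apply the same recipe: the old measure-then-strncpy pair bounded the copy by the source length (so an oversized name overran the destination, and the result was not guaranteed to be NUL-terminated), while the new code does a bounded strlcpy and then measures the always-terminated result. A small userspace sketch of the difference; glibc has no strlcpy, so the demo carries its own:

#include <stdio.h>
#include <string.h>

/* minimal strlcpy for the demo: bounded copy, always NUL-terminates */
static size_t demo_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = (len >= size) ? size - 1 : len;
                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;     /* total length it tried to create */
}

int main(void)
{
        char spn[8];
        const char *name = "PORT-SYMBOLIC-NAME";   /* longer than spn */

        /* old pattern: count taken from the source, no bound, no NUL:
         * strncpy(spn, name, strlen(name)); -- overflows spn[8]      */

        /* new pattern: bounded, terminated, then measure the result */
        demo_strlcpy(spn, name, sizeof(spn));
        printf("stored \"%s\" (len %zu)\n", spn, strlen(spn));
        return 0;
}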

@ -769,23 +769,23 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);

/* Model name/number */
strncpy((char *)&port_cfg->sym_name, model,
BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
strlcpy(port_cfg->sym_name.symname, model,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
BFA_SYMNAME_MAXLEN);

/* Driver Version */
strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
strlcat(port_cfg->sym_name.symname, driver_info->version,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
BFA_SYMNAME_MAXLEN);

/* Host machine name */
strncat((char *)&port_cfg->sym_name,
(char *)driver_info->host_machine_name,
BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
strlcat(port_cfg->sym_name.symname,
driver_info->host_machine_name,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
BFA_SYMNAME_MAXLEN);

/*
* Host OS Info :

@ -793,24 +793,24 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
* OS name string and instead copy the entire OS info string (64 bytes).
*/
if (driver_info->host_os_patch[0] == '\0') {
strncat((char *)&port_cfg->sym_name,
(char *)driver_info->host_os_name,
BFA_FCS_OS_STR_LEN);
strncat((char *)&port_cfg->sym_name,
strlcat(port_cfg->sym_name.symname,
driver_info->host_os_name,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
BFA_SYMNAME_MAXLEN);
} else {
strncat((char *)&port_cfg->sym_name,
(char *)driver_info->host_os_name,
BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
strncat((char *)&port_cfg->sym_name,
strlcat(port_cfg->sym_name.symname,
driver_info->host_os_name,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
BFA_SYMNAME_MAXLEN);

/* Append host OS Patch Info */
strncat((char *)&port_cfg->sym_name,
(char *)driver_info->host_os_patch,
BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
strlcat(port_cfg->sym_name.symname,
driver_info->host_os_patch,
BFA_SYMNAME_MAXLEN);
}

/* null terminate */

@ -830,26 +830,26 @@ bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);

/* Model name/number */
strncpy((char *)&port_cfg->node_sym_name, model,
BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
strncat((char *)&port_cfg->node_sym_name,
strlcpy(port_cfg->node_sym_name.symname, model,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
BFA_SYMNAME_MAXLEN);

/* Driver Version */
strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
strncat((char *)&port_cfg->node_sym_name,
strlcat(port_cfg->node_sym_name.symname, (char *)driver_info->version,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
BFA_SYMNAME_MAXLEN);

/* Host machine name */
strncat((char *)&port_cfg->node_sym_name,
(char *)driver_info->host_machine_name,
BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
strncat((char *)&port_cfg->node_sym_name,
strlcat(port_cfg->node_sym_name.symname,
driver_info->host_machine_name,
BFA_SYMNAME_MAXLEN);
strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
BFA_SYMNAME_MAXLEN);

/* null terminate */
port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
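
The symbolic-name builders swap every strncat, whose size argument counts bytes to append, for strlcat, whose size argument is the total capacity of the destination; that makes a long chain of appends safe against cumulative overflow. A hedged sketch of the pattern; the capacity and separator below are demo values, not the driver's:

#include <stdio.h>
#include <string.h>

#define SYMNAME_MAXLEN 32       /* demo capacity, not the driver's value */
#define SEPARATOR      " | "

/* bounded concatenation: 'size' is the full buffer size, not the
 * number of bytes to append, so repeated calls cannot overflow */
static size_t demo_strlcat(char *dst, const char *src, size_t size)
{
        size_t dlen = strlen(dst);
        size_t slen = strlen(src);

        if (dlen < size) {
                size_t n = size - dlen - 1;
                if (slen < n)
                        n = slen;
                memcpy(dst + dlen, src, n);
                dst[dlen + n] = '\0';
        }
        return dlen + slen;
}

int main(void)
{
        char symname[SYMNAME_MAXLEN] = "";

        demo_strlcat(symname, "ModelX", sizeof(symname));
        demo_strlcat(symname, SEPARATOR, sizeof(symname));
        demo_strlcat(symname, "3.2.25.1", sizeof(symname));
        demo_strlcat(symname, SEPARATOR, sizeof(symname));
        demo_strlcat(symname, "hostname-that-may-be-long", sizeof(symname));
        printf("%s\n", symname);   /* truncated but always terminated */
        return 0;
}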

@ -2642,10 +2642,10 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
hba_attr->fw_version);

strncpy(hba_attr->driver_version, (char *)driver_info->version,
strlcpy(hba_attr->driver_version, (char *)driver_info->version,
sizeof(hba_attr->driver_version));

strncpy(hba_attr->os_name, driver_info->host_os_name,
strlcpy(hba_attr->os_name, driver_info->host_os_name,
sizeof(hba_attr->os_name));

/*

@ -2653,23 +2653,23 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
* to the os name along with a separator
*/
if (driver_info->host_os_patch[0] != '\0') {
strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
strncat(hba_attr->os_name, driver_info->host_os_patch,
sizeof(driver_info->host_os_patch));
strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
sizeof(hba_attr->os_name));
strlcat(hba_attr->os_name, driver_info->host_os_patch,
sizeof(hba_attr->os_name));
}

/* Retrieve the max frame size from the port attr */
bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;

strncpy(hba_attr->node_sym_name.symname,
strlcpy(hba_attr->node_sym_name.symname,
port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
strcpy(hba_attr->vendor_info, "QLogic");
hba_attr->num_ports =
cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
strncpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);

}

@ -2736,20 +2736,20 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
/*
* OS device Name
*/
strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
strlcpy(port_attr->os_device_name, driver_info->os_device_name,
sizeof(port_attr->os_device_name));

/*
* Host name
*/
strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
strlcpy(port_attr->host_name, driver_info->host_machine_name,
sizeof(port_attr->host_name));

port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
port_attr->port_name = bfa_fcs_lport_get_pwwn(port);

strncpy(port_attr->port_sym_name.symname,
(char *)&bfa_fcs_lport_get_psym_name(port), BFA_SYMNAME_MAXLEN);
strlcpy(port_attr->port_sym_name.symname,
bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN);
bfa_fcs_lport_get_attr(port, &lport_attr);
port_attr->port_type = cpu_to_be32(lport_attr.port_type);
port_attr->scos = pport_attr.cos_supported;

@ -3229,7 +3229,7 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
rsp_str[gmal_entry->len-1] = 0;

/* copy IP Address to fabric */
strncpy(bfa_fcs_lport_get_fabric_ipaddr(port),
strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port),
gmal_entry->ip_addr,
BFA_FCS_FABRIC_IPADDR_SZ);
break;

@ -4667,21 +4667,13 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
* to that of the base port.
*/

strncpy((char *)psymbl,
(char *) &
(bfa_fcs_lport_get_psym_name
strlcpy(symbl,
(char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
strlen((char *) &
bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
(port->fcs))));
sizeof(symbl));

/* Ensure we have a null terminating string. */
((char *)psymbl)[strlen((char *) &
bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
(port->fcs)))] = 0;
strncat((char *)psymbl,
(char *) &(bfa_fcs_lport_get_psym_name(port)),
strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)),
sizeof(symbl));
} else {
psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
}

@ -5173,7 +5165,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
u8 symbl[256];
u8 *psymbl = &symbl[0];
int len;

/* Avoid sending RSPN in the following states. */

@ -5203,22 +5194,17 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
strlen((char *)&bfa_fcs_lport_get_psym_name(
bfa_fcs_get_base_port(port->fcs))));
sizeof(symbl));

/* Ensure we have a null terminating string. */
((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
bfa_fcs_get_base_port(port->fcs)))] = 0;

strncat((char *)psymbl,
strlcat(symbl,
(char *)&(bfa_fcs_lport_get_psym_name(port)),
strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
sizeof(symbl));
}

len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
bfa_fcs_lport_get_fcid(port), 0, psymbl);
bfa_fcs_lport_get_fcid(port), 0, symbl);

bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);

@ -2803,7 +2803,7 @@ void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

void

@ -350,8 +350,8 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
lp.eid = event;
lp.log_type = BFA_PL_LOG_TYPE_STRING;
lp.misc = misc;
strncpy(lp.log_entry.string_log, log_str,
BFA_PL_STRING_LOG_SZ - 1);
strlcpy(lp.log_entry.string_log, log_str,
BFA_PL_STRING_LOG_SZ);
lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
bfa_plog_add(plog, &lp);
}

@ -983,20 +983,20 @@ bfad_start_ops(struct bfad_s *bfad) {

/* Fill the driver_info info to fcs*/
memset(&driver_info, 0, sizeof(driver_info));
strncpy(driver_info.version, BFAD_DRIVER_VERSION,
sizeof(driver_info.version) - 1);
strlcpy(driver_info.version, BFAD_DRIVER_VERSION,
sizeof(driver_info.version));
if (host_name)
strncpy(driver_info.host_machine_name, host_name,
sizeof(driver_info.host_machine_name) - 1);
strlcpy(driver_info.host_machine_name, host_name,
sizeof(driver_info.host_machine_name));
if (os_name)
strncpy(driver_info.host_os_name, os_name,
sizeof(driver_info.host_os_name) - 1);
strlcpy(driver_info.host_os_name, os_name,
sizeof(driver_info.host_os_name));
if (os_patch)
strncpy(driver_info.host_os_patch, os_patch,
sizeof(driver_info.host_os_patch) - 1);
strlcpy(driver_info.host_os_patch, os_patch,
sizeof(driver_info.host_os_patch));

strncpy(driver_info.os_device_name, bfad->pci_name,
sizeof(driver_info.os_device_name) - 1);
strlcpy(driver_info.os_device_name, bfad->pci_name,
sizeof(driver_info.os_device_name));

/* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);

@ -843,7 +843,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
char symname[BFA_SYMNAME_MAXLEN];

bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
strncpy(symname, port_attr.port_cfg.sym_name.symname,
strlcpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
return snprintf(buf, PAGE_SIZE, "%s\n", symname);
}

@ -127,7 +127,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)

/* fill in driver attr info */
strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
iocmd->ioc_attr.adapter_attr.fw_ver);

@ -315,9 +315,9 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
iocmd->attr.port_type = port_attr.port_type;
iocmd->attr.loopback = port_attr.loopback;
iocmd->attr.authfail = port_attr.authfail;
strncpy(iocmd->attr.port_symname.symname,
strlcpy(iocmd->attr.port_symname.symname,
port_attr.port_cfg.sym_name.symname,
sizeof(port_attr.port_cfg.sym_name.symname));
sizeof(iocmd->attr.port_symname.symname));

iocmd->status = BFA_STATUS_OK;
return 0;

@ -34,7 +34,6 @@ struct scsi_dev_info_list_table {
};


static const char spaces[] = "                "; /* 16 of them */
static unsigned scsi_default_dev_flags;
static LIST_HEAD(scsi_dev_info_list);
static char scsi_dev_flags[256];

@ -296,20 +295,13 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
size_t from_length;

from_length = strlen(from);
strncpy(to, from, min(to_length, from_length));
if (from_length < to_length) {
if (compatible) {
/*
* NUL terminate the string if it is short.
*/
to[from_length] = '\0';
} else {
/*
* space pad the string if it is short.
*/
strncpy(&to[from_length], spaces,
to_length - from_length);
}
/* this zero-pads the destination */
strncpy(to, from, to_length);
if (from_length < to_length && !compatible) {
/*
* space pad the string if it is short.
*/
memset(&to[from_length], ' ', to_length - from_length);
}
if (from_length > to_length)
printk(KERN_WARNING "%s: %s string '%s' is too long\n",
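
The rewrite leans on the one strncpy behaviour that is wanted here: when the source is shorter than the count, strncpy zero-fills the rest of the destination, so the explicit NUL-termination branch disappears and only the space-padding case (compatible == 0) needs extra work. A standalone sketch of both paddings, with a hypothetical field width:

#include <stdio.h>
#include <string.h>

static void copy_devinfo(const char *from, char *to, size_t to_length,
                         int compatible)
{
        size_t from_length = strlen(from);

        /* strncpy zero-pads 'to' out to to_length when 'from' is short */
        strncpy(to, from, to_length);
        if (from_length < to_length && !compatible) {
                /* legacy table entries are space-padded, not zero-padded */
                memset(&to[from_length], ' ', to_length - from_length);
        }
        if (from_length > to_length)
                fprintf(stderr, "'%s' is too long, truncated\n", from);
}

int main(void)
{
        char vendor[8];

        copy_devinfo("ACME", vendor, sizeof(vendor) - 1, 0);
        vendor[sizeof(vendor) - 1] = '\0';
        printf("[%s]\n", vendor);   /* "[ACME   ]" -- space padded */
        return 0;
}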

@ -1293,7 +1293,7 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy,

sinfo->filled |= BIT(NL80211_STA_INFO_TX_PACKETS);
sinfo->tx_packets = psta->sta_stats.tx_pkts;

sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
}

/* for Ad-Hoc/AP mode */
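
The changed line switches to BIT_ULL because sinfo->filled is a u64 bitmap while BIT(n) expands to 1UL << n, which is only 32 bits wide on 32-bit builds; any flag index of 32 or above would overflow it, so BIT_ULL(n) keeps the arithmetic 64-bit. A standalone illustration, with the macros mirroring their definitions in include/linux/bits.h:

#include <stdio.h>

#define BIT(nr)     (1UL << (nr))       /* width of unsigned long */
#define BIT_ULL(nr) (1ULL << (nr))      /* always 64-bit */

int main(void)
{
        unsigned long long filled = 0;

        /* on an ILP32 target, BIT(40) would shift past the end of a
         * 32-bit unsigned long -- undefined behaviour; BIT_ULL(40)
         * is well-defined on every target */
        filled |= BIT_ULL(40);
        printf("filled = %#llx\n", filled);
        return 0;
}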

@ -4125,12 +4125,6 @@ RTY_SEND_CMD:
rtsx_trace(chip);
return STATUS_FAIL;
}

} else if (rsp_type == SD_RSP_TYPE_R0) {
if ((ptr[3] & 0x1E) != 0x03) {
rtsx_trace(chip);
return STATUS_FAIL;
}
}
}
}

@ -1461,6 +1461,7 @@ vchiq_compat_ioctl_await_completion(struct file *file,
struct vchiq_await_completion32 args32;
struct vchiq_completion_data32 completion32;
unsigned int *msgbufcount32;
unsigned int msgbufcount_native;
compat_uptr_t msgbuf32;
void *msgbuf;
void **msgbufptr;

@ -1572,7 +1573,11 @@ vchiq_compat_ioctl_await_completion(struct file *file,
sizeof(completion32)))
return -EFAULT;

args32.msgbufcount--;
if (get_user(msgbufcount_native, &args->msgbufcount))
return -EFAULT;

if (!msgbufcount_native)
args32.msgbufcount--;

msgbufcount32 =
&((struct vchiq_await_completion32 __user *)arg)->msgbufcount;
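
The compat fix stops trusting the msgbufcount snapshot taken before the native handler ran: the handler may rewrite that field in user memory, so the shim re-reads it with get_user() and only decrements its local copy when the live value really dropped to zero. A loose userspace analogy of stale-snapshot versus re-read; all names here are invented for the demo:

#include <stdio.h>

/* stand-in for the native handler: it consumes one message buffer and
 * publishes the new count through the caller-visible pointer */
static void native_await_completion(unsigned int *msgbufcount)
{
        if (*msgbufcount)
                --*msgbufcount;
}

int main(void)
{
        unsigned int user_count = 1;        /* lives in "user memory" */
        unsigned int stale = user_count;    /* snapshot taken before the call */

        native_await_completion(&user_count);

        /* buggy pattern: decrement the stale snapshot unconditionally;
         * fixed pattern: re-read the live value (the get_user() analogue)
         * and mirror the change only when it really happened */
        if (user_count == 0)
                stale--;

        printf("live=%u compat=%u\n", user_count, stale);
        return 0;
}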

@ -461,10 +461,10 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)

dev_crit(&data->pdev->dev, "THERMAL ALARM: %d > %d\n",
temp, sensor->thres_temp);

thermal_zone_device_update(data->sensor.tzd,
thermal_zone_device_update(data->sensors.tzd,
THERMAL_EVENT_UNSPECIFIED);

} else {
} else if (temp < sensor->thres_temp) {
dev_crit(&data->pdev->dev, "THERMAL ALARM stopped: %d < %d\n",
temp, sensor->thres_temp);
}

@ -524,6 +524,40 @@ static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor,
on ? THERMAL_DEVICE_ENABLED : THERMAL_DEVICE_DISABLED);
}

static int hisi_thermal_setup(struct hisi_thermal_data *data)
{
struct hisi_thermal_sensor *sensor;

sensor = &data->sensors;

/* disable module firstly */
hisi_thermal_reset_enable(data->regs, 0);
hisi_thermal_enable(data->regs, 0);

/* select sensor id */
hisi_thermal_sensor_select(data->regs, sensor->id);

/* setting the hdak time */
hisi_thermal_hdak_set(data->regs, 0);

/* setting lag value between current temp and the threshold */
hisi_thermal_set_lag(data->regs, HISI_TEMP_LAG);

/* enable for interrupt */
hisi_thermal_alarm_set(data->regs, sensor->thres_temp);

hisi_thermal_reset_set(data->regs, HISI_TEMP_RESET);

/* enable module */
hisi_thermal_reset_enable(data->regs, 1);
hisi_thermal_enable(data->regs, 1);

hisi_thermal_alarm_clear(data->regs, 0);
hisi_thermal_alarm_enable(data->regs, 1);

return 0;
}

static int hisi_thermal_probe(struct platform_device *pdev)
{
struct hisi_thermal_data *data;

@ -131,24 +131,6 @@ static void kgdboc_unregister_kbd(void)
#define kgdboc_restore_input()
#endif /* ! CONFIG_KDB_KEYBOARD */

static int kgdboc_option_setup(char *opt)
{
if (!opt) {
pr_err("kgdboc: config string not provided\n");
return -EINVAL;
}

if (strlen(opt) >= MAX_CONFIG_LEN) {
printk(KERN_ERR "kgdboc: config string too long\n");
return -ENOSPC;
}
strcpy(config, opt);

return 0;
}

__setup("kgdboc=", kgdboc_option_setup);

static void cleanup_kgdboc(void)
{
if (kgdb_unregister_nmi_console())

@ -162,15 +144,13 @@ static int configure_kgdboc(void)
{
struct tty_driver *p;
int tty_line = 0;
int err;
int err = -ENODEV;
char *cptr = config;
struct console *cons;

err = kgdboc_option_setup(config);
if (err || !strlen(config) || isspace(config[0]))
if (!strlen(config) || isspace(config[0]))
goto noconfig;

err = -ENODEV;
kgdboc_io_ops.is_console = 0;
kgdb_tty_driver = NULL;

@ -319,6 +299,25 @@ static struct kgdb_io kgdboc_io_ops = {
};

#ifdef CONFIG_KGDB_SERIAL_CONSOLE
static int kgdboc_option_setup(char *opt)
{
if (!opt) {
pr_err("config string not provided\n");
return -EINVAL;
}

if (strlen(opt) >= MAX_CONFIG_LEN) {
pr_err("config string too long\n");
return -ENOSPC;
}
strcpy(config, opt);

return 0;
}

__setup("kgdboc=", kgdboc_option_setup);


/* This is only available if kgdboc is a built in for early debugging */
static int __init kgdboc_early_init(char *opt)
{

@ -64,6 +64,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },

/* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
{ USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },

/* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },

@ -39,4 +39,14 @@ UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
"USB Card Reader",
USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),

UNUSUAL_DEV(0x0bda, 0x0177, 0x0000, 0x9999,
"Realtek",
"USB Card Reader",
USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),

UNUSUAL_DEV(0x0bda, 0x0184, 0x0000, 0x9999,
"Realtek",
"USB Card Reader",
USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),

#endif /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */

@ -10,7 +10,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
export.o tree-log.o free-space-cache.o zlib.o lzo.o zstd.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
uuid-tree.o props.o hash.o free-space-tree.o
uuid-tree.o props.o hash.o free-space-tree.o tree-checker.o

btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o

@ -50,6 +50,7 @@
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"

#ifdef CONFIG_X86
#include <asm/cpufeature.h>

@ -450,9 +451,9 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
int mirror_num = 0;
int failed_mirror = 0;

clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
while (1) {
clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
btree_get_extent, mirror_num);
if (!ret) {

@ -463,14 +464,6 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
ret = -EIO;
}

/*
* This buffer's crc is fine, but its contents are corrupted, so
* there is no reason to read the other copies, they won't be
* any less wrong.
*/
if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
break;

num_copies = btrfs_num_copies(fs_info,
eb->start, eb->len);
if (num_copies == 1)

@ -544,146 +537,6 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
return ret;
}

#define CORRUPT(reason, eb, root, slot) \
btrfs_crit(root->fs_info, \
"corrupt %s, %s: block=%llu, root=%llu, slot=%d", \
btrfs_header_level(eb) == 0 ? "leaf" : "node", \
reason, btrfs_header_bytenr(eb), root->objectid, slot)

static noinline int check_leaf(struct btrfs_root *root,
struct extent_buffer *leaf)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_key leaf_key;
u32 nritems = btrfs_header_nritems(leaf);
int slot;

/*
* Extent buffers from a relocation tree have a owner field that
* corresponds to the subvolume tree they are based on. So just from an
* extent buffer alone we can not find out what is the id of the
* corresponding subvolume tree, so we can not figure out if the extent
* buffer corresponds to the root of the relocation tree or not. So skip
* this check for relocation trees.
*/
if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
struct btrfs_root *check_root;

key.objectid = btrfs_header_owner(leaf);
key.type = BTRFS_ROOT_ITEM_KEY;
key.offset = (u64)-1;

check_root = btrfs_get_fs_root(fs_info, &key, false);
/*
* The only reason we also check NULL here is that during
* open_ctree() some roots has not yet been set up.
*/
if (!IS_ERR_OR_NULL(check_root)) {
struct extent_buffer *eb;

eb = btrfs_root_node(check_root);
/* if leaf is the root, then it's fine */
if (leaf != eb) {
CORRUPT("non-root leaf's nritems is 0",
leaf, check_root, 0);
free_extent_buffer(eb);
return -EIO;
}
free_extent_buffer(eb);
}
return 0;
}

if (nritems == 0)
return 0;

/* Check the 0 item */
if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
BTRFS_LEAF_DATA_SIZE(fs_info)) {
CORRUPT("invalid item offset size pair", leaf, root, 0);
return -EIO;
}

/*
* Check to make sure each items keys are in the correct order and their
* offsets make sense. We only have to loop through nritems-1 because
* we check the current slot against the next slot, which verifies the
* next slot's offset+size makes sense and that the current's slot
* offset is correct.
*/
for (slot = 0; slot < nritems - 1; slot++) {
btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
btrfs_item_key_to_cpu(leaf, &key, slot + 1);

/* Make sure the keys are in the right order */
if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
CORRUPT("bad key order", leaf, root, slot);
return -EIO;
}

/*
* Make sure the offset and ends are right, remember that the
* item data starts at the end of the leaf and grows towards the
* front.
*/
if (btrfs_item_offset_nr(leaf, slot) !=
btrfs_item_end_nr(leaf, slot + 1)) {
CORRUPT("slot offset bad", leaf, root, slot);
return -EIO;
}

/*
* Check to make sure that we don't point outside of the leaf,
* just in case all the items are consistent to each other, but
* all point outside of the leaf.
*/
if (btrfs_item_end_nr(leaf, slot) >
BTRFS_LEAF_DATA_SIZE(fs_info)) {
CORRUPT("slot end outside of leaf", leaf, root, slot);
return -EIO;
}
}

return 0;
}

static int check_node(struct btrfs_root *root, struct extent_buffer *node)
{
unsigned long nr = btrfs_header_nritems(node);
struct btrfs_key key, next_key;
int slot;
u64 bytenr;
int ret = 0;

if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root->fs_info)) {
btrfs_crit(root->fs_info,
"corrupt node: block %llu root %llu nritems %lu",
node->start, root->objectid, nr);
return -EIO;
}

for (slot = 0; slot < nr - 1; slot++) {
bytenr = btrfs_node_blockptr(node, slot);
btrfs_node_key_to_cpu(node, &key, slot);
btrfs_node_key_to_cpu(node, &next_key, slot + 1);

if (!bytenr) {
CORRUPT("invalid item slot", node, root, slot);
ret = -EIO;
goto out;
}

if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
CORRUPT("bad key order", node, root, slot);
ret = -EIO;
goto out;
}
}
out:
return ret;
}
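
Both validators, which this diff relocates into tree-checker.c, use the same adjacent-pair scan: walking slots 0..n-2 and comparing each slot against its successor covers every key-ordering and offset/size constraint in a single pass. A toy version of the leaf scan over a sorted item table; the key and item layouts are invented for the illustration:

#include <stdio.h>

/* toy stand-ins for btrfs keys/items: data grows down from leaf_size */
struct key  { unsigned long long objectid; };
struct item { struct key key; unsigned int offset, size; };

static int check_leaf_items(const struct item *it, int nritems,
                            unsigned int leaf_size)
{
        int slot;

        /* slot 0 must end exactly at the top of the data area */
        if (nritems && it[0].offset + it[0].size != leaf_size)
                return -1;

        for (slot = 0; slot < nritems - 1; slot++) {
                /* keys must be strictly ascending */
                if (it[slot].key.objectid >= it[slot + 1].key.objectid)
                        return -1;
                /* each item must start where its successor ends */
                if (it[slot].offset != it[slot + 1].offset + it[slot + 1].size)
                        return -1;
        }
        return 0;
}

int main(void)
{
        struct item items[] = {
                { { 1 }, 90, 10 },      /* ends at 100 == leaf_size */
                { { 2 }, 70, 20 },
                { { 5 }, 40, 30 },
        };

        printf("leaf %s\n", check_leaf_items(items, 3, 100) ? "corrupt" : "ok");
        return 0;
}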

static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
u64 phy_offset, struct page *page,
u64 start, u64 end, int mirror)

@ -749,12 +602,12 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
* that we don't try and read the other copies of this block, just
* return -EIO.
*/
if (found_level == 0 && check_leaf(root, eb)) {
if (found_level == 0 && btrfs_check_leaf_full(root, eb)) {
set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
ret = -EIO;
}

if (found_level > 0 && check_node(root, eb))
if (found_level > 0 && btrfs_check_node(root, eb))
ret = -EIO;

if (!ret)

@ -4009,7 +3862,13 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
buf->len,
fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
/*
* Since btrfs_mark_buffer_dirty() can be called with item pointer set
* but item data not updated.
* So here we should only check item pointers, not item data.
*/
if (btrfs_header_level(buf) == 0 &&
btrfs_check_leaf_relaxed(root, buf)) {
btrfs_print_leaf(buf);
ASSERT(0);
}
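
The tail of the hunk explains why two checker entry points exist: read completion can afford the full validation (btrfs_check_leaf_full), while btrfs_mark_buffer_dirty() may run with item pointers updated but item data still in flight, so it gets only the pointer-level check (btrfs_check_leaf_relaxed). A schematic of the split, with all names and fields invented for the illustration:

#include <stdio.h>

struct toy_leaf { int nritems; int ptrs_ok; int data_ok; };

/* pointer-level invariants only: safe while item data is in flux */
static int check_leaf_relaxed(const struct toy_leaf *l)
{
        return (l->nritems >= 0 && l->ptrs_ok) ? 0 : -1;
}

/* everything the relaxed check does, plus item-data validation */
static int check_leaf_full(const struct toy_leaf *l)
{
        if (check_leaf_relaxed(l))
                return -1;
        return l->data_ok ? 0 : -1;
}

int main(void)
{
        /* mid-update leaf: pointers consistent, data not yet written */
        struct toy_leaf l = { .nritems = 4, .ptrs_ok = 1, .data_ok = 0 };

        printf("relaxed: %s\n", check_leaf_relaxed(&l) ? "corrupt" : "ok");
        printf("full:    %s\n", check_leaf_full(&l) ? "corrupt" : "ok");
        return 0;
}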