Merge "Merge android-4.14-p.70 (e601ab6) into msm-4.14"

commit 2330aa6c4c by qctecmdr Service, 2018-10-17 13:06:44 -07:00; committed by Gerrit - the friendly Code Review server
591 changed files with 4559 additions and 2276 deletions

View File

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 66
+SUBLEVEL = 70
 EXTRAVERSION =
 NAME = Petit Gorille
@@ -357,9 +357,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
 	  else if [ -x /bin/bash ]; then echo /bin/bash; \
 	  else echo sh; fi ; fi)
 
-HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS)
-HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS)
-HOST_LFS_LIBS := $(shell getconf LFS_LIBS)
+HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
+HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
+HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
 
 HOSTCC = gcc
 HOSTCXX = g++
@@ -501,9 +501,13 @@ KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
 endif
 
 RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register
+RETPOLINE_VDSO_CFLAGS_GCC := -mindirect-branch=thunk-inline -mindirect-branch-register
 RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk
+RETPOLINE_VDSO_CFLAGS_CLANG := -mretpoline
 RETPOLINE_CFLAGS := $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG)))
+RETPOLINE_VDSO_CFLAGS := $(call cc-option,$(RETPOLINE_VDSO_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_VDSO_CFLAGS_CLANG)))
 export RETPOLINE_CFLAGS
+export RETPOLINE_VDSO_CFLAGS
 
 ifeq ($(config-targets),1)
 # ===========================================================================

View File

@@ -336,6 +336,9 @@ config HAVE_ARCH_JUMP_LABEL
 config HAVE_RCU_TABLE_FREE
 	bool
 
+config HAVE_RCU_TABLE_INVALIDATE
+	bool
+
 config ARCH_HAVE_NMI_SAFE_CMPXCHG
 	bool

View File

@@ -530,24 +530,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
 SYSCALL_DEFINE1(osf_utsname, char __user *, name)
 {
 	int error;
+	char tmp[5 * 32];
 
 	down_read(&uts_sem);
-	error = -EFAULT;
-	if (copy_to_user(name + 0, utsname()->sysname, 32))
-		goto out;
-	if (copy_to_user(name + 32, utsname()->nodename, 32))
-		goto out;
-	if (copy_to_user(name + 64, utsname()->release, 32))
-		goto out;
-	if (copy_to_user(name + 96, utsname()->version, 32))
-		goto out;
-	if (copy_to_user(name + 128, utsname()->machine, 32))
-		goto out;
-	error = 0;
- out:
-	up_read(&uts_sem);
-	return error;
+	memcpy(tmp + 0 * 32, utsname()->sysname, 32);
+	memcpy(tmp + 1 * 32, utsname()->nodename, 32);
+	memcpy(tmp + 2 * 32, utsname()->release, 32);
+	memcpy(tmp + 3 * 32, utsname()->version, 32);
+	memcpy(tmp + 4 * 32, utsname()->machine, 32);
+	up_read(&uts_sem);
+
+	if (copy_to_user(name, tmp, sizeof(tmp)))
+		return -EFAULT;
+	return 0;
 }
 
 SYSCALL_DEFINE0(getpagesize)
@@ -567,18 +562,21 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
 {
 	int len, err = 0;
 	char *kname;
+	char tmp[32];
 
-	if (namelen > 32)
+	if (namelen < 0 || namelen > 32)
 		namelen = 32;
 
 	down_read(&uts_sem);
 	kname = utsname()->domainname;
 	len = strnlen(kname, namelen);
-	if (copy_to_user(name, kname, min(len + 1, namelen)))
-		err = -EFAULT;
+	len = min(len + 1, namelen);
+	memcpy(tmp, kname, len);
 	up_read(&uts_sem);
 
-	return err;
+	if (copy_to_user(name, tmp, len))
+		return -EFAULT;
+
+	return 0;
 }
 
 /*
@@ -739,13 +737,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
 	};
 	unsigned long offset;
 	const char *res;
-	long len, err = -EINVAL;
+	long len;
+	char tmp[__NEW_UTS_LEN + 1];
 
 	offset = command-1;
 	if (offset >= ARRAY_SIZE(sysinfo_table)) {
 		/* Digital UNIX has a few unpublished interfaces here */
 		printk("sysinfo(%d)", command);
-		goto out;
+		return -EINVAL;
 	}
 
 	down_read(&uts_sem);
@@ -753,13 +752,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
 	len = strlen(res)+1;
 	if ((unsigned long)len > (unsigned long)count)
 		len = count;
-	if (copy_to_user(buf, res, len))
-		err = -EFAULT;
-	else
-		err = 0;
+	memcpy(tmp, res, len);
 	up_read(&uts_sem);
- out:
-	return err;
+
+	if (copy_to_user(buf, tmp, len))
+		return -EFAULT;
+	return 0;
 }
 
 SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,

View File

@@ -45,6 +45,9 @@ config ARC
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZMA
 
+config ARCH_HAS_CACHE_LINE_SIZE
+	def_bool y
+
 config MIGHT_HAVE_PCI
 	bool

View File

@@ -16,7 +16,7 @@ endif
 
 KBUILD_DEFCONFIG := nsim_700_defconfig
 
-cflags-y	+= -fno-common -pipe -fno-builtin -D__linux__
+cflags-y	+= -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT)	+= -mA7
 cflags-$(CONFIG_ISA_ARCV2)	+= -mcpu=archs
@@ -140,16 +140,3 @@ dtbs: scripts
 
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
-
-# Hacks to enable final link due to absence of link-time branch relexation
-# and gcc choosing optimal(shorter) branches at -O3
-#
-# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
-# However lib/decompress_inflate.o (.init.text) calls
-# zlib_inflate_workspacesize (.text) causing relocation errors.
-# Thus forcing all exten calls in this file to be long calls
-export CFLAGS_decompress_inflate.o = -mmedium-calls
-export CFLAGS_initramfs.o = -mmedium-calls
-ifdef CONFIG_SMP
-export CFLAGS_core.o = -mmedium-calls
-endif

View File

@@ -48,7 +48,9 @@
 })
 
 /* Largest line length for either L1 or L2 is 128 bytes */
-#define ARCH_DMA_MINALIGN	128
+#define SMP_CACHE_BYTES		128
+#define cache_line_size()	SMP_CACHE_BYTES
+#define ARCH_DMA_MINALIGN	SMP_CACHE_BYTES
 
 extern void arc_cache_init(void);
 extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);

View File

@@ -17,8 +17,11 @@
 #ifndef __ASM_ARC_UDELAY_H
 #define __ASM_ARC_UDELAY_H
 
+#include <asm-generic/types.h>
 #include <asm/param.h>		/* HZ */
 
+extern unsigned long loops_per_jiffy;
+
 static inline void __delay(unsigned long loops)
 {
 	__asm__ __volatile__(

View File

@@ -34,9 +34,7 @@ struct machine_desc {
 	const char		*name;
 	const char		**dt_compat;
 	void			(*init_early)(void);
-#ifdef CONFIG_SMP
 	void			(*init_per_cpu)(unsigned int);
-#endif
 	void			(*init_machine)(void);
 	void			(*init_late)(void);

View File

@@ -31,10 +31,10 @@ void __init init_IRQ(void)
 	/* a SMP H/w block could do IPI IRQ request here */
 	if (plat_smp_ops.init_per_cpu)
 		plat_smp_ops.init_per_cpu(smp_processor_id());
+#endif
 
 	if (machine_desc->init_per_cpu)
 		machine_desc->init_per_cpu(smp_processor_id());
-#endif
 }
 
 /*

View File

@@ -47,7 +47,8 @@ SYSCALL_DEFINE0(arc_gettls)
 SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 {
 	struct pt_regs *regs = current_pt_regs();
-	int uval = -EFAULT;
+	u32 uval;
+	int ret;
 
 	/*
 	 * This is only for old cores lacking LLOCK/SCOND, which by defintion
@@ -60,23 +61,47 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
 	/* Z indicates to userspace if operation succeded */
 	regs->status32 &= ~STATUS_Z_MASK;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
-		return -EFAULT;
+	ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr));
+	if (!ret)
+		goto fail;
 
+again:
 	preempt_disable();
 
-	if (__get_user(uval, uaddr))
-		goto done;
+	ret = __get_user(uval, uaddr);
+	if (ret)
+		goto fault;
 
-	if (uval == expected) {
-		if (!__put_user(new, uaddr))
-			regs->status32 |= STATUS_Z_MASK;
-	}
+	if (uval != expected)
+		goto out;
 
-done:
-	preempt_enable();
+	ret = __put_user(new, uaddr);
+	if (ret)
+		goto fault;
+
+	regs->status32 |= STATUS_Z_MASK;
 
+out:
+	preempt_enable();
 	return uval;
+
+fault:
+	preempt_enable();
+
+	if (unlikely(ret != -EFAULT))
+		goto fail;
+
+	down_read(&current->mm->mmap_sem);
+	ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
+			       FAULT_FLAG_WRITE, NULL);
+	up_read(&current->mm->mmap_sem);
+
+	if (likely(!ret))
+		goto again;
+
+fail:
+	force_sig(SIGSEGV, current);
+	return ret;
 }
 
 #ifdef CONFIG_ISA_ARCV2

View File

@@ -1035,7 +1035,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
 		      unsigned long pfn)
 {
-	unsigned int paddr = pfn << PAGE_SHIFT;
+	phys_addr_t paddr = pfn << PAGE_SHIFT;
 
 	u_vaddr &= PAGE_MASK;
@@ -1055,8 +1055,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
 		     unsigned long u_vaddr)
 {
 	/* TBD: do we really need to clear the kernel mapping */
-	__flush_dcache_page(page_address(page), u_vaddr);
-	__flush_dcache_page(page_address(page), page_address(page));
+	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
+	__flush_dcache_page((phys_addr_t)page_address(page),
+			    (phys_addr_t)page_address(page));
 }

View File

@@ -21,6 +21,7 @@
 #error "Incorrect ctop.h include"
 #endif
 
+#include <linux/types.h>
 #include <soc/nps/common.h>
 
 /* core auxiliary registers */
@@ -143,6 +144,15 @@ struct nps_host_reg_gim_p_int_dst {
 };
 
 /* AUX registers definition */
+struct nps_host_reg_aux_dpc {
+	union {
+		struct {
+			u32 ien:1, men:1, hen:1, reserved:29;
+		};
+		u32 value;
+	};
+};
+
 struct nps_host_reg_aux_udmc {
 	union {
 		struct {

View File

@@ -15,6 +15,8 @@
  */
 
 #include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/log2.h>
 #include <asm/arcregs.h>
@@ -157,10 +159,10 @@ void mtm_enable_core(unsigned int cpu)
 /* Verify and set the value of the mtm hs counter */
 static int __init set_mtm_hs_ctr(char *ctr_str)
 {
-	long hs_ctr;
+	int hs_ctr;
 	int ret;
 
-	ret = kstrtol(ctr_str, 0, &hs_ctr);
+	ret = kstrtoint(ctr_str, 0, &hs_ctr);
 
 	if (ret || hs_ctr > MT_HS_CNT_MAX || hs_ctr < MT_HS_CNT_MIN) {
 		pr_err("** Invalid @nps_mtm_hs_ctr [%d] needs to be [%d:%d] (incl)\n",

View File

@@ -87,6 +87,11 @@
 	};
 };
 
+/* Table Table 5-79 of the TRM shows 480ab000 is reserved */
+&usb_otg_hs {
+	status = "disabled";
+};
+
 &iva {
 	status = "disabled";
 };

View File

@@ -535,6 +535,8 @@
 			touchscreen-size-x = <480>;
 			touchscreen-size-y = <272>;
+
+			wakeup-source;
 		};
 
 		tlv320aic3106: tlv320aic3106@1b {

View File

@@ -170,7 +170,7 @@
 				      3700 5
 				      3900 6
 				      4000 7>;
-		cooling-cells = <2>;
+		#cooling-cells = <2>;
 	};
 
 	gpio-leds {

View File

@@ -216,7 +216,7 @@
 			reg = <0x18008000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
@@ -245,7 +245,7 @@
 			reg = <0x1800b000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
@@ -256,7 +256,7 @@
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <0>;
@@ -278,10 +278,10 @@
 				compatible = "brcm,iproc-msi";
 				msi-controller;
 				interrupt-parent = <&gic>;
-				interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
-					     <GIC_SPI 97 IRQ_TYPE_NONE>,
-					     <GIC_SPI 98 IRQ_TYPE_NONE>,
-					     <GIC_SPI 99 IRQ_TYPE_NONE>;
+				interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};
@@ -291,7 +291,7 @@
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <1>;
@@ -313,10 +313,10 @@
 				compatible = "brcm,iproc-msi";
 				msi-controller;
 				interrupt-parent = <&gic>;
-				interrupts = <GIC_SPI 102 IRQ_TYPE_NONE>,
-					     <GIC_SPI 103 IRQ_TYPE_NONE>,
-					     <GIC_SPI 104 IRQ_TYPE_NONE>,
-					     <GIC_SPI 105 IRQ_TYPE_NONE>;
+				interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
 			};
 		};

View File

@@ -391,7 +391,7 @@
 			reg = <0x38000 0x50>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			dma-coherent;
 			status = "disabled";
@@ -496,7 +496,7 @@
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <0>;
@@ -519,10 +519,10 @@
 				compatible = "brcm,iproc-msi";
 				msi-controller;
 				interrupt-parent = <&gic>;
-				interrupts = <GIC_SPI 127 IRQ_TYPE_NONE>,
-					     <GIC_SPI 128 IRQ_TYPE_NONE>,
-					     <GIC_SPI 129 IRQ_TYPE_NONE>,
-					     <GIC_SPI 130 IRQ_TYPE_NONE>;
+				interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
 				brcm,pcie-msi-inten;
 			};
 		};
@@ -533,7 +533,7 @@
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <1>;
@@ -556,10 +556,10 @@
 				compatible = "brcm,iproc-msi";
 				msi-controller;
 				interrupt-parent = <&gic>;
-				interrupts = <GIC_SPI 133 IRQ_TYPE_NONE>,
-					     <GIC_SPI 134 IRQ_TYPE_NONE>,
-					     <GIC_SPI 135 IRQ_TYPE_NONE>,
-					     <GIC_SPI 136 IRQ_TYPE_NONE>;
+				interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
 				brcm,pcie-msi-inten;
 			};
 		};
@@ -570,7 +570,7 @@
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <2>;
@@ -593,10 +593,10 @@
 				compatible = "brcm,iproc-msi";
 				msi-controller;
 				interrupt-parent = <&gic>;
-				interrupts = <GIC_SPI 139 IRQ_TYPE_NONE>,
-					     <GIC_SPI 140 IRQ_TYPE_NONE>,
-					     <GIC_SPI 141 IRQ_TYPE_NONE>,
-					     <GIC_SPI 142 IRQ_TYPE_NONE>;
+				interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+					     <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
 				brcm,pcie-msi-inten;
 			};
 		};

View File

@@ -365,7 +365,7 @@
 		i2c0: i2c@18009000 {
 			compatible = "brcm,iproc-i2c";
 			reg = <0x18009000 0x50>;
-			interrupts = <GIC_SPI 121 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clock-frequency = <100000>;

View File

@@ -518,11 +518,7 @@
 			gpio-controller;
 			#gpio-cells = <2>;
 			reg = <0x226000 0x1000>;
-			interrupts = <42 IRQ_TYPE_EDGE_BOTH
-				43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH
-				45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH
-				47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH
-				49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>;
+			interrupts = <42 43 44 45 46 47 48 49 50>;
 			ti,ngpio = <144>;
 			ti,davinci-gpio-unbanked = <0>;
 			status = "disabled";

View File

@@ -644,7 +644,7 @@
 			dsa,member = <0 0>;
 			eeprom-length = <512>;
 			interrupt-parent = <&gpio6>;
-			interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+			interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
 			interrupt-controller;
 			#interrupt-cells = <2>;

View File

@@ -206,6 +206,7 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0x70>;
+			reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
 		};
 	};

View File

@@ -144,9 +144,11 @@ CONFIG_USB_STORAGE=y
 CONFIG_USB_CHIPIDEA=y
 CONFIG_USB_CHIPIDEA_UDC=y
 CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
 CONFIG_NOP_USB_XCEIV=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_ETH=m
+CONFIG_USB_ULPI_BUS=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y

View File

@@ -773,7 +773,7 @@ static struct gpiod_lookup_table mmc_gpios_table = {
 		GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
 			    GPIO_ACTIVE_LOW),
 		GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
-			    GPIO_ACTIVE_LOW),
+			    GPIO_ACTIVE_HIGH),
 	},
 };

View File

@@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void)
 static inline void omap5_erratum_workaround_801819(void) { }
 #endif
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/*
+ * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
+ * ICIALLU) to activate the workaround for secondary Core.
+ * NOTE: it is assumed that the primary core's configuration is done
+ * by the boot loader (kernel will detect a misconfiguration and complain
+ * if this is not done).
+ *
+ * In General Purpose(GP) devices, ACR bit settings can only be done
+ * by ROM code in "secure world" using the smc call and there is no
+ * option to update the "firmware" on such devices. This also works for
+ * High security(HS) devices, as a backup option in case the
+ * "update" is not done in the "security firmware".
+ */
+static void omap5_secondary_harden_predictor(void)
+{
+	u32 acr, acr_mask;
+
+	asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+
+	/*
+	 * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
+	 */
+	acr_mask = BIT(0);
+
+	/* Do we already have it done.. if yes, skip expensive smc */
+	if ((acr & acr_mask) == acr_mask)
+		return;
+
+	acr |= acr_mask;
+	omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+	pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
+		 __func__, smp_processor_id());
+}
+#else
+static inline void omap5_secondary_harden_predictor(void) { }
+#endif
+
 static void omap4_secondary_init(unsigned int cpu)
 {
 	/*
@@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu)
 		set_cntfreq();
 		/* Configure ACR to disable streaming WA for 801819 */
 		omap5_erratum_workaround_801819();
+		/* Enable ACR to allow for ICUALLU workaround */
+		omap5_secondary_harden_predictor();
 	}
 
 	/*

View File

@@ -185,7 +185,7 @@ static int pxa_irq_suspend(void)
 {
 	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+	for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
 		void __iomem *base = irq_base(i);
 
 		saved_icmr[i] = __raw_readl(base + ICMR);
@@ -204,7 +204,7 @@ static void pxa_irq_resume(void)
 {
 	int i;
 
-	for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+	for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
 		void __iomem *base = irq_base(i);
 
 		__raw_writel(saved_icmr[i], base + ICMR);

View File

@@ -18,6 +18,7 @@ config ARCH_ROCKCHIP
 	select ARM_GLOBAL_TIMER
 	select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
 	select ZONE_DMA if ARM_LPAE
+	select PM
 	help
 	  Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs
 	  containing the RK2928, RK30xx and RK31xx series.

View File

@@ -795,19 +795,28 @@ static int __mark_rodata_ro(void *unused)
 	return 0;
 }
 
+static int kernel_set_to_readonly __read_mostly;
+
 void mark_rodata_ro(void)
 {
+	kernel_set_to_readonly = 1;
 	stop_machine(__mark_rodata_ro, NULL, NULL);
 }
 
 void set_kernel_text_rw(void)
 {
+	if (!kernel_set_to_readonly)
+		return;
+
 	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
 				current->active_mm);
 }
 
 void set_kernel_text_ro(void)
 {
+	if (!kernel_set_to_readonly)
+		return;
+
 	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
 				current->active_mm);
 }

View File

@@ -291,8 +291,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
 				break;
 			case KPROBE_REENTER:
 				/* A nested probe was hit in FIQ, it is a BUG */
-				pr_warn("Unrecoverable kprobe detected at %p.\n",
-					p->addr);
+				pr_warn("Unrecoverable kprobe detected.\n");
+				dump_kprobe(p);
 				/* fall through */
 			default:
 				/* impossible cases */

View File

@@ -1517,7 +1517,6 @@ fail:
 	print_registers(&result_regs);
 
 	if (mem) {
-		pr_err("current_stack=%p\n", current_stack);
 		pr_err("expected_memory:\n");
 		print_memory(expected_memory, mem_size);
 		pr_err("result_memory:\n");

View File

@@ -742,7 +742,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
 
 config HOLES_IN_ZONE
 	def_bool y
-	depends on NUMA
 
 source kernel/Kconfig.preempt
 source kernel/Kconfig.hz

View File

@@ -205,6 +205,7 @@ config ARCH_ROCKCHIP
 	select GPIOLIB
 	select PINCTRL
 	select PINCTRL_ROCKCHIP
+	select PM
 	select ROCKCHIP_TIMER
 	help
 	  This enables support for the ARMv8 based Rockchip chipsets,

View File

@@ -7,7 +7,7 @@
 
 &apb {
 	mali: gpu@c0000 {
-		compatible = "amlogic,meson-gxbb-mali", "arm,mali-450";
+		compatible = "amlogic,meson-gxl-mali", "arm,mali-450";
 		reg = <0x0 0xc0000 0x0 0x40000>;
 		interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,

View File

@@ -118,7 +118,7 @@
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <0>;
@@ -149,7 +149,7 @@
 			#interrupt-cells = <1>;
 			interrupt-map-mask = <0 0 0 0>;
-			interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_NONE>;
+			interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
 
 			linux,pci-domain = <4>;
@@ -566,7 +566,7 @@
 			reg = <0x66080000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 394 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
@@ -594,7 +594,7 @@
 			reg = <0x660b0000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 395 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};

View File

@@ -43,6 +43,10 @@
 	enet-phy-lane-swap;
 };
 
+&sdio0 {
+	mmc-ddr-1_8v;
+};
+
 &uart2 {
 	status = "okay";
 };

View File

@@ -42,3 +42,7 @@
 &gphy0 {
 	enet-phy-lane-swap;
 };
+
+&sdio0 {
+	mmc-ddr-1_8v;
+};

View File

@@ -409,7 +409,7 @@
 			reg = <0x000b0000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 177 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};
@@ -453,7 +453,7 @@
 			reg = <0x000e0000 0x100>;
 			#address-cells = <1>;
 			#size-cells = <0>;
-			interrupts = <GIC_SPI 178 IRQ_TYPE_NONE>;
+			interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
 			clock-frequency = <100000>;
 			status = "disabled";
 		};

View File

@@ -1132,14 +1132,14 @@
 			port@0 {
 				reg = <0>;
-				etf_out: endpoint {
+				etf_in: endpoint {
 					slave-mode;
 					remote-endpoint = <&funnel0_out>;
 				};
 			};
 
 			port@1 {
 				reg = <0>;
-				etf_in: endpoint {
+				etf_out: endpoint {
 					remote-endpoint = <&replicator_in>;
 				};
 			};

View File

@@ -331,7 +331,7 @@
 		reg = <0x0 0xff120000 0x0 0x100>;
 		interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
 		clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
-		clock-names = "sclk_uart", "pclk_uart";
+		clock-names = "baudclk", "apb_pclk";
 		dmas = <&dmac 4>, <&dmac 5>;
 		#dma-cells = <2>;
 		pinctrl-names = "default";

View File

@@ -20,9 +20,14 @@
 #define CTR_L1IP_SHIFT		14
 #define CTR_L1IP_MASK		3
+#define CTR_DMINLINE_SHIFT	16
+#define CTR_IMINLINE_SHIFT	0
 #define CTR_CWG_SHIFT		24
 #define CTR_CWG_MASK		15
 
+#define CTR_CACHE_MINLINE_MASK	\
+	(0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
+
 #define CTR_L1IP(ctr)		(((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
 
 #define ICACHE_POLICY_VPIPT	0

View File

@@ -45,7 +45,8 @@
 #define ARM64_HARDEN_BP_POST_GUEST_EXIT		25
 #define ARM64_HW_DBM				26
 #define ARM64_SSBD				27
+#define ARM64_MISMATCHED_CACHE_TYPE		28
 
-#define ARM64_NCAPS				28
+#define ARM64_NCAPS				29
 
 #endif /* __ASM_CPUCAPS_H */

View File

@@ -17,8 +17,8 @@
  */
 
 #include <linux/arm-smccc.h>
+#include <linux/types.h>
 #include <linux/psci.h>
-#include <linux/types.h>
 
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
@@ -47,12 +47,18 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
 }
 
 static bool
-has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
-			       int scope)
+has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
+			  int scope)
 {
+	u64 mask = CTR_CACHE_MINLINE_MASK;
+
+	/* Skip matching the min line sizes for cache type check */
+	if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
+		mask ^= arm64_ftr_reg_ctrel0.strict_mask;
+
 	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
-	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
-		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
+	return (read_cpuid_cachetype() & mask) !=
+	       (arm64_ftr_reg_ctrel0.sys_val & mask);
 }
 
 static int cpu_enable_trap_ctr_access(void *__unused)
@@ -475,7 +481,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 	{
 		.desc = "Mismatched cache line size",
 		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
-		.matches = has_mismatched_cache_line_size,
+		.matches = has_mismatched_cache_type,
+		.def_scope = SCOPE_LOCAL_CPU,
+		.enable = cpu_enable_trap_ctr_access,
+	},
+	{
+		.desc = "Mismatched cache type",
+		.capability = ARM64_MISMATCHED_CACHE_TYPE,
+		.matches = has_mismatched_cache_type,
 		.def_scope = SCOPE_LOCAL_CPU,
 		.enable = cpu_enable_trap_ctr_access,
 	},

View File

@@ -180,14 +180,14 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1),	/* IDC */
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0),	/* ERG */
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
 	/*
 	 * Linux can handle differing I-cache policies. Userspace JITs will
 	 * make use of *minLine.
 	 * If we have differing I-cache policies, report it as the weakest - VIPT.
 	 */
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT),	/* L1Ip */
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };

View File

@@ -275,7 +275,7 @@ static int __kprobes reenter_kprobe(struct kprobe *p,
 		break;
 	case KPROBE_HIT_SS:
 	case KPROBE_REENTER:
-		pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
+		pr_warn("Unrecoverable kprobe detected.\n");
 		dump_kprobe(p);
 		BUG();
 		break;

View File

@@ -218,7 +218,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
-asmlinkage void secondary_start_kernel(void)
+asmlinkage notrace void secondary_start_kernel(void)
 {
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu;

View File

@@ -770,13 +770,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 						    size >> PAGE_SHIFT);
 			return NULL;
 		}
-		if (!coherent)
-			__dma_flush_area(page_to_virt(page), iosize);
-
 		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
 						   prot,
 						   __builtin_return_address(0));
-		if (!addr) {
+		if (addr) {
+			memset(addr, 0, size);
+			if (!coherent)
+				__dma_flush_area(page_to_virt(page), iosize);
+		} else {
 			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
 			dma_release_from_contiguous(dev, page,
 						    size >> PAGE_SHIFT);

View File

@@ -288,11 +288,13 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
-#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)
-
 int pfn_valid(unsigned long pfn)
 {
-	return (pfn & PFN_MASK) == pfn && memblock_is_map_memory(pfn << PAGE_SHIFT);
+	phys_addr_t addr = pfn << PAGE_SHIFT;
+
+	if ((addr >> PAGE_SHIFT) != pfn)
+		return 0;
+	return memblock_is_map_memory(addr);
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif

View File

@@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
 				  unsigned long address)
 {
+	pgtable_page_dtor(page);
 	__free_page(page);
 }
 
@@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 	return page;
 }
 
-extern inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
 {
+	pgtable_page_dtor(page);
 	__free_page(page);
 }

View File

@@ -155,15 +155,11 @@ cflags-$(CONFIG_CPU_R4300)	+= -march=r4300 -Wa,--trap
 cflags-$(CONFIG_CPU_VR41XX)	+= -march=r4100 -Wa,--trap
 cflags-$(CONFIG_CPU_R4X00)	+= -march=r4600 -Wa,--trap
 cflags-$(CONFIG_CPU_TX49XX)	+= -march=r4600 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS32_R1)	+= $(call cc-option,-march=mips32,-mips32 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
-			-Wa,-mips32 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS32_R2)	+= $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
-			-Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R1)	+= -march=mips32 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R2)	+= -march=mips32r2 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS32_R6)	+= -march=mips32r6 -Wa,--trap -modd-spreg
-cflags-$(CONFIG_CPU_MIPS64_R1)	+= $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
-			-Wa,-mips64 -Wa,--trap
-cflags-$(CONFIG_CPU_MIPS64_R2)	+= $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
-			-Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R1)	+= -march=mips64 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R2)	+= -march=mips64r2 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R6)	+= -march=mips64r6 -Wa,--trap
 cflags-$(CONFIG_CPU_R5000)	+= -march=r5000 -Wa,--trap
 cflags-$(CONFIG_CPU_R5432)	+= $(call cc-option,-march=r5400,-march=r5000) \

View File

@@ -212,12 +212,6 @@ static int __init bcm47xx_cpu_fixes(void)
 		 */
 		if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
 			cpu_wait = NULL;
-
-		/*
-		 * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
-		 * Enable ExternalSync for sync instruction to take effect
-		 */
-		set_c0_config7(MIPS_CONF7_ES);
 		break;
 #endif
 	}

View File

@@ -680,8 +680,6 @@
 #define MIPS_CONF7_WII		(_ULCAST_(1) << 31)
 
 #define MIPS_CONF7_RPS		(_ULCAST_(1) << 2)
-/* ExternalSync */
-#define MIPS_CONF7_ES		(_ULCAST_(1) << 8)
 
 #define MIPS_CONF7_IAR		(_ULCAST_(1) << 10)
 #define MIPS_CONF7_AR		(_ULCAST_(1) << 16)
@@ -2747,7 +2745,6 @@ __BUILD_SET_C0(status)
 __BUILD_SET_C0(cause)
 __BUILD_SET_C0(config)
 __BUILD_SET_C0(config5)
-__BUILD_SET_C0(config7)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)

View File

@@ -141,7 +141,7 @@ struct mips_fpu_struct {
 
 #define NUM_DSP_REGS   6
 
-typedef __u32 dspreg_t;
+typedef unsigned long dspreg_t;
 
 struct mips_dsp_state {
 	dspreg_t	dspr[NUM_DSP_REGS];
@@ -388,7 +388,20 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
 #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
 
+#ifdef CONFIG_CPU_LOONGSON3
+/*
+ * Loongson-3's SFB (Store-Fill-Buffer) may buffer writes indefinitely when a
+ * tight read loop is executed, because reads take priority over writes & the
+ * hardware (incorrectly) doesn't ensure that writes will eventually occur.
+ *
+ * Since spin loops of any kind should have a cpu_relax() in them, force an SFB
+ * flush from cpu_relax() such that any pending writes will become visible as
+ * expected.
+ */
+#define cpu_relax()	smp_mb()
+#else
 #define cpu_relax()	barrier()
+#endif
 
 /*
  * Return_address is a replacement for __builtin_return_address(count)

View File

@@ -847,7 +847,7 @@ long arch_ptrace(struct task_struct *child, long request,
 				goto out;
 			}
 			dregs = __get_dsp_regs(child);
-			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+			tmp = dregs[addr - DSP_BASE];
 			break;
 		}
 		case DSP_CONTROL:

View File

@@ -141,7 +141,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 				goto out;
 			}
 			dregs = __get_dsp_regs(child);
-			tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+			tmp = dregs[addr - DSP_BASE];
 			break;
 		}
 		case DSP_CONTROL:

View File

@@ -4,12 +4,12 @@
 #include "libgcc.h"
 
 /*
- * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
- * specific case only we'll implement it here.
+ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
+ * that specific case only we implement that intrinsic here.
 *
 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
 */
-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
 
 /* multiply 64-bit values, low 64-bits returned */
 static inline long long notrace dmulu(long long a, long long b)

View File

@@ -221,12 +221,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
 	l.addi  r3,r1,0                    // pt_regs
 	/* r4 set be EXCEPTION_HANDLE */   // effective address of fault
 
-	/*
-	 * __PHX__: TODO
-	 *
-	 * all this can be written much simpler. look at
-	 * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
-	 */
 #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
 	l.lwz   r6,PT_PC(r3)               // address of an offending insn
 	l.lwz   r6,0(r6)                   // instruction that caused pf
@@ -258,7 +252,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
 
 #else
 
-	l.lwz   r6,PT_SR(r3)               // SR
+	l.mfspr r6,r0,SPR_SR               // SR
 	l.andi  r6,r6,SPR_SR_DSX           // check for delay slot exception
 	l.sfne  r6,r0                      // exception happened in delay slot
 	l.bnf   7f

View File

@@ -141,8 +141,7 @@
  * r4  - EEAR     exception EA
  * r10 - current  pointing to current_thread_info struct
  * r12 - syscall  0, since we didn't come from syscall
- * r13 - temp	it actually contains new SR, not needed anymore
- * r31 - handler address of the handler we'll jump to
+ * r30 - handler address of the handler we'll jump to
  *
  * handler has to save remaining registers to the exception
  * ksp frame *before* tainting them!
@@ -178,6 +177,7 @@
 	/* r1 is KSP, r30 is __pa(KSP) */			;\
 	tophys  (r30,r1)					;\
 	l.sw    PT_GPR12(r30),r12				;\
+	/* r4 use for tmp before EA */				;\
 	l.mfspr r12,r0,SPR_EPCR_BASE				;\
 	l.sw    PT_PC(r30),r12					;\
 	l.mfspr r12,r0,SPR_ESR_BASE				;\
@@ -197,7 +197,10 @@
 	/* r12 == 1 if we come from syscall */			;\
 	CLEAR_GPR(r12)						;\
 	/* ----- turn on MMU ----- */				;\
-	l.ori	r30,r0,(EXCEPTION_SR)				;\
+	/* Carry DSX into exception SR */			;\
+	l.mfspr r30,r0,SPR_SR					;\
+	l.andi	r30,r30,SPR_SR_DSX				;\
+	l.ori	r30,r30,(EXCEPTION_SR)				;\
 	l.mtspr	r0,r30,SPR_ESR_BASE				;\
 	/* r30:	EA address of handler */			;\
 	LOAD_SYMBOL_2_GPR(r30,handler)				;\

View File

@@ -358,7 +358,7 @@ static inline int in_delay_slot(struct pt_regs *regs)
 		return 0;
 	}
 #else
-	return regs->sr & SPR_SR_DSX;
+	return mfspr(SPR_SR) & SPR_SR_DSX;
 #endif
 }

View File

@@ -20,7 +20,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 {
 	volatile unsigned int *a;
 
-	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
 		while (*a == 0)
@@ -30,16 +29,15 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 				local_irq_disable();
 			} else
 				cpu_relax();
-	mb();
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
-	mb();
 
 	a = __ldcw_align(x);
-	*a = 1;
 	mb();
+	*a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -47,10 +45,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 	int ret;
 
-	mb();
 	a = __ldcw_align(x);
 	ret = __ldcw(a) != 0;
-	mb();
 
 	return ret;
 }

View File

@@ -629,12 +629,12 @@ cas_action:
 	stw	%r1, 4(%sr2,%r20)
 #endif
 	/* The load and store could fail */
-1:	ldw,ma	0(%r26), %r28
+1:	ldw	0(%r26), %r28
 	sub,<>	%r28, %r25, %r0
-2:	stw,ma	%r24, 0(%r26)
+2:	stw	%r24, 0(%r26)
 	/* Free lock */
 	sync
-	stw,ma	%r20, 0(%sr2,%r20)
+	stw	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
 	/* Clear thread register indicator */
 	stw	%r0, 4(%sr2,%r20)
@@ -798,30 +798,30 @@ cas2_action:
 	ldo	1(%r0),%r28
 
 	/* 8bit CAS */
-13:	ldb,ma	0(%r26), %r29
+13:	ldb	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
-14:	stb,ma	%r24, 0(%r26)
+14:	stb	%r24, 0(%r26)
 	b	cas2_end
 	copy	%r0, %r28
 	nop
 	nop
 
 	/* 16bit CAS */
-15:	ldh,ma	0(%r26), %r29
+15:	ldh	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
-16:	sth,ma	%r24, 0(%r26)
+16:	sth	%r24, 0(%r26)
 	b	cas2_end
 	copy	%r0, %r28
 	nop
 	nop
 
 	/* 32bit CAS */
-17:	ldw,ma	0(%r26), %r29
+17:	ldw	0(%r26), %r29
 	sub,=	%r29, %r25, %r0
 	b,n	cas2_end
-18:	stw,ma	%r24, 0(%r26)
+18:	stw	%r24, 0(%r26)
 	b	cas2_end
 	copy	%r0, %r28
 	nop
@@ -829,10 +829,10 @@ cas2_action:
 
 	/* 64bit CAS */
 #ifdef CONFIG_64BIT
-19:	ldd,ma	0(%r26), %r29
+19:	ldd	0(%r26), %r29
 	sub,*=	%r29, %r25, %r0
 	b,n	cas2_end
-20:	std,ma	%r24, 0(%r26)
+20:	std	%r24, 0(%r26)
 	copy	%r0, %r28
 #else
 	/* Compare first word */
@@ -851,7 +851,7 @@ cas2_action:
 cas2_end:
 	/* Free lock */
 	sync
-	stw,ma	%r20, 0(%sr2,%r20)
+	stw	%r20, 0(%sr2,%r20)
 	/* Enable interrupts */
 	ssm	PSW_SM_I, %r0
 	/* Return to userspace, set no error */

View File

@@ -195,9 +195,6 @@ struct fadump_crash_info_header {
 	struct cpumask	online_mask;
 };
 
-/* Crash memory ranges */
-#define INIT_CRASHMEM_RANGES	(INIT_MEMBLOCK_REGIONS + 2)
-
 struct fad_crash_memory_ranges {
 	unsigned long long	base;
 	unsigned long long	size;

View File

@@ -223,10 +223,17 @@ do {								\
 	}							\
 } while (0)
 
+/*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+ */
+#define __long_type(x) \
+	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
 #define __get_user_nocheck(x, ptr, size)			\
 ({								\
 	long __gu_err;						\
-	unsigned long __gu_val;					\
+	__long_type(*(ptr)) __gu_val;				\
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
 	__chk_user_ptr(ptr);					\
 	if (!is_kernel_addr((unsigned long)__gu_addr))		\
@@ -239,7 +246,7 @@ do {								\
 #define __get_user_check(x, ptr, size)				\
 ({								\
 	long __gu_err = -EFAULT;				\
-	unsigned long __gu_val = 0;				\
+	__long_type(*(ptr)) __gu_val = 0;			\
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
 	might_fault();						\
 	if (access_ok(VERIFY_READ, __gu_addr, (size)))		\
@@ -251,7 +258,7 @@ do {								\
 #define __get_user_nosleep(x, ptr, size)			\
 ({								\
 	long __gu_err;						\
-	unsigned long __gu_val;					\
+	__long_type(*(ptr)) __gu_val;				\
 	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
 	__chk_user_ptr(ptr);					\
 	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\

View File

@@ -1452,6 +1452,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
 TRAMP_REAL_BEGIN(rfi_flush_fallback)
 	SET_SCRATCH0(r13);
 	GET_PACA(r13);
+	std	r1,PACA_EXRFI+EX_R12(r13)
+	ld	r1,PACAKSAVE(r13)
 	std	r9,PACA_EXRFI+EX_R9(r13)
 	std	r10,PACA_EXRFI+EX_R10(r13)
 	std	r11,PACA_EXRFI+EX_R11(r13)
@@ -1486,12 +1488,15 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
 	ld	r9,PACA_EXRFI+EX_R9(r13)
 	ld	r10,PACA_EXRFI+EX_R10(r13)
 	ld	r11,PACA_EXRFI+EX_R11(r13)
+	ld	r1,PACA_EXRFI+EX_R12(r13)
 	GET_SCRATCH0(r13);
 	rfid
 
 TRAMP_REAL_BEGIN(hrfi_flush_fallback)
 	SET_SCRATCH0(r13);
 	GET_PACA(r13);
+	std	r1,PACA_EXRFI+EX_R12(r13)
+	ld	r1,PACAKSAVE(r13)
 	std	r9,PACA_EXRFI+EX_R9(r13)
 	std	r10,PACA_EXRFI+EX_R10(r13)
 	std	r11,PACA_EXRFI+EX_R11(r13)
@@ -1526,6 +1531,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
 	ld	r9,PACA_EXRFI+EX_R9(r13)
 	ld	r10,PACA_EXRFI+EX_R10(r13)
 	ld	r11,PACA_EXRFI+EX_R11(r13)
+	ld	r1,PACA_EXRFI+EX_R12(r13)
 	GET_SCRATCH0(r13);
 	hrfid

View File

@@ -47,8 +47,10 @@ static struct fadump_mem_struct fdm;
 static const struct fadump_mem_struct *fdm_active;
 
 static DEFINE_MUTEX(fadump_mutex);
-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
+struct fad_crash_memory_ranges *crash_memory_ranges;
+int crash_memory_ranges_size;
 int crash_mem_ranges;
+int max_crash_mem_ranges;
 
 /* Scan the Firmware Assisted dump configuration details. */
 int __init early_init_dt_scan_fw_dump(unsigned long node,
@@ -843,38 +845,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
 	return 0;
 }
 
-static inline void fadump_add_crash_memory(unsigned long long base,
-					unsigned long long end)
+static void free_crash_memory_ranges(void)
+{
+	kfree(crash_memory_ranges);
+	crash_memory_ranges = NULL;
+	crash_memory_ranges_size = 0;
+	max_crash_mem_ranges = 0;
+}
+
+/*
+ * Allocate or reallocate crash memory ranges array in incremental units
+ * of PAGE_SIZE.
+ */
+static int allocate_crash_memory_ranges(void)
+{
+	struct fad_crash_memory_ranges *new_array;
+	u64 new_size;
+
+	new_size = crash_memory_ranges_size + PAGE_SIZE;
+	pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
+		 new_size);
+
+	new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
+	if (new_array == NULL) {
+		pr_err("Insufficient memory for setting up crash memory ranges\n");
+		free_crash_memory_ranges();
+		return -ENOMEM;
+	}
+
+	crash_memory_ranges = new_array;
+	crash_memory_ranges_size = new_size;
+	max_crash_mem_ranges = (new_size /
+				sizeof(struct fad_crash_memory_ranges));
+	return 0;
+}
+
+static inline int fadump_add_crash_memory(unsigned long long base,
+					  unsigned long long end)
 {
 	if (base == end)
-		return;
+		return 0;
+
+	if (crash_mem_ranges == max_crash_mem_ranges) {
+		int ret;
+
+		ret = allocate_crash_memory_ranges();
+		if (ret)
+			return ret;
+	}
 
 	pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
 		crash_mem_ranges, base, end - 1, (end - base));
 	crash_memory_ranges[crash_mem_ranges].base = base;
 	crash_memory_ranges[crash_mem_ranges].size = end - base;
 	crash_mem_ranges++;
+	return 0;
 }
 
-static void fadump_exclude_reserved_area(unsigned long long start,
+static int fadump_exclude_reserved_area(unsigned long long start,
 					unsigned long long end)
 {
 	unsigned long long ra_start, ra_end;
+	int ret = 0;
 
 	ra_start = fw_dump.reserve_dump_area_start;
 	ra_end = ra_start + fw_dump.reserve_dump_area_size;
 
 	if ((ra_start < end) && (ra_end > start)) {
 		if ((start < ra_start) && (end > ra_end)) {
-			fadump_add_crash_memory(start, ra_start);
-			fadump_add_crash_memory(ra_end, end);
+			ret = fadump_add_crash_memory(start, ra_start);
+			if (ret)
+				return ret;
+
+			ret = fadump_add_crash_memory(ra_end, end);
 		} else if (start < ra_start) {
-			fadump_add_crash_memory(start, ra_start);
+			ret = fadump_add_crash_memory(start, ra_start);
 		} else if (ra_end < end) {
-			fadump_add_crash_memory(ra_end, end);
+			ret = fadump_add_crash_memory(ra_end, end);
 		}
 	} else
-		fadump_add_crash_memory(start, end);
+		ret = fadump_add_crash_memory(start, end);
+
+	return ret;
 }
 
 static int fadump_init_elfcore_header(char *bufp)
@@ -914,10 +966,11 @@ static int fadump_init_elfcore_header(char *bufp)
  * Traverse through memblock structure and setup crash memory ranges. These
  * ranges will be used create PT_LOAD program headers in elfcore header.
  */
-static void fadump_setup_crash_memory_ranges(void)
+static int fadump_setup_crash_memory_ranges(void)
 {
 	struct memblock_region *reg;
 	unsigned long long start, end;
+	int ret;
 
 	pr_debug("Setup crash memory ranges.\n");
 	crash_mem_ranges = 0;
@@ -928,7 +981,9 @@ static int fadump_setup_crash_memory_ranges(void)
 	 * specified during fadump registration. We need to create a separate
 	 * program header for this chunk with the correct offset.
 	 */
-	fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+	ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+	if (ret)
+		return ret;
 
 	for_each_memblock(memory, reg) {
 		start = (unsigned long long)reg->base;
@@ -948,8 +1003,12 @@ static int fadump_setup_crash_memory_ranges(void)
 		}
 
 		/* add this range excluding the reserved dump area. */
-		fadump_exclude_reserved_area(start, end);
+		ret = fadump_exclude_reserved_area(start, end);
+		if (ret)
+			return ret;
 	}
+
+	return 0;
 }
 
 /*
@@ -1072,6 +1131,7 @@ static int register_fadump(void)
 {
 	unsigned long addr;
 	void *vaddr;
+	int ret;
 
 	/*
 	 * If no memory is reserved then we can not register for firmware-
@@ -1080,7 +1140,9 @@ static int register_fadump(void)
 	if (!fw_dump.reserve_dump_area_size)
 		return -ENODEV;
 
-	fadump_setup_crash_memory_ranges();
+	ret = fadump_setup_crash_memory_ranges();
+	if (ret)
+		return ret;
 
 	addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
 	/* Initialize fadump crash info header. */
@@ -1158,6 +1220,7 @@ void fadump_cleanup(void)
 	} else if (fw_dump.dump_registered) {
 		/* Un-register Firmware-assisted dump if it was registered. */
 		fadump_unregister_dump(&fdm);
+		free_crash_memory_ranges();
 	}
 }


@ -130,6 +130,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
long i, j, ret = 0, locked_entries = 0; long i, j, ret = 0, locked_entries = 0;
unsigned int pageshift; unsigned int pageshift;
unsigned long flags; unsigned long flags;
unsigned long cur_ua;
struct page *page = NULL; struct page *page = NULL;
mutex_lock(&mem_list_mutex); mutex_lock(&mem_list_mutex);
@ -178,7 +179,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
} }
for (i = 0; i < entries; ++i) { for (i = 0; i < entries; ++i) {
if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT), cur_ua = ua + (i << PAGE_SHIFT);
if (1 != get_user_pages_fast(cur_ua,
1/* pages */, 1/* iswrite */, &page)) { 1/* pages */, 1/* iswrite */, &page)) {
ret = -EFAULT; ret = -EFAULT;
for (j = 0; j < i; ++j) for (j = 0; j < i; ++j)
@ -197,7 +199,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
if (is_migrate_cma_page(page)) { if (is_migrate_cma_page(page)) {
if (mm_iommu_move_page_from_cma(page)) if (mm_iommu_move_page_from_cma(page))
goto populate; goto populate;
if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT), if (1 != get_user_pages_fast(cur_ua,
1/* pages */, 1/* iswrite */, 1/* pages */, 1/* iswrite */,
&page)) { &page)) {
ret = -EFAULT; ret = -EFAULT;
@ -211,20 +213,21 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
} }
populate: populate:
pageshift = PAGE_SHIFT; pageshift = PAGE_SHIFT;
if (PageCompound(page)) { if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
pte_t *pte; pte_t *pte;
struct page *head = compound_head(page); struct page *head = compound_head(page);
unsigned int compshift = compound_order(head); unsigned int compshift = compound_order(head);
unsigned int pteshift;
local_irq_save(flags); /* disables as well */ local_irq_save(flags); /* disables as well */
pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift); pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);
local_irq_restore(flags);
/* Double check it is still the same pinned page */ /* Double check it is still the same pinned page */
if (pte && pte_page(*pte) == head && if (pte && pte_page(*pte) == head &&
pageshift == compshift) pteshift == compshift + PAGE_SHIFT)
pageshift = max_t(unsigned int, pageshift, pageshift = max_t(unsigned int, pteshift,
PAGE_SHIFT); PAGE_SHIFT);
local_irq_restore(flags);
} }
mem->pageshift = min(mem->pageshift, pageshift); mem->pageshift = min(mem->pageshift, pageshift);
mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;


@ -322,6 +322,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
u64 imm64; u64 imm64;
u8 *func; u8 *func;
u32 true_cond; u32 true_cond;
u32 tmp_idx;
/* /*
* addrs[] maps a BPF bytecode address into a real offset from * addrs[] maps a BPF bytecode address into a real offset from
@ -681,11 +682,7 @@ emit_clear:
case BPF_STX | BPF_XADD | BPF_W: case BPF_STX | BPF_XADD | BPF_W:
/* Get EA into TMP_REG_1 */ /* Get EA into TMP_REG_1 */
PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
/* error if EA is not word-aligned */ tmp_idx = ctx->idx * 4;
PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
PPC_LI(b2p[BPF_REG_0], 0);
PPC_JMP(exit_addr);
/* load value from memory into TMP_REG_2 */ /* load value from memory into TMP_REG_2 */
PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
/* add value from src_reg into this */ /* add value from src_reg into this */
@ -693,32 +690,16 @@ emit_clear:
/* store result back */ /* store result back */
PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
/* we're done if this succeeded */ /* we're done if this succeeded */
PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4)); PPC_BCC_SHORT(COND_NE, tmp_idx);
/* otherwise, let's try once more */
PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
/* exit if the store was not successful */
PPC_LI(b2p[BPF_REG_0], 0);
PPC_BCC(COND_NE, exit_addr);
break; break;
/* *(u64 *)(dst + off) += src */ /* *(u64 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_DW: case BPF_STX | BPF_XADD | BPF_DW:
PPC_ADDI(b2p[TMP_REG_1], dst_reg, off); PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
/* error if EA is not doubleword-aligned */ tmp_idx = ctx->idx * 4;
PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
PPC_LI(b2p[BPF_REG_0], 0);
PPC_JMP(exit_addr);
PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0); PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg); PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]); PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4)); PPC_BCC_SHORT(COND_NE, tmp_idx);
PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
PPC_LI(b2p[BPF_REG_0], 0);
PPC_BCC(COND_NE, exit_addr);
break; break;
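Both XADD cases now branch back to tmp_idx and simply re-issue the load-reserve/store-conditional pair until the store succeeds, instead of unrolling one retry and falling through to an exit path. A hedged C analogue of that retry loop, using GCC's __atomic builtins in place of the emitted LWARX/STWCX. instructions:

/* Illustrative only: atomic "*addr += val" as a retry loop, the same
 * shape the JIT now emits for BPF_XADD. */
static void atomic_add_u32(unsigned int *addr, unsigned int val)
{
	unsigned int old = __atomic_load_n(addr, __ATOMIC_RELAXED);

	/* On failure 'old' is refreshed and we just try again; there is
	 * no error path, matching the removed alignment/exit code. */
	while (!__atomic_compare_exchange_n(addr, &old, old + val,
					    0, __ATOMIC_SEQ_CST,
					    __ATOMIC_SEQ_CST))
		;
}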
/* /*


@ -9,8 +9,10 @@
* option) any later version. * option) any later version.
*/ */
#include <linux/init.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_address.h> #include <linux/of_address.h>
@ -150,3 +152,5 @@ static int __init t1042rdb_diu_init(void)
} }
early_initcall(t1042rdb_diu_init); early_initcall(t1042rdb_diu_init);
MODULE_LICENSE("GPL");


@ -3286,12 +3286,49 @@ static void pnv_pci_ioda_create_dbgfs(void)
#endif /* CONFIG_DEBUG_FS */ #endif /* CONFIG_DEBUG_FS */
} }
static void pnv_pci_enable_bridge(struct pci_bus *bus)
{
struct pci_dev *dev = bus->self;
struct pci_bus *child;
/* Empty bus ? bail */
if (list_empty(&bus->devices))
return;
/*
* If there's a bridge associated with that bus enable it. This works
* around races in the generic code if the enabling is done during
* parallel probing. This can be removed once those races have been
* fixed.
*/
if (dev) {
int rc = pci_enable_device(dev);
if (rc)
pci_err(dev, "Error enabling bridge (%d)\n", rc);
pci_set_master(dev);
}
/* Perform the same to child busses */
list_for_each_entry(child, &bus->children, node)
pnv_pci_enable_bridge(child);
}
static void pnv_pci_enable_bridges(void)
{
struct pci_controller *hose;
list_for_each_entry(hose, &hose_list, list_node)
pnv_pci_enable_bridge(hose->bus);
}
static void pnv_pci_ioda_fixup(void) static void pnv_pci_ioda_fixup(void)
{ {
pnv_pci_ioda_setup_PEs(); pnv_pci_ioda_setup_PEs();
pnv_pci_ioda_setup_iommu_api(); pnv_pci_ioda_setup_iommu_api();
pnv_pci_ioda_create_dbgfs(); pnv_pci_ioda_create_dbgfs();
pnv_pci_enable_bridges();
#ifdef CONFIG_EEH #ifdef CONFIG_EEH
eeh_init(); eeh_init();
eeh_addr_cache_build(); eeh_addr_cache_build();


@ -360,7 +360,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
} }
savep = __va(regs->gpr[3]); savep = __va(regs->gpr[3]);
regs->gpr[3] = savep[0]; /* restore original r3 */ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
/* If it isn't an extended log we can use the per cpu 64bit buffer */ /* If it isn't an extended log we can use the per cpu 64bit buffer */
h = (struct rtas_error_log *)&savep[1]; h = (struct rtas_error_log *)&savep[1];
@ -371,7 +371,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
int len, error_log_length; int len, error_log_length;
error_log_length = 8 + rtas_error_extended_log_length(h); error_log_length = 8 + rtas_error_extended_log_length(h);
len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX); len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX); memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
memcpy(global_mce_data_buf, h, len); memcpy(global_mce_data_buf, h, len);
errhdr = (struct rtas_error_log *)global_mce_data_buf; errhdr = (struct rtas_error_log *)global_mce_data_buf;
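The max_t -> min_t swap is the entire fix: the copy length must be clamped down to the destination buffer size, not up to the firmware-reported log length, or the memcpy() overruns global_mce_data_buf. A minimal sketch of the difference, with hypothetical names:

#include <string.h>

#define BUF_MAX 2048	/* stands in for RTAS_ERROR_LOG_MAX */

static char buf[BUF_MAX];

static void copy_log(const char *log, int log_len)
{
	int len = log_len < BUF_MAX ? log_len : BUF_MAX;	/* min_t() */

	/* With the old max_t(), len could exceed BUF_MAX whenever the
	 * reported log was larger than the buffer, overflowing buf. */
	memcpy(buf, log, len);
}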


@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
/* IO map the message register block. */ /* IO map the message register block. */
of_address_to_resource(np, 0, &rsrc); of_address_to_resource(np, 0, &rsrc);
msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start); msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
if (!msgr_block_addr) { if (!msgr_block_addr) {
dev_err(&dev->dev, "Failed to iomap MPIC message registers"); dev_err(&dev->dev, "Failed to iomap MPIC message registers");
return -EFAULT; return -EFAULT;
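resource_size() exists because struct resource ranges are inclusive: a region spanning start..end covers end - start + 1 bytes, so the old rsrc.end - rsrc.start mapped one byte too few. The helper is essentially (paraphrased from include/linux/ioport.h):

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;	/* inclusive bounds */
}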


@ -262,7 +262,6 @@ struct qdio_outbuf_state {
void *user; void *user;
}; };
#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
#define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01 #define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
#define CHSC_AC1_INITIATE_INPUTQ 0x80 #define CHSC_AC1_INITIATE_INPUTQ 0x80


@ -404,11 +404,13 @@ static void *get_vmcoreinfo_old(unsigned long *size)
if (copy_oldmem_kernel(nt_name, addr + sizeof(note), if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
sizeof(nt_name) - 1)) sizeof(nt_name) - 1))
return NULL; return NULL;
if (strcmp(nt_name, "VMCOREINFO") != 0) if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0)
return NULL; return NULL;
vmcoreinfo = kzalloc_panic(note.n_descsz); vmcoreinfo = kzalloc_panic(note.n_descsz);
if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) {
kfree(vmcoreinfo);
return NULL; return NULL;
}
*size = note.n_descsz; *size = note.n_descsz;
return vmcoreinfo; return vmcoreinfo;
} }
@ -418,15 +420,20 @@ static void *get_vmcoreinfo_old(unsigned long *size)
*/ */
static void *nt_vmcoreinfo(void *ptr) static void *nt_vmcoreinfo(void *ptr)
{ {
const char *name = VMCOREINFO_NOTE_NAME;
unsigned long size; unsigned long size;
void *vmcoreinfo; void *vmcoreinfo;
vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size); vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
if (!vmcoreinfo) if (vmcoreinfo)
vmcoreinfo = get_vmcoreinfo_old(&size); return nt_init_name(ptr, 0, vmcoreinfo, size, name);
vmcoreinfo = get_vmcoreinfo_old(&size);
if (!vmcoreinfo) if (!vmcoreinfo)
return ptr; return ptr;
return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO"); ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name);
kfree(vmcoreinfo);
return ptr;
} }
/* /*


@ -17,7 +17,7 @@
ENTRY(memmove) ENTRY(memmove)
ltgr %r4,%r4 ltgr %r4,%r4
lgr %r1,%r2 lgr %r1,%r2
bzr %r14 jz .Lmemmove_exit
aghi %r4,-1 aghi %r4,-1
clgr %r2,%r3 clgr %r2,%r3
jnh .Lmemmove_forward jnh .Lmemmove_forward
@ -36,6 +36,7 @@ ENTRY(memmove)
.Lmemmove_forward_remainder: .Lmemmove_forward_remainder:
larl %r5,.Lmemmove_mvc larl %r5,.Lmemmove_mvc
ex %r4,0(%r5) ex %r4,0(%r5)
.Lmemmove_exit:
BR_EX %r14 BR_EX %r14
.Lmemmove_reverse: .Lmemmove_reverse:
ic %r0,0(%r4,%r3) ic %r0,0(%r4,%r3)
@ -65,7 +66,7 @@ EXPORT_SYMBOL(memmove)
*/ */
ENTRY(memset) ENTRY(memset)
ltgr %r4,%r4 ltgr %r4,%r4
bzr %r14 jz .Lmemset_exit
ltgr %r3,%r3 ltgr %r3,%r3
jnz .Lmemset_fill jnz .Lmemset_fill
aghi %r4,-1 aghi %r4,-1
@ -80,12 +81,13 @@ ENTRY(memset)
.Lmemset_clear_remainder: .Lmemset_clear_remainder:
larl %r3,.Lmemset_xc larl %r3,.Lmemset_xc
ex %r4,0(%r3) ex %r4,0(%r3)
.Lmemset_exit:
BR_EX %r14 BR_EX %r14
.Lmemset_fill: .Lmemset_fill:
stc %r3,0(%r2) stc %r3,0(%r2)
cghi %r4,1 cghi %r4,1
lgr %r1,%r2 lgr %r1,%r2
ber %r14 je .Lmemset_fill_exit
aghi %r4,-2 aghi %r4,-2
srlg %r3,%r4,8 srlg %r3,%r4,8
ltgr %r3,%r3 ltgr %r3,%r3
@ -97,6 +99,7 @@ ENTRY(memset)
.Lmemset_fill_remainder: .Lmemset_fill_remainder:
larl %r3,.Lmemset_mvc larl %r3,.Lmemset_mvc
ex %r4,0(%r3) ex %r4,0(%r3)
.Lmemset_fill_exit:
BR_EX %r14 BR_EX %r14
.Lmemset_xc: .Lmemset_xc:
xc 0(1,%r1),0(%r1) xc 0(1,%r1),0(%r1)
@ -111,7 +114,7 @@ EXPORT_SYMBOL(memset)
*/ */
ENTRY(memcpy) ENTRY(memcpy)
ltgr %r4,%r4 ltgr %r4,%r4
bzr %r14 jz .Lmemcpy_exit
aghi %r4,-1 aghi %r4,-1
srlg %r5,%r4,8 srlg %r5,%r4,8
ltgr %r5,%r5 ltgr %r5,%r5
@ -120,6 +123,7 @@ ENTRY(memcpy)
.Lmemcpy_remainder: .Lmemcpy_remainder:
larl %r5,.Lmemcpy_mvc larl %r5,.Lmemcpy_mvc
ex %r4,0(%r5) ex %r4,0(%r5)
.Lmemcpy_exit:
BR_EX %r14 BR_EX %r14
.Lmemcpy_loop: .Lmemcpy_loop:
mvc 0(256,%r1),0(%r3) mvc 0(256,%r1),0(%r3)


@ -486,6 +486,8 @@ retry:
/* No reason to continue if interrupted by SIGKILL. */ /* No reason to continue if interrupted by SIGKILL. */
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) { if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
fault = VM_FAULT_SIGNAL; fault = VM_FAULT_SIGNAL;
if (flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_up;
goto out; goto out;
} }
if (unlikely(fault & VM_FAULT_ERROR)) if (unlikely(fault & VM_FAULT_ERROR))


@ -271,7 +271,7 @@ void arch_set_page_states(int make_stable)
list_for_each(l, &zone->free_area[order].free_list[t]) { list_for_each(l, &zone->free_area[order].free_list[t]) {
page = list_entry(l, struct page, lru); page = list_entry(l, struct page, lru);
if (make_stable) if (make_stable)
set_page_stable_dat(page, 0); set_page_stable_dat(page, order);
else else
set_page_unused(page, order); set_page_unused(page, order);
} }


@ -518,8 +518,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
/* br %r1 */ /* br %r1 */
_EMIT2(0x07f1); _EMIT2(0x07f1);
} else { } else {
/* larl %r1,.+14 */
EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
/* ex 0,S390_lowcore.br_r1_trampoline */ /* ex 0,S390_lowcore.br_r1_trampoline */
EMIT4_DISP(0x44000000, REG_0, REG_0, EMIT4_DISP(0x44000000, REG_0, REG_0,
offsetof(struct lowcore, br_r1_trampoline)); offsetof(struct lowcore, br_r1_trampoline));
@ -1403,6 +1401,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
goto free_addrs; goto free_addrs;
} }
if (bpf_jit_prog(&jit, fp)) { if (bpf_jit_prog(&jit, fp)) {
bpf_jit_binary_free(header);
fp = orig_fp; fp = orig_fp;
goto free_addrs; goto free_addrs;
} }


@ -134,26 +134,14 @@ void __init numa_setup(void)
{ {
pr_info("NUMA mode: %s\n", mode->name); pr_info("NUMA mode: %s\n", mode->name);
nodes_clear(node_possible_map); nodes_clear(node_possible_map);
/* Initially attach all possible CPUs to node 0. */
cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
if (mode->setup) if (mode->setup)
mode->setup(); mode->setup();
numa_setup_memory(); numa_setup_memory();
memblock_dump_all(); memblock_dump_all();
} }
/*
* numa_init_early() - Initialization initcall
*
* This runs when only one CPU is online and before the first
* topology update is called for by the scheduler.
*/
static int __init numa_init_early(void)
{
/* Attach all possible CPUs to node 0 for now. */
cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask);
return 0;
}
early_initcall(numa_init_early);
/* /*
* numa_init_late() - Initialization initcall * numa_init_late() - Initialization initcall
* *


@ -420,6 +420,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
hwirq = 0; hwirq = 0;
for_each_pci_msi_entry(msi, pdev) { for_each_pci_msi_entry(msi, pdev) {
rc = -EIO; rc = -EIO;
if (hwirq >= msi_vecs)
break;
irq = irq_alloc_desc(0); /* Alloc irq on node 0 */ irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
if (irq < 0) if (irq < 0)
return -ENOMEM; return -ENOMEM;


@ -14,6 +14,7 @@ generic-y += local64.h
generic-y += mcs_spinlock.h generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h generic-y += mm-arch-hooks.h
generic-y += module.h generic-y += module.h
generic-y += msi.h
generic-y += preempt.h generic-y += preempt.h
generic-y += rwsem.h generic-y += rwsem.h
generic-y += serial.h generic-y += serial.h


@ -204,23 +204,27 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
asmlinkage long sys_getdomainname(char __user *name, int len) asmlinkage long sys_getdomainname(char __user *name, int len)
{ {
int nlen, err; int nlen, err;
char tmp[__NEW_UTS_LEN + 1];
if (len < 0) if (len < 0)
return -EINVAL; return -EINVAL;
down_read(&uts_sem); down_read(&uts_sem);
nlen = strlen(utsname()->domainname) + 1; nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL; err = -EINVAL;
if (nlen > len) if (nlen > len)
goto out; goto out_unlock;
memcpy(tmp, utsname()->domainname, nlen);
err = -EFAULT; up_read(&uts_sem);
if (!copy_to_user(name, utsname()->domainname, nlen))
err = 0;
out: if (copy_to_user(name, tmp, nlen))
return -EFAULT;
return 0;
out_unlock:
up_read(&uts_sem); up_read(&uts_sem);
return err; return err;
} }


@ -527,23 +527,27 @@ extern void check_pending(int signum);
SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len) SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{ {
int nlen, err; int nlen, err;
char tmp[__NEW_UTS_LEN + 1];
if (len < 0) if (len < 0)
return -EINVAL; return -EINVAL;
down_read(&uts_sem); down_read(&uts_sem);
nlen = strlen(utsname()->domainname) + 1; nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL; err = -EINVAL;
if (nlen > len) if (nlen > len)
goto out; goto out_unlock;
memcpy(tmp, utsname()->domainname, nlen);
err = -EFAULT; up_read(&uts_sem);
if (!copy_to_user(name, utsname()->domainname, nlen))
err = 0;
out: if (copy_to_user(name, tmp, nlen))
return -EFAULT;
return 0;
out_unlock:
up_read(&uts_sem); up_read(&uts_sem);
return err; return err;
} }
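This hunk and the sparc64 one above apply the same rule: copy_to_user() can fault and sleep, so it must not run under uts_sem; the string is snapshotted into a stack buffer, the semaphore dropped, and only then copied out. A hedged userspace analogue of the pattern, with a pthread rwlock standing in for uts_sem:

#include <pthread.h>
#include <string.h>

#define UTS_LEN 65	/* stands in for __NEW_UTS_LEN + 1 */

static pthread_rwlock_t uts_lock = PTHREAD_RWLOCK_INITIALIZER;
static char domainname[UTS_LEN];

static int get_domainname(char *out, int len)
{
	char tmp[UTS_LEN];
	int nlen;

	pthread_rwlock_rdlock(&uts_lock);
	nlen = strlen(domainname) + 1;
	if (nlen > len) {
		pthread_rwlock_unlock(&uts_lock);
		return -1;	/* -EINVAL in the kernel */
	}
	memcpy(tmp, domainname, nlen);	/* snapshot under the lock */
	pthread_rwlock_unlock(&uts_lock);

	memcpy(out, tmp, nlen);		/* the copy_to_user() step */
	return 0;
}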


@ -813,7 +813,7 @@ static void __init get_tick_patch(void)
} }
} }
static void init_tick_ops(struct sparc64_tick_ops *ops) static void __init init_tick_ops(struct sparc64_tick_ops *ops)
{ {
unsigned long freq, quotient, tick; unsigned long freq, quotient, tick;


@ -170,6 +170,7 @@ config X86
select HAVE_PERF_REGS select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP select HAVE_PERF_USER_STACK_DUMP
select HAVE_RCU_TABLE_FREE select HAVE_RCU_TABLE_FREE
select HAVE_RCU_TABLE_INVALIDATE if HAVE_RCU_TABLE_FREE
select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION select HAVE_RELIABLE_STACKTRACE if X86_64 && UNWINDER_FRAME_POINTER && STACK_VALIDATION
select HAVE_STACK_VALIDATION if X86_64 select HAVE_STACK_VALIDATION if X86_64


@ -104,9 +104,13 @@ define cmd_check_data_rel
done done
endef endef
# We need to run two commands under "if_changed", so merge them into a
# single invocation.
quiet_cmd_check-and-link-vmlinux = LD $@
cmd_check-and-link-vmlinux = $(cmd_check_data_rel); $(cmd_ld)
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
$(call if_changed,check_data_rel) $(call if_changed,check-and-link-vmlinux)
$(call if_changed,ld)
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE $(obj)/vmlinux.bin: vmlinux FORCE


@ -74,9 +74,9 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
$(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \ $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
-fno-omit-frame-pointer -foptimize-sibling-calls \ -fno-omit-frame-pointer -foptimize-sibling-calls \
-DDISABLE_BRANCH_PROFILING -DBUILD_VDSO -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS)) $(CFL) $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
# #
# vDSO code runs in userspace and -pg doesn't help with profiling anyway. # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@ -147,11 +147,13 @@ KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32)) KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector) KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
KBUILD_CFLAGS_32 += -fno-omit-frame-pointer KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
$(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
$(obj)/vdso32.so.dbg: FORCE \ $(obj)/vdso32.so.dbg: FORCE \


@ -579,7 +579,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{ {
struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu); struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
struct perf_event *event = pcpu->event; struct perf_event *event = pcpu->event;
struct hw_perf_event *hwc = &event->hw; struct hw_perf_event *hwc;
struct perf_sample_data data; struct perf_sample_data data;
struct perf_raw_record raw; struct perf_raw_record raw;
struct pt_regs regs; struct pt_regs regs;
@ -602,6 +602,10 @@ fail:
return 0; return 0;
} }
if (WARN_ON_ONCE(!event))
goto fail;
hwc = &event->hw;
msr = hwc->config_base; msr = hwc->config_base;
buf = ibs_data.regs; buf = ibs_data.regs;
rdmsrl(msr, *buf); rdmsrl(msr, *buf);


@ -2462,7 +2462,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
perf_callchain_store(entry, regs->ip); perf_callchain_store(entry, regs->ip);
if (!current->mm) if (!nmi_uaccess_okay())
return; return;
if (perf_callchain_user32(regs, entry)) if (perf_callchain_user32(regs, entry))


@ -33,7 +33,8 @@ extern inline unsigned long native_save_fl(void)
return flags; return flags;
} }
static inline void native_restore_fl(unsigned long flags) extern inline void native_restore_fl(unsigned long flags);
extern inline void native_restore_fl(unsigned long flags)
{ {
asm volatile("push %0 ; popf" asm volatile("push %0 ; popf"
: /* no output */ : /* no output */
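With the gnu_inline semantics the kernel builds under, extern inline means "inline where possible, but emit no out-of-line symbol here"; exactly one translation unit must then provide the real definition (for these two functions an out-of-line copy is supplied elsewhere in the tree so paravirt code has a callable symbol). A minimal userspace sketch of the idiom, with a hypothetical function:

/* header (sketch): callers inline this, no symbol is emitted */
extern inline __attribute__((gnu_inline)) int twice(int x)
{
	return 2 * x;
}

/* one .c file (sketch): a plain definition supplies the out-of-line
 * copy that non-inlined calls and address-takers link against */
int twice(int x)
{
	return 2 * x;
}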


@ -200,6 +200,7 @@ enum mce_notifier_prios {
MCE_PRIO_LOWEST = 0, MCE_PRIO_LOWEST = 0,
}; };
struct notifier_block;
extern void mce_register_decode_chain(struct notifier_block *nb); extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb); extern void mce_unregister_decode_chain(struct notifier_block *nb);


@ -2,6 +2,8 @@
#ifndef _ASM_X86_PGTABLE_3LEVEL_H #ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H #define _ASM_X86_PGTABLE_3LEVEL_H
#include <asm/atomic64_32.h>
/* /*
* Intel Physical Address Extension (PAE) Mode - three-level page * Intel Physical Address Extension (PAE) Mode - three-level page
* tables on PPro+ CPUs. * tables on PPro+ CPUs.
@ -147,10 +149,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{ {
pte_t res; pte_t res;
/* xchg acts as a barrier before the setting of the high bits */ res.pte = (pteval_t)atomic64_xchg((atomic64_t *)ptep, 0);
res.pte_low = xchg(&ptep->pte_low, 0);
res.pte_high = ptep->pte_high;
ptep->pte_high = 0;
return res; return res;
} }


@ -132,6 +132,8 @@ struct cpuinfo_x86 {
/* Index into per_cpu list: */ /* Index into per_cpu list: */
u16 cpu_index; u16 cpu_index;
u32 microcode; u32 microcode;
/* Address space bits used by the cache internally */
u8 x86_cache_bits;
} __randomize_layout; } __randomize_layout;
struct cpuid_regs { struct cpuid_regs {
@ -180,9 +182,9 @@ extern const struct seq_operations cpuinfo_op;
extern void cpu_detect(struct cpuinfo_x86 *c); extern void cpu_detect(struct cpuinfo_x86 *c);
static inline unsigned long l1tf_pfn_limit(void) static inline unsigned long long l1tf_pfn_limit(void)
{ {
return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1; return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
} }
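A quick worked check of the new formula, assuming x86_cache_bits = 44 and 4 KiB pages (PAGE_SHIFT = 12): BIT_ULL(44 - 1 - 12) = 2^31 page frames, and 2^31 frames x 4 KiB = 8 TiB, exactly half of the 16 TiB a 44-bit cache can address. BIT_ULL also keeps the shift in 64-bit arithmetic, which plain BIT() did not guarantee on 32-bit kernels. Hedged standalone arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int cache_bits = 44, page_shift = 12;	/* assumptions */
	unsigned long long limit = 1ULL << (cache_bits - 1 - page_shift);

	/* prints: pfn limit 2147483648 (8192 GiB) */
	printf("pfn limit %llu (%llu GiB)\n",
	       limit, (limit << page_shift) >> 30);
	return 0;
}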
extern void early_cpu_init(void); extern void early_cpu_init(void);


@ -175,8 +175,16 @@ struct tlb_state {
* are on. This means that it may not match current->active_mm, * are on. This means that it may not match current->active_mm,
* which will contain the previous user mm when we're in lazy TLB * which will contain the previous user mm when we're in lazy TLB
* mode even if we've already switched back to swapper_pg_dir. * mode even if we've already switched back to swapper_pg_dir.
*
* During switch_mm_irqs_off(), loaded_mm will be set to
* LOADED_MM_SWITCHING during the brief interrupts-off window
* when CR3 and loaded_mm would otherwise be inconsistent. This
* is for nmi_uaccess_okay()'s benefit.
*/ */
struct mm_struct *loaded_mm; struct mm_struct *loaded_mm;
#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
u16 loaded_mm_asid; u16 loaded_mm_asid;
u16 next_asid; u16 next_asid;
/* last user mm's ctx id */ /* last user mm's ctx id */
@ -246,6 +254,38 @@ struct tlb_state {
}; };
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
/*
* Blindly accessing user memory from NMI context can be dangerous
* if we're in the middle of switching the current user task or
* switching the loaded mm. It can also be dangerous if we
* interrupted some kernel code that was temporarily using a
* different mm.
*/
static inline bool nmi_uaccess_okay(void)
{
struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
struct mm_struct *current_mm = current->mm;
VM_WARN_ON_ONCE(!loaded_mm);
/*
* The condition we want to check is
* current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
* if we're running in a VM with shadow paging, and nmi_uaccess_okay()
* is supposed to be reasonably fast.
*
* Instead, we check the almost equivalent but somewhat conservative
* condition below, and we rely on the fact that switch_mm_irqs_off()
* sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
*/
if (loaded_mm != current_mm)
return false;
VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
return true;
}
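The perf_callchain_user() hunk further down is the first caller; the usage shape is simply a cheap guard executed before any NMI-context access to user memory. A hedged sketch with a hypothetical helper name:

/* Sketch: refuse to touch user memory from NMI context while the
 * loaded mm is being switched or belongs to someone else. */
static unsigned long nmi_peek_user(const void __user *addr)
{
	unsigned long val;

	if (!nmi_uaccess_okay())
		return 0;	/* mm in flux: report nothing */

	if (__copy_from_user_inatomic(&val, addr, sizeof(val)))
		return 0;
	return val;
}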
/* Initialize cr4 shadow for this CPU. */ /* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void) static inline void cr4_init_shadow(void)
{ {


@ -93,7 +93,7 @@ static inline unsigned int __getcpu(void)
* *
* If RDPID is available, use it. * If RDPID is available, use it.
*/ */
alternative_io ("lsl %[p],%[seg]", alternative_io ("lsl %[seg],%[p]",
".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */ ".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
X86_FEATURE_RDPID, X86_FEATURE_RDPID,
[p] "=a" (p), [seg] "r" (__PER_CPU_SEG)); [p] "=a" (p), [seg] "r" (__PER_CPU_SEG));


@ -652,6 +652,45 @@ EXPORT_SYMBOL_GPL(l1tf_mitigation);
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
/*
* These CPUs all support 44bits physical address space internally in the
* cache but CPUID can report a smaller number of physical address bits.
*
* The L1TF mitigation uses the top most address bit for the inversion of
* non present PTEs. When the installed memory reaches into the top most
* address bit due to memory holes, which has been observed on machines
* which report 36bits physical address bits and have 32G RAM installed,
* then the mitigation range check in l1tf_select_mitigation() triggers.
* This is a false positive because the mitigation is still possible due to
* the fact that the cache uses 44bit internally. Use the cache bits
* instead of the reported physical bits and adjust them on the affected
* machines to 44bit if the reported bits are less than 44.
*/
static void override_cache_bits(struct cpuinfo_x86 *c)
{
if (c->x86 != 6)
return;
switch (c->x86_model) {
case INTEL_FAM6_NEHALEM:
case INTEL_FAM6_WESTMERE:
case INTEL_FAM6_SANDYBRIDGE:
case INTEL_FAM6_IVYBRIDGE:
case INTEL_FAM6_HASWELL_CORE:
case INTEL_FAM6_HASWELL_ULT:
case INTEL_FAM6_HASWELL_GT3E:
case INTEL_FAM6_BROADWELL_CORE:
case INTEL_FAM6_BROADWELL_GT3E:
case INTEL_FAM6_SKYLAKE_MOBILE:
case INTEL_FAM6_SKYLAKE_DESKTOP:
case INTEL_FAM6_KABYLAKE_MOBILE:
case INTEL_FAM6_KABYLAKE_DESKTOP:
if (c->x86_cache_bits < 44)
c->x86_cache_bits = 44;
break;
}
}
static void __init l1tf_select_mitigation(void) static void __init l1tf_select_mitigation(void)
{ {
u64 half_pa; u64 half_pa;
@ -659,6 +698,8 @@ static void __init l1tf_select_mitigation(void)
if (!boot_cpu_has_bug(X86_BUG_L1TF)) if (!boot_cpu_has_bug(X86_BUG_L1TF))
return; return;
override_cache_bits(&boot_cpu_data);
switch (l1tf_mitigation) { switch (l1tf_mitigation) {
case L1TF_MITIGATION_OFF: case L1TF_MITIGATION_OFF:
case L1TF_MITIGATION_FLUSH_NOWARN: case L1TF_MITIGATION_FLUSH_NOWARN:
@ -678,14 +719,13 @@ static void __init l1tf_select_mitigation(void)
return; return;
#endif #endif
/*
* This is extremely unlikely to happen because almost all
* systems have far more MAX_PA/2 than RAM can be fit into
* DIMM slots.
*/
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
half_pa);
pr_info("However, doing so will make a part of your RAM unusable.\n");
pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
return; return;
} }


@ -890,6 +890,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
} }
} }
#endif #endif
c->x86_cache_bits = c->x86_phys_bits;
} }
static const __initconst struct x86_cpu_id cpu_no_speculation[] = { static const __initconst struct x86_cpu_id cpu_no_speculation[] = {


@ -150,6 +150,9 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_HYPERVISOR)) if (cpu_has(c, X86_FEATURE_HYPERVISOR))
return false; return false;
if (c->x86 != 6)
return false;
for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
if (c->x86_model == spectre_bad_microcodes[i].model && if (c->x86_model == spectre_bad_microcodes[i].model &&
c->x86_stepping == spectre_bad_microcodes[i].stepping) c->x86_stepping == spectre_bad_microcodes[i].stepping)


@ -190,8 +190,11 @@ static void save_microcode_patch(void *data, unsigned int size)
p = memdup_patch(data, size); p = memdup_patch(data, size);
if (!p) if (!p)
pr_err("Error allocating buffer %p\n", data); pr_err("Error allocating buffer %p\n", data);
else else {
list_replace(&iter->plist, &p->plist); list_replace(&iter->plist, &p->plist);
kfree(iter->data);
kfree(iter);
}
} }
} }
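list_replace() only splices the new node into the list; the node it displaces, and anything it points to, stays allocated, which is why the two added kfree() calls are needed. A hedged miniature of the pattern:

struct patch {
	struct list_head plist;
	void *data;
};

/* Sketch: replacing a list node must be paired with freeing the
 * displaced node's payload and container, or both leak. */
static void replace_patch(struct patch *old, struct patch *new)
{
	list_replace(&old->plist, &new->plist);
	kfree(old->data);
	kfree(old);
}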


@ -17,6 +17,7 @@
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/nmi.h> #include <linux/nmi.h>
#include <linux/sysfs.h> #include <linux/sysfs.h>
#include <linux/kasan.h>
#include <asm/cpu_entry_area.h> #include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h> #include <asm/stacktrace.h>
@ -298,7 +299,10 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
* We're not going to return, but we might be on an IST stack or * We're not going to return, but we might be on an IST stack or
* have very little stack space left. Rewind the stack and kill * have very little stack space left. Rewind the stack and kill
* the task. * the task.
* Before we rewind the stack, we have to tell KASAN that we're going to
* reuse the task stack and that existing poisons are invalid.
*/ */
kasan_unpoison_task_stack(current);
rewind_stack_do_exit(signr); rewind_stack_do_exit(signr);
} }
NOKPROBE_SYMBOL(oops_end); NOKPROBE_SYMBOL(oops_end);


@ -532,7 +532,7 @@ static int bzImage64_cleanup(void *loader_data)
static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len) static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
{ {
return verify_pefile_signature(kernel, kernel_len, return verify_pefile_signature(kernel, kernel_len,
NULL, VERIFY_USE_SECONDARY_KEYRING,
VERIFYING_KEXEC_PE_SIGNATURE); VERIFYING_KEXEC_PE_SIGNATURE);
} }
#endif #endif


@ -143,6 +143,7 @@ static unsigned long kvm_get_tsc_khz(void)
src = &hv_clock[cpu].pvti; src = &hv_clock[cpu].pvti;
tsc_khz = pvclock_tsc_khz(src); tsc_khz = pvclock_tsc_khz(src);
put_cpu(); put_cpu();
setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
return tsc_khz; return tsc_khz;
} }

Some files were not shown because too many files have changed in this diff Show More