This is the 4.14.69 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAluVXrMACgkQONu9yGCS
 aT5AyA//TMltTkpcS08FE8gUSgPT7ZjJdoeS0yUD7Sh8H0GV6MSn12QGgr8UWyUS
 t9a01U2erwz1Ua3I+CjBxLOBfw6c5on87C9nM4ruhV4zA/umFi+odBTnXPkc1fnM
 JtRgqlf5Gsy0Tp/CjcABot8P2RwyXOQBwGN544yU1GAq7WYbNg2o4YyuKd8W1epu
 lro06RCS4gsmguKaxYgFxwJFYvCbhGKlS3GDLz/v0XXMkaHIzbTdUduedA/7DiIc
 ZOAOaFXYLdbx2gTmodMuEWVOWUeReZnegAOQXswfjbqID0HxvNlYWCFrNAX9zDw0
 JLwX2hE4LC+ptBr8AwzBbz2yhm2smcSiGENohra9MyxyNrRrlVvFBqGIOHZGl5Dm
 +XPeVk1Prg/nubZ8K+1OMBMPGjrIfYESrl3JRSw3TOhF58jRSExoFSHQwKciAFbZ
 6huV41/eBRYBPXKiZPv01jluZc/u05n0gHZDz6tnJSBDKICUeoHoDq3oX78tjMvK
 3Qj+CZJS6DUczgz6azLx80Bnt/clEfCtQ/lC+h4SEdvnClPoOtUs7q4j74fxKG/8
 bkEVVOJMFtAXTpj7VcxHL1QMoYMMqNGmheWMhzHw3CsC2TTkuAdHfY5KF70ggjH7
 AN0a2BZCKj9KfsI5/IzytzfuzwMXfj6bZ9NWwKmLm37gzWLLxEg=
 =z4Ok
 -----END PGP SIGNATURE-----

Merge 4.14.69 into android-4.14-p

Changes in 4.14.69
	net: 6lowpan: fix reserved space for single frames
	net: mac802154: tx: expand tailroom if necessary
	9p/net: Fix zero-copy path in the 9p virtio transport
	spi: davinci: fix a NULL pointer dereference
	spi: pxa2xx: Add support for Intel Ice Lake
	spi: spi-fsl-dspi: Fix imprecise abort on VF500 during probe
	spi: cadence: Change usleep_range() to udelay(), for atomic context
	mmc: renesas_sdhi_internal_dmac: fix #define RST_RESERVED_BITS
	readahead: stricter check for bdi io_pages
	block: blk_init_allocated_queue() set q->fq as NULL in the fail case
	block: really disable runtime-pm for blk-mq
	drm/i915/userptr: reject zero user_size
	libertas: fix suspend and resume for SDIO connected cards
	media: Revert "[media] tvp5150: fix pad format frame height"
	mailbox: xgene-slimpro: Fix potential NULL pointer dereference
	Replace magic for trusting the secondary keyring with #define
	Fix kexec forbidding kernels signed with keys in the secondary keyring to boot
	powerpc/fadump: handle crash memory ranges array index overflow
	powerpc/pseries: Fix endianness while restoring of r3 in MCE handler.
	PCI: Add wrappers for dev_printk()
	powerpc/powernv/pci: Work around races in PCI bridge enabling
	cxl: Fix wrong comparison in cxl_adapter_context_get()
	ib_srpt: Fix a use-after-free in srpt_close_ch()
	RDMA/rxe: Set wqe->status correctly if an unexpected response is received
	9p: fix multiple NULL-pointer-dereferences
	fs/9p/xattr.c: catch the error of p9_client_clunk when setting xattr failed
	9p/virtio: fix off-by-one error in sg list bounds check
	net/9p/client.c: version pointer uninitialized
	net/9p/trans_fd.c: fix race-condition by flushing workqueue before the kfree()
	dm integrity: change 'suspending' variable from bool to int
	dm thin: stop no_space_timeout worker when switching to write-mode
	dm cache metadata: save in-core policy_hint_size to on-disk superblock
	dm cache metadata: set dirty on all cache blocks after a crash
	dm crypt: don't decrease device limits
	uart: fix race between uart_put_char() and uart_shutdown()
	Drivers: hv: vmbus: Reset the channel callback in vmbus_onoffer_rescind()
	iio: sca3000: Fix missing return in switch
	iio: ad9523: Fix displayed phase
	iio: ad9523: Fix return value for ad952x_store()
	extcon: Release locking when sending the notification of connector state
	vmw_balloon: fix inflation of 64-bit GFNs
	vmw_balloon: do not use 2MB without batching
	vmw_balloon: VMCI_DOORBELL_SET does not check status
	vmw_balloon: fix VMCI use when balloon built into kernel
	rtc: omap: fix potential crash on power off
	tracing: Do not call start/stop() functions when tracing_on does not change
	tracing/blktrace: Fix to allow setting same value
	printk/tracing: Do not trace printk_nmi_enter()
	livepatch: Validate module/old func name length
	uprobes: Use synchronize_rcu() not synchronize_sched()
	mfd: hi655x: Fix regmap area declared size for hi655x
	ovl: fix wrong use of impure dir cache in ovl_iterate()
	drivers/block/zram/zram_drv.c: fix bug storing backing_dev
	cpufreq: governor: Avoid accessing invalid governor_data
	PM / sleep: wakeup: Fix build error caused by missing SRCU support
	KVM: VMX: fixes for vmentry_l1d_flush module parameter
	KVM: PPC: Book3S: Fix guest DMA when guest partially backed by THP pages
	xtensa: limit offsets in __loop_cache_{all,page}
	xtensa: increase ranges in ___invalidate_{i,d}cache_all
	block, bfq: return nbytes and not zero from struct cftype .write() method
	pnfs/blocklayout: off by one in bl_map_stripe()
	NFSv4 client live hangs after live data migration recovery
	NFSv4: Fix locking in pnfs_generic_recover_commit_reqs
	NFSv4: Fix a sleep in atomic context in nfs4_callback_sequence()
	ARM: tegra: Fix Tegra30 Cardhu PCA954x reset
	mm/tlb: Remove tlb_remove_table() non-concurrent condition
	iommu/vt-d: Add definitions for PFSID
	iommu/vt-d: Fix dev iotlb pfsid use
	sys: don't hold uts_sem while accessing userspace memory
	userns: move user access out of the mutex
	ubifs: Fix memory leak in lprobs self-check
	Revert "UBIFS: Fix potential integer overflow in allocation"
	ubifs: Check data node size before truncate
	ubifs: xattr: Don't operate on deleted inodes
	ubifs: Fix synced_i_size calculation for xattr inodes
	pwm: tiehrpwm: Don't use emulation mode bits to control PWM output
	pwm: tiehrpwm: Fix disabling of output of PWMs
	fb: fix lost console when the user unplugs a USB adapter
	udlfb: set optimal write delay
	getxattr: use correct xattr length
	libnvdimm: fix ars_status output length calculation
	bcache: release dc->writeback_lock properly in bch_writeback_thread()
	cap_inode_getsecurity: use d_find_any_alias() instead of d_find_alias()
	perf auxtrace: Fix queue resize
	crypto: vmx - Fix sleep-in-atomic bugs
	crypto: caam - fix DMA mapping direction for RSA forms 2 & 3
	crypto: caam/jr - fix descriptor DMA unmapping
	crypto: caam/qi - fix error path in xts setkey
	fs/quota: Fix spectre gadget in do_quotactl
	arm64: mm: always enable CONFIG_HOLES_IN_ZONE
	Linux 4.14.69

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit c535ee76cd (Greg Kroah-Hartman, 2018-09-10 09:24:54 +02:00)
95 changed files with 846 additions and 424 deletions

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 68
SUBLEVEL = 69
EXTRAVERSION =
NAME = Petit Gorille

@@ -530,24 +530,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
SYSCALL_DEFINE1(osf_utsname, char __user *, name)
{
int error;
char tmp[5 * 32];
down_read(&uts_sem);
error = -EFAULT;
if (copy_to_user(name + 0, utsname()->sysname, 32))
goto out;
if (copy_to_user(name + 32, utsname()->nodename, 32))
goto out;
if (copy_to_user(name + 64, utsname()->release, 32))
goto out;
if (copy_to_user(name + 96, utsname()->version, 32))
goto out;
if (copy_to_user(name + 128, utsname()->machine, 32))
goto out;
memcpy(tmp + 0 * 32, utsname()->sysname, 32);
memcpy(tmp + 1 * 32, utsname()->nodename, 32);
memcpy(tmp + 2 * 32, utsname()->release, 32);
memcpy(tmp + 3 * 32, utsname()->version, 32);
memcpy(tmp + 4 * 32, utsname()->machine, 32);
up_read(&uts_sem);
error = 0;
out:
up_read(&uts_sem);
return error;
if (copy_to_user(name, tmp, sizeof(tmp)))
return -EFAULT;
return 0;
}
SYSCALL_DEFINE0(getpagesize)
@@ -567,18 +562,21 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
{
int len, err = 0;
char *kname;
char tmp[32];
if (namelen > 32)
if (namelen < 0 || namelen > 32)
namelen = 32;
down_read(&uts_sem);
kname = utsname()->domainname;
len = strnlen(kname, namelen);
if (copy_to_user(name, kname, min(len + 1, namelen)))
err = -EFAULT;
len = min(len + 1, namelen);
memcpy(tmp, kname, len);
up_read(&uts_sem);
return err;
if (copy_to_user(name, tmp, len))
return -EFAULT;
return 0;
}
/*
@@ -739,13 +737,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
};
unsigned long offset;
const char *res;
long len, err = -EINVAL;
long len;
char tmp[__NEW_UTS_LEN + 1];
offset = command-1;
if (offset >= ARRAY_SIZE(sysinfo_table)) {
/* Digital UNIX has a few unpublished interfaces here */
printk("sysinfo(%d)", command);
goto out;
return -EINVAL;
}
down_read(&uts_sem);
@@ -753,13 +752,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
len = strlen(res)+1;
if ((unsigned long)len > (unsigned long)count)
len = count;
if (copy_to_user(buf, res, len))
err = -EFAULT;
else
err = 0;
memcpy(tmp, res, len);
up_read(&uts_sem);
out:
return err;
if (copy_to_user(buf, tmp, len))
return -EFAULT;
return 0;
}
SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,

@@ -206,6 +206,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x70>;
reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
};
};

@@ -694,7 +694,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
config HOLES_IN_ZONE
def_bool y
depends on NUMA
source kernel/Kconfig.preempt
source kernel/Kconfig.hz

@@ -195,9 +195,6 @@ struct fadump_crash_info_header {
struct cpumask online_mask;
};
/* Crash memory ranges */
#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
struct fad_crash_memory_ranges {
unsigned long long base;
unsigned long long size;

@@ -47,8 +47,10 @@ static struct fadump_mem_struct fdm;
static const struct fadump_mem_struct *fdm_active;
static DEFINE_MUTEX(fadump_mutex);
struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
struct fad_crash_memory_ranges *crash_memory_ranges;
int crash_memory_ranges_size;
int crash_mem_ranges;
int max_crash_mem_ranges;
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node,
@@ -843,38 +845,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
return 0;
}
static inline void fadump_add_crash_memory(unsigned long long base,
unsigned long long end)
static void free_crash_memory_ranges(void)
{
kfree(crash_memory_ranges);
crash_memory_ranges = NULL;
crash_memory_ranges_size = 0;
max_crash_mem_ranges = 0;
}
/*
* Allocate or reallocate crash memory ranges array in incremental units
* of PAGE_SIZE.
*/
static int allocate_crash_memory_ranges(void)
{
struct fad_crash_memory_ranges *new_array;
u64 new_size;
new_size = crash_memory_ranges_size + PAGE_SIZE;
pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
new_size);
new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
if (new_array == NULL) {
pr_err("Insufficient memory for setting up crash memory ranges\n");
free_crash_memory_ranges();
return -ENOMEM;
}
crash_memory_ranges = new_array;
crash_memory_ranges_size = new_size;
max_crash_mem_ranges = (new_size /
sizeof(struct fad_crash_memory_ranges));
return 0;
}
static inline int fadump_add_crash_memory(unsigned long long base,
unsigned long long end)
{
if (base == end)
return;
return 0;
if (crash_mem_ranges == max_crash_mem_ranges) {
int ret;
ret = allocate_crash_memory_ranges();
if (ret)
return ret;
}
pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
crash_mem_ranges, base, end - 1, (end - base));
crash_memory_ranges[crash_mem_ranges].base = base;
crash_memory_ranges[crash_mem_ranges].size = end - base;
crash_mem_ranges++;
return 0;
}
static void fadump_exclude_reserved_area(unsigned long long start,
static int fadump_exclude_reserved_area(unsigned long long start,
unsigned long long end)
{
unsigned long long ra_start, ra_end;
int ret = 0;
ra_start = fw_dump.reserve_dump_area_start;
ra_end = ra_start + fw_dump.reserve_dump_area_size;
if ((ra_start < end) && (ra_end > start)) {
if ((start < ra_start) && (end > ra_end)) {
fadump_add_crash_memory(start, ra_start);
fadump_add_crash_memory(ra_end, end);
ret = fadump_add_crash_memory(start, ra_start);
if (ret)
return ret;
ret = fadump_add_crash_memory(ra_end, end);
} else if (start < ra_start) {
fadump_add_crash_memory(start, ra_start);
ret = fadump_add_crash_memory(start, ra_start);
} else if (ra_end < end) {
fadump_add_crash_memory(ra_end, end);
ret = fadump_add_crash_memory(ra_end, end);
}
} else
fadump_add_crash_memory(start, end);
ret = fadump_add_crash_memory(start, end);
return ret;
}
static int fadump_init_elfcore_header(char *bufp)
@@ -914,10 +966,11 @@ static int fadump_init_elfcore_header(char *bufp)
* Traverse through memblock structure and setup crash memory ranges. These
* ranges will be used create PT_LOAD program headers in elfcore header.
*/
static void fadump_setup_crash_memory_ranges(void)
static int fadump_setup_crash_memory_ranges(void)
{
struct memblock_region *reg;
unsigned long long start, end;
int ret;
pr_debug("Setup crash memory ranges.\n");
crash_mem_ranges = 0;
@@ -928,7 +981,9 @@ static void fadump_setup_crash_memory_ranges(void)
* specified during fadump registration. We need to create a separate
* program header for this chunk with the correct offset.
*/
fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
if (ret)
return ret;
for_each_memblock(memory, reg) {
start = (unsigned long long)reg->base;
@@ -948,8 +1003,12 @@ static void fadump_setup_crash_memory_ranges(void)
}
/* add this range excluding the reserved dump area. */
fadump_exclude_reserved_area(start, end);
ret = fadump_exclude_reserved_area(start, end);
if (ret)
return ret;
}
return 0;
}
/*
@@ -1072,6 +1131,7 @@ static int register_fadump(void)
{
unsigned long addr;
void *vaddr;
int ret;
/*
* If no memory is reserved then we can not register for firmware-
@@ -1080,7 +1140,9 @@
if (!fw_dump.reserve_dump_area_size)
return -ENODEV;
fadump_setup_crash_memory_ranges();
ret = fadump_setup_crash_memory_ranges();
if (ret)
return ret;
addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
/* Initialize fadump crash info header. */
@@ -1158,6 +1220,7 @@ void fadump_cleanup(void)
} else if (fw_dump.dump_registered) {
/* Un-register Firmware-assisted dump if it was registered. */
fadump_unregister_dump(&fdm);
free_crash_memory_ranges();
}
}

@@ -130,6 +130,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
long i, j, ret = 0, locked_entries = 0;
unsigned int pageshift;
unsigned long flags;
unsigned long cur_ua;
struct page *page = NULL;
mutex_lock(&mem_list_mutex);
@@ -178,7 +179,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
}
for (i = 0; i < entries; ++i) {
if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
cur_ua = ua + (i << PAGE_SHIFT);
if (1 != get_user_pages_fast(cur_ua,
1/* pages */, 1/* iswrite */, &page)) {
ret = -EFAULT;
for (j = 0; j < i; ++j)
@@ -197,7 +199,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
if (is_migrate_cma_page(page)) {
if (mm_iommu_move_page_from_cma(page))
goto populate;
if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
if (1 != get_user_pages_fast(cur_ua,
1/* pages */, 1/* iswrite */,
&page)) {
ret = -EFAULT;
@@ -211,20 +213,21 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
}
populate:
pageshift = PAGE_SHIFT;
if (PageCompound(page)) {
if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
pte_t *pte;
struct page *head = compound_head(page);
unsigned int compshift = compound_order(head);
unsigned int pteshift;
local_irq_save(flags); /* disables as well */
pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
local_irq_restore(flags);
pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);
/* Double check it is still the same pinned page */
if (pte && pte_page(*pte) == head &&
pageshift == compshift)
pageshift = max_t(unsigned int, pageshift,
pteshift == compshift + PAGE_SHIFT)
pageshift = max_t(unsigned int, pteshift,
PAGE_SHIFT);
local_irq_restore(flags);
}
mem->pageshift = min(mem->pageshift, pageshift);
mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;

@@ -3286,12 +3286,49 @@ static void pnv_pci_ioda_create_dbgfs(void)
#endif /* CONFIG_DEBUG_FS */
}
static void pnv_pci_enable_bridge(struct pci_bus *bus)
{
struct pci_dev *dev = bus->self;
struct pci_bus *child;
/* Empty bus ? bail */
if (list_empty(&bus->devices))
return;
/*
* If there's a bridge associated with that bus enable it. This works
* around races in the generic code if the enabling is done during
* parallel probing. This can be removed once those races have been
* fixed.
*/
if (dev) {
int rc = pci_enable_device(dev);
if (rc)
pci_err(dev, "Error enabling bridge (%d)\n", rc);
pci_set_master(dev);
}
/* Perform the same to child busses */
list_for_each_entry(child, &bus->children, node)
pnv_pci_enable_bridge(child);
}
static void pnv_pci_enable_bridges(void)
{
struct pci_controller *hose;
list_for_each_entry(hose, &hose_list, list_node)
pnv_pci_enable_bridge(hose->bus);
}
static void pnv_pci_ioda_fixup(void)
{
pnv_pci_ioda_setup_PEs();
pnv_pci_ioda_setup_iommu_api();
pnv_pci_ioda_create_dbgfs();
pnv_pci_enable_bridges();
#ifdef CONFIG_EEH
eeh_init();
eeh_addr_cache_build();

@@ -360,7 +360,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
}
savep = __va(regs->gpr[3]);
regs->gpr[3] = savep[0]; /* restore original r3 */
regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
/* If it isn't an extended log we can use the per cpu 64bit buffer */
h = (struct rtas_error_log *)&savep[1];

@@ -204,23 +204,27 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
asmlinkage long sys_getdomainname(char __user *name, int len)
{
int nlen, err;
int nlen, err;
char tmp[__NEW_UTS_LEN + 1];
if (len < 0)
return -EINVAL;
down_read(&uts_sem);
down_read(&uts_sem);
nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL;
if (nlen > len)
goto out;
goto out_unlock;
memcpy(tmp, utsname()->domainname, nlen);
err = -EFAULT;
if (!copy_to_user(name, utsname()->domainname, nlen))
err = 0;
up_read(&uts_sem);
out:
if (copy_to_user(name, tmp, nlen))
return -EFAULT;
return 0;
out_unlock:
up_read(&uts_sem);
return err;
}

@@ -527,23 +527,27 @@ extern void check_pending(int signum);
SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
int nlen, err;
int nlen, err;
char tmp[__NEW_UTS_LEN + 1];
if (len < 0)
return -EINVAL;
down_read(&uts_sem);
down_read(&uts_sem);
nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL;
if (nlen > len)
goto out;
goto out_unlock;
memcpy(tmp, utsname()->domainname, nlen);
err = -EFAULT;
if (!copy_to_user(name, utsname()->domainname, nlen))
err = 0;
up_read(&uts_sem);
out:
if (copy_to_user(name, tmp, nlen))
return -EFAULT;
return 0;
out_unlock:
up_read(&uts_sem);
return err;
}

@@ -532,7 +532,7 @@ static int bzImage64_cleanup(void *loader_data)
static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
{
return verify_pefile_signature(kernel, kernel_len,
NULL,
VERIFY_USE_SECONDARY_KEYRING,
VERIFYING_KEXEC_PE_SIGNATURE);
}
#endif

@@ -200,12 +200,14 @@ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_
static const struct {
const char *option;
enum vmx_l1d_flush_state cmd;
bool for_parse;
} vmentry_l1d_param[] = {
{"auto", VMENTER_L1D_FLUSH_AUTO},
{"never", VMENTER_L1D_FLUSH_NEVER},
{"cond", VMENTER_L1D_FLUSH_COND},
{"always", VMENTER_L1D_FLUSH_ALWAYS},
[VMENTER_L1D_FLUSH_AUTO] = {"auto", true},
[VMENTER_L1D_FLUSH_NEVER] = {"never", true},
[VMENTER_L1D_FLUSH_COND] = {"cond", true},
[VMENTER_L1D_FLUSH_ALWAYS] = {"always", true},
[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
};
#define L1D_CACHE_ORDER 4
@@ -289,8 +291,9 @@ static int vmentry_l1d_flush_parse(const char *s)
if (s) {
for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
if (sysfs_streq(s, vmentry_l1d_param[i].option))
return vmentry_l1d_param[i].cmd;
if (vmentry_l1d_param[i].for_parse &&
sysfs_streq(s, vmentry_l1d_param[i].option))
return i;
}
}
return -EINVAL;
@@ -300,13 +303,13 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
{
int l1tf, ret;
if (!boot_cpu_has(X86_BUG_L1TF))
return 0;
l1tf = vmentry_l1d_flush_parse(s);
if (l1tf < 0)
return l1tf;
if (!boot_cpu_has(X86_BUG_L1TF))
return 0;
/*
* Has vmx_init() run already? If not then this is the pre init
* parameter parsing. In that case just store the value and let
@@ -326,6 +329,9 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
{
if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
return sprintf(s, "???\n");
return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
}

@@ -31,16 +31,32 @@
*
*/
.macro __loop_cache_all ar at insn size line_width
.macro __loop_cache_unroll ar at insn size line_width max_immed
.if (1 << (\line_width)) > (\max_immed)
.set _reps, 1
.elseif (2 << (\line_width)) > (\max_immed)
.set _reps, 2
.else
.set _reps, 4
.endif
__loopi \ar, \at, \size, (_reps << (\line_width))
.set _index, 0
.rep _reps
\insn \ar, _index << (\line_width)
.set _index, _index + 1
.endr
__endla \ar, \at, _reps << (\line_width)
.endm
.macro __loop_cache_all ar at insn size line_width max_immed
movi \ar, 0
__loopi \ar, \at, \size, (4 << (\line_width))
\insn \ar, 0 << (\line_width)
\insn \ar, 1 << (\line_width)
\insn \ar, 2 << (\line_width)
\insn \ar, 3 << (\line_width)
__endla \ar, \at, 4 << (\line_width)
__loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
.endm
@@ -57,14 +73,9 @@
.endm
.macro __loop_cache_page ar at insn line_width
.macro __loop_cache_page ar at insn line_width max_immed
__loopi \ar, \at, PAGE_SIZE, 4 << (\line_width)
\insn \ar, 0 << (\line_width)
\insn \ar, 1 << (\line_width)
\insn \ar, 2 << (\line_width)
\insn \ar, 3 << (\line_width)
__endla \ar, \at, 4 << (\line_width)
__loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
.endm
@@ -72,7 +83,8 @@
.macro ___unlock_dcache_all ar at
#if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
XCHAL_DCACHE_LINEWIDTH 240
#endif
.endm
@@ -81,7 +93,8 @@
.macro ___unlock_icache_all ar at
#if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
XCHAL_ICACHE_LINEWIDTH 240
#endif
.endm
@@ -90,7 +103,8 @@
.macro ___flush_invalidate_dcache_all ar at
#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
XCHAL_DCACHE_LINEWIDTH 240
#endif
.endm
@@ -99,7 +113,8 @@
.macro ___flush_dcache_all ar at
#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
XCHAL_DCACHE_LINEWIDTH 240
#endif
.endm
@@ -108,8 +123,8 @@
.macro ___invalidate_dcache_all ar at
#if XCHAL_DCACHE_SIZE
__loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
XCHAL_DCACHE_LINEWIDTH
__loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
@@ -118,8 +133,8 @@
.macro ___invalidate_icache_all ar at
#if XCHAL_ICACHE_SIZE
__loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
XCHAL_ICACHE_LINEWIDTH
__loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
XCHAL_ICACHE_LINEWIDTH 1020
#endif
.endm
@@ -166,7 +181,7 @@
.macro ___flush_invalidate_dcache_page ar as
#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
@@ -175,7 +190,7 @@
.macro ___flush_dcache_page ar as
#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
@@ -184,7 +199,7 @@
.macro ___invalidate_dcache_page ar as
#if XCHAL_DCACHE_SIZE
__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
#endif
.endm
@ -193,7 +208,7 @@
.macro ___invalidate_icache_page ar as
#if XCHAL_ICACHE_SIZE
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
#endif
.endm

@@ -887,7 +887,8 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
if (ret)
return ret;
return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
return ret ?: nbytes;
}
static int bfqg_print_stat(struct seq_file *sf, void *v)

@@ -1025,6 +1025,7 @@ out_exit_flush_rq:
q->exit_rq_fn(q, q->fq->flush_rq);
out_free_flush_queue:
blk_free_flush_queue(q->fq);
q->fq = NULL;
return -ENOMEM;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -3458,9 +3459,11 @@
*/
void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
{
/* not support for RQF_PM and ->rpm_status in blk-mq yet */
if (q->mq_ops)
/* Don't enable runtime PM for blk-mq until it is ready */
if (q->mq_ops) {
pm_runtime_disable(dev);
return;
}
q->dev = dev;
q->rpm_status = RPM_ACTIVE;

@@ -15,6 +15,7 @@
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/verification.h>
#include <keys/asymmetric-type.h>
#include <keys/system_keyring.h>
#include <crypto/pkcs7.h>
@@ -230,7 +231,7 @@ int verify_pkcs7_signature(const void *data, size_t len,
if (!trusted_keys) {
trusted_keys = builtin_trusted_keys;
} else if (trusted_keys == (void *)1UL) {
} else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) {
#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
trusted_keys = secondary_trusted_keys;
#else

@@ -62,7 +62,7 @@ static int pkcs7_preparse(struct key_preparsed_payload *prep)
return verify_pkcs7_signature(NULL, 0,
prep->data, prep->datalen,
(void *)1UL, usage,
VERIFY_USE_SECONDARY_KEYRING, usage,
pkcs7_view_content, prep);
}

@@ -321,6 +321,7 @@ static ssize_t backing_dev_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
char *file_name;
size_t sz;
struct file *backing_dev = NULL;
struct inode *inode;
struct address_space *mapping;
@@ -341,7 +342,11 @@ static ssize_t backing_dev_store(struct device *dev,
goto out;
}
strlcpy(file_name, buf, len);
strlcpy(file_name, buf, PATH_MAX);
/* ignore trailing newline */
sz = strlen(file_name);
if (sz > 0 && file_name[sz - 1] == '\n')
file_name[sz - 1] = 0x00;
backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
if (IS_ERR(backing_dev)) {

@@ -555,12 +555,20 @@ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
struct policy_dbs_info *policy_dbs = policy->governor_data;
struct policy_dbs_info *policy_dbs;
/* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
mutex_lock(&gov_dbs_data_mutex);
policy_dbs = policy->governor_data;
if (!policy_dbs)
goto out;
mutex_lock(&policy_dbs->update_mutex);
cpufreq_policy_apply_limits(policy);
gov_update_sample_delay(policy_dbs, 0);
mutex_unlock(&policy_dbs->update_mutex);
out:
mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);

@@ -350,10 +350,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
int ret = 0;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(ablkcipher,
CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
return -EINVAL;
goto badkey;
}
memcpy(ctx->key, key, keylen);
@@ -388,7 +386,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return ret;
badkey:
crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return 0;
return -EINVAL;
}
/*

@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}
static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}
/* RSA Job Completion handler */
@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
goto unmap_p;
}
pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp1_dma)) {
dev_err(dev, "Unable to map RSA tmp1 memory\n");
goto unmap_q;
}
pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp2_dma)) {
dev_err(dev, "Unable to map RSA tmp2 memory\n");
goto unmap_tmp1;
@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
return 0;
unmap_tmp1:
dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
goto unmap_dq;
}
pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp1_dma)) {
dev_err(dev, "Unable to map RSA tmp1 memory\n");
goto unmap_qinv;
}
pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, pdb->tmp2_dma)) {
dev_err(dev, "Unable to map RSA tmp2 memory\n");
goto unmap_tmp1;
@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
return 0;
unmap_tmp1:
dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:

@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
/* Unmap just-run descriptor so we can post-process */
dma_unmap_single(dev, jrp->outring[hw_idx].desc,
dma_unmap_single(dev,
caam_dma_to_cpu(jrp->outring[hw_idx].desc),
jrp->entinfo[sw_idx].desc_size,
DMA_TO_DEVICE);

@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
ret = crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
} else {
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
aes_p8_cbc_encrypt(walk.src.virt.addr,
walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK,
&ctx->enc_key, walk.iv, 1);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
}
return ret;
@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
ret = crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
} else {
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
aes_p8_cbc_encrypt(walk.src.virt.addr,
walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK,
&ctx->dec_key, walk.iv, 0);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
}
return ret;

@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
} else {
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
iv = walk.iv;
memset(tweak, 0, AES_BLOCK_SIZE);
aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
while ((nbytes = walk.nbytes)) {
preempt_disable();
pagefault_disable();
enable_kernel_vsx();
if (enc)
aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
else
aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
disable_kernel_vsx();
pagefault_enable();
preempt_enable();
}
return ret;
}

@@ -433,8 +433,8 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
return index;
spin_lock_irqsave(&edev->lock, flags);
state = !!(edev->state & BIT(index));
spin_unlock_irqrestore(&edev->lock, flags);
/*
* Call functions in a raw notifier chain for the specific one
@@ -448,6 +448,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
*/
raw_notifier_call_chain(&edev->nh_all, state, edev);
spin_lock_irqsave(&edev->lock, flags);
/* This could be in interrupt handler */
prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
if (!prop_buf) {

@@ -782,6 +782,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
if (!args->user_size)
return -EINVAL;
if (offset_in_page(args->user_ptr | args->user_size))
return -EINVAL;

@@ -541,11 +541,8 @@ static void reset_channel_cb(void *arg)
channel->onchannel_callback = NULL;
}
static int vmbus_close_internal(struct vmbus_channel *channel)
void vmbus_reset_channel_cb(struct vmbus_channel *channel)
{
struct vmbus_channel_close_channel *msg;
int ret;
/*
* vmbus_on_event(), running in the per-channel tasklet, can race
* with vmbus_close_internal() in the case of SMP guest, e.g., when
@@ -555,6 +552,29 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
*/
tasklet_disable(&channel->callback_event);
channel->sc_creation_callback = NULL;
/* Stop the callback asap */
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu, reset_channel_cb,
channel, true);
} else {
reset_channel_cb(channel);
put_cpu();
}
/* Re-enable tasklet for use on re-open */
tasklet_enable(&channel->callback_event);
}
static int vmbus_close_internal(struct vmbus_channel *channel)
{
struct vmbus_channel_close_channel *msg;
int ret;
vmbus_reset_channel_cb(channel);
/*
* In case a device driver's probe() fails (e.g.,
* util_probe() -> vmbus_open() returns -ENOMEM) and the device is
@@ -568,16 +588,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
}
channel->state = CHANNEL_OPEN_STATE;
channel->sc_creation_callback = NULL;
/* Stop callback and cancel the timer asap */
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu, reset_channel_cb,
channel, true);
} else {
reset_channel_cb(channel);
put_cpu();
}
/* Send a closing message */
@@ -620,8 +630,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
out:
/* re-enable tasklet for use on re-open */
tasklet_enable(&channel->callback_event);
return ret;
}

@@ -881,6 +881,12 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
return;
}
/*
* Before setting channel->rescind in vmbus_rescind_cleanup(), we
* should make sure the channel callback is not running any more.
*/
vmbus_reset_channel_cb(channel);
/*
* Now wait for offer handling to complete.
*/

@@ -797,6 +797,7 @@ static int sca3000_write_raw(struct iio_dev *indio_dev,
mutex_lock(&st->lock);
ret = sca3000_write_3db_freq(st, val);
mutex_unlock(&st->lock);
return ret;
default:
return -EINVAL;
}

@@ -508,7 +508,7 @@ static ssize_t ad9523_store(struct device *dev,
return ret;
if (!state)
return 0;
return len;
mutex_lock(&indio_dev->mlock);
switch ((u32)this_attr->address) {
@@ -642,7 +642,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
AD9523_CLK_DIST_DIV_REV(ret);
*val = code / 1000000;
*val2 = (code % 1000000) * 10;
*val2 = code % 1000000;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;

@@ -276,6 +276,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
if (wqe->wr.opcode != IB_WR_RDMA_READ &&
wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
wqe->status = IB_WC_FATAL_ERR;
return COMPST_ERROR;
}
reset_retry_counters(qp);

@@ -1713,8 +1713,7 @@ static bool srpt_close_ch(struct srpt_rdma_ch *ch)
int ret;
if (!srpt_set_ch_state(ch, CH_DRAINING)) {
pr_debug("%s-%d: already closed\n", ch->sess_name,
ch->qp->qp_num);
pr_debug("%s: already closed\n", ch->sess_name);
return false;
}

@@ -1336,8 +1336,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
qi_submit_sync(&desc, iommu);
}
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
u64 addr, unsigned mask)
void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u16 qdep, u64 addr, unsigned mask)
{
struct qi_desc desc;
@@ -1352,7 +1352,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
qdep = 0;
desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
QI_DIOTLB_TYPE;
QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
qi_submit_sync(&desc, iommu);
}

@@ -422,6 +422,7 @@ struct device_domain_info {
struct list_head global; /* link to global list */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
u16 pfsid; /* SRIOV physical function source ID */
u8 pasid_supported:3;
u8 pasid_enabled:1;
u8 pri_supported:1;
@@ -1502,6 +1503,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
return;
pdev = to_pci_dev(info->dev);
/* For IOMMU that supports device IOTLB throttling (DIT), we assign
* PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
* queue depth at PF level. If DIT is not set, PFSID will be treated as
* reserved, which should be set to 0.
*/
if (!ecap_dit(info->iommu->ecap))
info->pfsid = 0;
else {
struct pci_dev *pf_pdev;
/* pdev will be returned if device is not a vf */
pf_pdev = pci_physfn(pdev);
info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
}
#ifdef CONFIG_INTEL_IOMMU_SVM
/* The PCIe spec, in its wisdom, declares that the behaviour of
@@ -1567,7 +1582,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
sid = info->bus << 8 | info->devfn;
qdep = info->ats_qdep;
qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
qdep, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}

@@ -195,9 +195,9 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
if (!mb_base)
return -ENOMEM;
mb_base = devm_ioremap_resource(&pdev->dev, regs);
if (IS_ERR(mb_base))
return PTR_ERR(mb_base);
/* Setup mailbox links */
for (i = 0; i < MBOX_CNT; i++) {

@@ -456,8 +456,10 @@ static int bch_writeback_thread(void *arg)
* data on cache. BCACHE_DEV_DETACHING flag is set in
* bch_cached_dev_detach().
*/
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
up_write(&dc->writeback_lock);
break;
}
}
up_write(&dc->writeback_lock);

@@ -362,7 +362,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
disk_super->version = cpu_to_le32(cmd->version);
memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
disk_super->policy_hint_size = 0;
disk_super->policy_hint_size = cpu_to_le32(0);
__copy_sm_root(cmd, disk_super);
@@ -700,6 +700,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
@@ -1321,6 +1322,7 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
dm_oblock_t oblock;
unsigned flags;
bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
memcpy(&mapping, mapping_value_le, sizeof(mapping));
@@ -1331,8 +1333,10 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
memcpy(&hint, hint_value_le, sizeof(hint));
}
if (cmd->clean_when_opened)
dirty = flags & M_DIRTY;
r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY,
r = fn(context, oblock, to_cblock(cb), dirty,
le32_to_cpu(hint), hints_valid);
if (r) {
DMERR("policy couldn't load cache block %llu",
@@ -1360,7 +1364,7 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
dm_oblock_t oblock;
unsigned flags;
bool dirty;
bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
memcpy(&mapping, mapping_value_le, sizeof(mapping));
@@ -1371,8 +1375,9 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
memcpy(&hint, hint_value_le, sizeof(hint));
}
if (cmd->clean_when_opened)
dirty = dm_bitset_cursor_get_value(dirty_cursor);
dirty = dm_bitset_cursor_get_value(dirty_cursor);
r = fn(context, oblock, to_cblock(cb), dirty,
le32_to_cpu(hint), hints_valid);
if (r) {

@@ -3072,11 +3072,11 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
*/
limits->max_segment_size = PAGE_SIZE;
if (cc->sector_size != (1 << SECTOR_SHIFT)) {
limits->logical_block_size = cc->sector_size;
limits->physical_block_size = cc->sector_size;
blk_limits_io_min(limits, cc->sector_size);
}
limits->logical_block_size =
max_t(unsigned short, limits->logical_block_size, cc->sector_size);
limits->physical_block_size =
max_t(unsigned, limits->physical_block_size, cc->sector_size);
limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
}
static struct target_type crypt_target = {

@@ -177,7 +177,7 @@ struct dm_integrity_c {
__u8 sectors_per_block;
unsigned char mode;
bool suspending;
int suspending;
int failed;
@@ -2209,7 +2209,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
del_timer_sync(&ic->autocommit_timer);
ic->suspending = true;
WRITE_ONCE(ic->suspending, 1);
queue_work(ic->commit_wq, &ic->commit_work);
drain_workqueue(ic->commit_wq);
@@ -2219,7 +2219,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
dm_integrity_flush_buffers(ic);
}
ic->suspending = false;
WRITE_ONCE(ic->suspending, 0);
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));

@@ -2514,6 +2514,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
case PM_WRITE:
if (old_mode != new_mode)
notify_of_pool_mode_change(pool, "write");
if (old_mode == PM_OUT_OF_DATA_SPACE)
cancel_delayed_work_sync(&pool->no_space_timeout);
pool->out_of_data_space = false;
pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
dm_pool_metadata_read_write(pool->pmd);

@@ -871,7 +871,7 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
f = &format->format;
f->width = decoder->rect.width;
f->height = decoder->rect.height;
f->height = decoder->rect.height / 2;
f->code = MEDIA_BUS_FMT_UYVY8_2X8;
f->field = V4L2_FIELD_ALTERNATE;

@@ -49,7 +49,7 @@ static struct regmap_config hi655x_regmap_config = {
.reg_bits = 32,
.reg_stride = HI655X_STRIDE,
.val_bits = 8,
.max_register = HI655X_BUS_ADDR(0xFFF),
.max_register = HI655X_BUS_ADDR(0x400) - HI655X_STRIDE,
};
static struct resource pwrkey_resources[] = {

@@ -287,7 +287,7 @@ int cxl_adapter_context_get(struct cxl *adapter)
int rc;
rc = atomic_inc_unless_negative(&adapter->contexts_num);
return rc >= 0 ? 0 : -EBUSY;
return rc ? 0 : -EBUSY;
}
void cxl_adapter_context_put(struct cxl *adapter)

@@ -341,7 +341,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
success = false;
}
if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
/*
* 2MB pages are only supported with batching. If batching is for some
* reason disabled, do not use 2MB pages, since otherwise the legacy
* mechanism is used with 2MB pages, causing a failure.
*/
if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
(b->capabilities & VMW_BALLOON_BATCHED_CMDS))
b->supported_page_sizes = 2;
else
b->supported_page_sizes = 1;
@@ -450,7 +456,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
pfn32 = (u32)pfn;
if (pfn32 != pfn)
return -1;
return -EINVAL;
STATS_INC(b->stats.lock[false]);
@@ -460,7 +466,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
STATS_INC(b->stats.lock_fail[false]);
return 1;
return -EIO;
}
static int vmballoon_send_batched_lock(struct vmballoon *b,
@@ -597,11 +603,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
target);
if (locked > 0) {
if (locked) {
STATS_INC(b->stats.refused_alloc[false]);
if (hv_status == VMW_BALLOON_ERROR_RESET ||
hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
if (locked == -EIO &&
(hv_status == VMW_BALLOON_ERROR_RESET ||
hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
vmballoon_free_page(page, false);
return -EIO;
}
@@ -617,7 +624,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
} else {
vmballoon_free_page(page, false);
}
return -EIO;
return locked;
}
/* track allocated page */
@@ -1029,29 +1036,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
*/
static int vmballoon_vmci_init(struct vmballoon *b)
{
int error = 0;
unsigned long error, dummy;
if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
error = vmci_doorbell_create(&b->vmci_doorbell,
VMCI_FLAG_DELAYED_CB,
VMCI_PRIVILEGE_FLAG_RESTRICTED,
vmballoon_doorbell, b);
if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
return 0;
if (error == VMCI_SUCCESS) {
VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
b->vmci_doorbell.context,
b->vmci_doorbell.resource, error);
STATS_INC(b->stats.doorbell_set);
}
}
error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
VMCI_PRIVILEGE_FLAG_RESTRICTED,
vmballoon_doorbell, b);
if (error != 0) {
vmballoon_vmci_cleanup(b);
if (error != VMCI_SUCCESS)
goto fail;
return -EIO;
}
error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
b->vmci_doorbell.resource, dummy);
STATS_INC(b->stats.doorbell_set);
if (error != VMW_BALLOON_SUCCESS)
goto fail;
return 0;
fail:
vmballoon_vmci_cleanup(b);
return -EIO;
}
/*
@@ -1289,7 +1297,14 @@ static int __init vmballoon_init(void)
return 0;
}
module_init(vmballoon_init);
/*
* Using late_initcall() instead of module_init() allows the balloon to use the
* VMCI doorbell even when the balloon is built into the kernel. Otherwise the
* VMCI is probed only after the balloon is initialized. If the balloon is used
* as a module, late_initcall() is equivalent to module_init().
*/
late_initcall(vmballoon_init);
static void __exit vmballoon_exit(void)
{

@@ -44,7 +44,7 @@
/* DM_CM_RST */
#define RST_DTRANRST1 BIT(9)
#define RST_DTRANRST0 BIT(8)
#define RST_RESERVED_BITS GENMASK_ULL(32, 0)
#define RST_RESERVED_BITS GENMASK_ULL(31, 0)
/* DM_CM_INFO1 and DM_CM_INFO1_MASK */
#define INFO1_CLEAR 0

@@ -104,6 +104,7 @@ struct lbs_private {
u8 fw_ready;
u8 surpriseremoved;
u8 setup_fw_on_resume;
u8 power_up_on_resume;
int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
void (*reset_card) (struct lbs_private *priv);
int (*power_save) (struct lbs_private *priv);

@@ -1290,15 +1290,23 @@ static void if_sdio_remove(struct sdio_func *func)
static int if_sdio_suspend(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
int ret;
struct if_sdio_card *card = sdio_get_drvdata(func);
struct lbs_private *priv = card->priv;
int ret;
mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
priv->power_up_on_resume = false;
/* If we're powered off anyway, just let the mmc layer remove the
* card. */
if (!lbs_iface_active(card->priv))
return -ENOSYS;
if (!lbs_iface_active(priv)) {
if (priv->fw_ready) {
priv->power_up_on_resume = true;
if_sdio_power_off(card);
}
return 0;
}
dev_info(dev, "%s: suspend: PM flags = 0x%x\n",
sdio_func_id(func), flags);
@@ -1306,9 +1314,14 @@ static int if_sdio_suspend(struct device *dev)
/* If we aren't being asked to wake on anything, we should bail out
* and let the SD stack power down the card.
*/
if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
dev_info(dev, "Suspend without wake params -- powering down card\n");
return -ENOSYS;
if (priv->fw_ready) {
priv->power_up_on_resume = true;
if_sdio_power_off(card);
}
return 0;
}
if (!(flags & MMC_PM_KEEP_POWER)) {
@@ -1321,7 +1334,7 @@
if (ret)
return ret;
ret = lbs_suspend(card->priv);
ret = lbs_suspend(priv);
if (ret)
return ret;
@@ -1336,6 +1349,11 @@ static int if_sdio_resume(struct device *dev)
dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func));
if (card->priv->power_up_on_resume) {
if_sdio_power_on(card);
wait_event(card->pwron_waitq, card->priv->fw_ready);
}
ret = lbs_resume(card->priv);
return ret;

@@ -808,9 +808,9 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
* overshoots the remainder by 4 bytes, assume it was
* including 'status'.
*/
if (out_field[1] - 8 == remainder)
if (out_field[1] - 4 == remainder)
return remainder;
return out_field[1] - 4;
return out_field[1] - 8;
} else if (cmd == ND_CMD_CALL) {
struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;

@@ -33,10 +33,6 @@
#define TBCTL 0x00
#define TBPRD 0x0A
#define TBCTL_RUN_MASK (BIT(15) | BIT(14))
#define TBCTL_STOP_NEXT 0
#define TBCTL_STOP_ON_CYCLE BIT(14)
#define TBCTL_FREE_RUN (BIT(15) | BIT(14))
#define TBCTL_PRDLD_MASK BIT(3)
#define TBCTL_PRDLD_SHDW 0
#define TBCTL_PRDLD_IMDT BIT(3)
@@ -360,7 +356,7 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Channels polarity can be configured from action qualifier module */
configure_polarity(pc, pwm->hwpwm);
/* Enable TBCLK before enabling PWM device */
/* Enable TBCLK */
ret = clk_enable(pc->tbclk);
if (ret) {
dev_err(chip->dev, "Failed to enable TBCLK for %s: %d\n",
@@ -368,9 +364,6 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
return ret;
}
/* Enable time counter for free_run */
ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN);
return 0;
}
@@ -388,6 +381,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
aqcsfrc_mask = AQCSFRC_CSFA_MASK;
}
/* Update shadow register first before modifying active register */
ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
/*
* Changes to immediate action on Action Qualifier. This puts
* Action Qualifier control on PWM output from next TBCLK
@@ -400,9 +395,6 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
/* Disabling TBCLK on PWM disable */
clk_disable(pc->tbclk);
/* Stop Time base counter */
ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT);
/* Disable clock on PWM disable */
pm_runtime_put_sync(chip->dev);
}

@@ -817,13 +817,6 @@ static int omap_rtc_probe(struct platform_device *pdev)
goto err;
}
if (rtc->is_pmic_controller) {
if (!pm_power_off) {
omap_rtc_power_off_rtc = rtc;
pm_power_off = omap_rtc_power_off;
}
}
/* Support ext_wakeup pinconf */
rtc_pinctrl_desc.name = dev_name(&pdev->dev);
@@ -833,6 +826,13 @@ static int omap_rtc_probe(struct platform_device *pdev)
return PTR_ERR(rtc->pctldev);
}
if (rtc->is_pmic_controller) {
if (!pm_power_off) {
omap_rtc_power_off_rtc = rtc;
pm_power_off = omap_rtc_power_off;
}
}
return 0;
err:

@@ -319,7 +319,7 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
*/
if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
CDNS_SPI_IXR_TXFULL)
usleep_range(10, 20);
udelay(10);
if (xspi->txbuf)
cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);

@@ -217,7 +217,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
pdata = &dspi->pdata;
/* program delay transfers if tx_delay is non zero */
if (spicfg->wdelay)
if (spicfg && spicfg->wdelay)
spidat1 |= SPIDAT1_WDEL;
/*

@@ -1006,21 +1006,6 @@ static int dspi_probe(struct platform_device *pdev)
goto out_master_put;
}
dspi_init(dspi);
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq < 0) {
dev_err(&pdev->dev, "can't get platform irq\n");
ret = dspi->irq;
goto out_master_put;
}
ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
pdev->name, dspi);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
goto out_master_put;
}
dspi->clk = devm_clk_get(&pdev->dev, "dspi");
if (IS_ERR(dspi->clk)) {
ret = PTR_ERR(dspi->clk);
@@ -1031,6 +1016,21 @@ static int dspi_probe(struct platform_device *pdev)
if (ret)
goto out_master_put;
dspi_init(dspi);
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq < 0) {
dev_err(&pdev->dev, "can't get platform irq\n");
ret = dspi->irq;
goto out_clk_put;
}
ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
pdev->name, dspi);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
goto out_clk_put;
}
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
ret = dspi_request_dma(dspi, res->start);
if (ret < 0) {

@@ -1480,6 +1480,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP },
/* ICL-LP */
{ PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },

@@ -195,6 +195,7 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
{
struct uart_port *uport = uart_port_check(state);
unsigned long page;
unsigned long flags = 0;
int retval = 0;
if (uport->type == PORT_UNKNOWN)
@@ -209,15 +210,18 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
* Initialise and allocate the transmit and temporary
* buffer.
*/
if (!state->xmit.buf) {
/* This is protected by the per port mutex */
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
page = get_zeroed_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
uart_port_lock(state, flags);
if (!state->xmit.buf) {
state->xmit.buf = (unsigned char *) page;
uart_circ_clear(&state->xmit);
} else {
free_page(page);
}
uart_port_unlock(uport, flags);
retval = uport->ops->startup(uport);
if (retval == 0) {
@@ -276,6 +280,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
{
struct uart_port *uport = uart_port_check(state);
struct tty_port *port = &state->port;
unsigned long flags = 0;
/*
* Set the TTY IO error marker
@@ -308,10 +313,12 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
/*
* Free the transmit buffer page.
*/
uart_port_lock(state, flags);
if (state->xmit.buf) {
free_page((unsigned long)state->xmit.buf);
state->xmit.buf = NULL;
}
uart_port_unlock(uport, flags);
}
/**

@@ -1716,12 +1716,12 @@ static int do_register_framebuffer(struct fb_info *fb_info)
return 0;
}
static int do_unregister_framebuffer(struct fb_info *fb_info)
static int unbind_console(struct fb_info *fb_info)
{
struct fb_event event;
int i, ret = 0;
int ret;
int i = fb_info->node;
i = fb_info->node;
if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
return -EINVAL;
@@ -1736,17 +1736,29 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
 	unlock_fb_info(fb_info);
 	console_unlock();
 
+	return ret;
+}
+
+static int __unlink_framebuffer(struct fb_info *fb_info);
+
+static int do_unregister_framebuffer(struct fb_info *fb_info)
+{
+	struct fb_event event;
+	int ret;
+
+	ret = unbind_console(fb_info);
+
 	if (ret)
 		return -EINVAL;
 
 	pm_vt_switch_unregister(fb_info->dev);
 
-	unlink_framebuffer(fb_info);
+	__unlink_framebuffer(fb_info);
+
 	if (fb_info->pixmap.addr &&
 	    (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
 		kfree(fb_info->pixmap.addr);
 	fb_destroy_modelist(&fb_info->modelist);
-	registered_fb[i] = NULL;
+	registered_fb[fb_info->node] = NULL;
 	num_registered_fb--;
 	fb_cleanup_device(fb_info);
 	event.info = fb_info;
@@ -1759,7 +1771,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
 	return 0;
 }
 
-int unlink_framebuffer(struct fb_info *fb_info)
+static int __unlink_framebuffer(struct fb_info *fb_info)
 {
 	int i;
@@ -1771,6 +1783,20 @@ int unlink_framebuffer(struct fb_info *fb_info)
 		device_destroy(fb_class, MKDEV(FB_MAJOR, i));
 		fb_info->dev = NULL;
 	}
+
+	return 0;
+}
+
+int unlink_framebuffer(struct fb_info *fb_info)
+{
+	int ret;
+
+	ret = __unlink_framebuffer(fb_info);
+	if (ret)
+		return ret;
+
+	unbind_console(fb_info);
+
 	return 0;
 }
 EXPORT_SYMBOL(unlink_framebuffer);


@@ -105,7 +105,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
 {
 	struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
 	struct iov_iter from;
-	int retval;
+	int retval, err;
 
 	iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
@@ -126,7 +126,9 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
 			 retval);
 	else
 		p9_client_write(fid, 0, &from, &retval);
-	p9_client_clunk(fid);
+	err = p9_client_clunk(fid);
+	if (!retval && err)
+		retval = err;
 	return retval;
}
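
The change keeps the original write error but stops swallowing a clunk failure after a successful write. This "first error wins" teardown shape is common; a sketch with hypothetical helpers:

struct demo_handle;
int demo_write(struct demo_handle *h);	/* hypothetical */
int demo_close(struct demo_handle *h);	/* hypothetical */

static int demo_write_and_close(struct demo_handle *h)
{
	int ret, err;

	ret = demo_write(h);	/* primary operation */
	err = demo_close(h);	/* cleanup must run either way */
	if (!ret && err)
		ret = err;	/* surface the cleanup error only if the write succeeded */
	return ret;
}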


@@ -204,7 +204,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
 	chunk = div_u64(offset, dev->chunk_size);
 	div_u64_rem(chunk, dev->nr_children, &chunk_idx);
 
-	if (chunk_idx > dev->nr_children) {
+	if (chunk_idx >= dev->nr_children) {
 		dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
 			__func__, chunk_idx, offset, dev->chunk_size);
 		/* error, should not happen */


@@ -433,11 +433,14 @@ validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
  * a match. If the slot is in use and the sequence numbers match, the
  * client is still waiting for a response to the original request.
  */
-static bool referring_call_exists(struct nfs_client *clp,
+static int referring_call_exists(struct nfs_client *clp,
 				  uint32_t nrclists,
-				  struct referring_call_list *rclists)
+				  struct referring_call_list *rclists,
+				  spinlock_t *lock)
+	__releases(lock)
+	__acquires(lock)
 {
-	bool status = 0;
+	int status = 0;
 	int i, j;
 	struct nfs4_session *session;
 	struct nfs4_slot_table *tbl;
@@ -460,8 +463,10 @@ static bool referring_call_exists(struct nfs_client *clp,
 		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
 			ref = &rclist->rcl_refcalls[j];
 
+			spin_unlock(lock);
 			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
 					ref->rc_sequenceid, HZ >> 1) < 0;
+			spin_lock(lock);
 			if (status)
 				goto out;
 		}
@@ -538,7 +543,8 @@ __be32 nfs4_callback_sequence(void *argp, void *resp,
 	 * related callback was received before the response to the original
 	 * call.
 	 */
-	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
+	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
+				&tbl->slot_tbl_lock) < 0) {
 		status = htonl(NFS4ERR_DELAY);
 		goto out_unlock;
 	}


@@ -547,8 +547,15 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
 		ret = -EIO;
 	return ret;
 out_retry:
-	if (ret == 0)
+	if (ret == 0) {
 		exception->retry = 1;
+		/*
+		 * For NFS4ERR_MOVED, the client transport will need to
+		 * be recomputed after migration recovery has completed.
+		 */
+		if (errorcode == -NFS4ERR_MOVED)
+			rpc_task_release_transport(task);
+	}
 	return ret;
 }


@@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);
 
 /* The generic layer is about to remove the req from the commit list.
  * If this will make the bucket empty, it will need to put the lseg reference.
- * Note this must be called holding i_lock
+ * Note this must be called holding nfsi->commit_mutex
 */
 void
 pnfs_generic_clear_request_commit(struct nfs_page *req,
@@ -149,9 +149,7 @@ restart:
 		if (list_empty(&b->written)) {
 			freeme = b->wlseg;
 			b->wlseg = NULL;
-			spin_unlock(&cinfo->inode->i_lock);
 			pnfs_put_lseg(freeme);
-			spin_lock(&cinfo->inode->i_lock);
 			goto restart;
 		}
 	}
@@ -167,7 +165,7 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
 	LIST_HEAD(pages);
 	int i;
 
-	spin_lock(&cinfo->inode->i_lock);
+	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
 	for (i = idx; i < fl_cinfo->nbuckets; i++) {
 		bucket = &fl_cinfo->buckets[i];
 		if (list_empty(&bucket->committing))
@@ -177,12 +175,12 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
 		list_for_each(pos, &bucket->committing)
 			cinfo->ds->ncommitting--;
 		list_splice_init(&bucket->committing, &pages);
-		spin_unlock(&cinfo->inode->i_lock);
+		mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
 		nfs_retry_commit(&pages, freeme, cinfo, i);
 		pnfs_put_lseg(freeme);
-		spin_lock(&cinfo->inode->i_lock);
+		mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
 	}
-	spin_unlock(&cinfo->inode->i_lock);
+	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
 }
static unsigned int
@@ -222,13 +220,13 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages,
 	struct list_head *pos;
 
 	bucket = &cinfo->ds->buckets[data->ds_commit_index];
-	spin_lock(&cinfo->inode->i_lock);
+	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
 	list_for_each(pos, &bucket->committing)
 		cinfo->ds->ncommitting--;
 	list_splice_init(&bucket->committing, pages);
 	data->lseg = bucket->clseg;
 	bucket->clseg = NULL;
-	spin_unlock(&cinfo->inode->i_lock);
+	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
 }


@@ -623,6 +623,21 @@ static int ovl_fill_real(struct dir_context *ctx, const char *name,
 	return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type);
 }
 
+static bool ovl_is_impure_dir(struct file *file)
+{
+	struct ovl_dir_file *od = file->private_data;
+	struct inode *dir = d_inode(file->f_path.dentry);
+
+	/*
+	 * Only upper dir can be impure, but if we are in the middle of
+	 * iterating a lower real dir, dir could be copied up and marked
+	 * impure. We only want the impure cache if we started iterating
+	 * a real upper dir to begin with.
+	 */
+	return od->is_upper && ovl_test_flag(OVL_IMPURE, dir);
+}
+
 static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
 {
 	int err;
@@ -646,7 +661,7 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx)
 		rdt.parent_ino = stat.ino;
 	}
 
-	if (ovl_test_flag(OVL_IMPURE, d_inode(dir))) {
+	if (ovl_is_impure_dir(file)) {
 		rdt.cache = ovl_cache_get_impure(&file->f_path);
 		if (IS_ERR(rdt.cache))
 			return PTR_ERR(rdt.cache);
@@ -676,7 +691,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
 	 * entries.
 	 */
 	if (ovl_same_sb(dentry->d_sb) &&
-	    (ovl_test_flag(OVL_IMPURE, d_inode(dentry)) ||
+	    (ovl_is_impure_dir(file) ||
 	     OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent)))) {
 		return ovl_iterate_real(file, ctx);
 	}


@@ -18,6 +18,7 @@
 #include <linux/quotaops.h>
 #include <linux/types.h>
 #include <linux/writeback.h>
+#include <linux/nospec.h>
 
 static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
 				     qid_t id)
@@ -703,6 +704,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 	if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
 		return -EINVAL;
+	type = array_index_nospec(type, MAXQUOTAS);
 	/*
 	 * Quota not supported on this fs? Check this before s_quota_types
 	 * since they needn't be set if quota is not supported at all.
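
The existing range check is not enough on its own: under speculation the CPU may run past `type >= MAXQUOTAS` and use the out-of-range value as an index, creating a Spectre v1 gadget. array_index_nospec() clamps the index on the speculative path too. A hedged sketch with a hypothetical lookup table:

#include <linux/nospec.h>

static void *demo_lookup(void **table, unsigned int type)
{
	if (type >= MAXQUOTAS)
		return NULL;
	/* Clamp 'type' even transiently, so the dependent load below
	 * cannot read out of bounds under speculation. */
	type = array_index_nospec(type, MAXQUOTAS);
	return table[type];
}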


@@ -665,6 +665,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
 	spin_lock(&ui->ui_lock);
 	ui->synced_i_size = ui->ui_size;
 	spin_unlock(&ui->ui_lock);
+	if (xent) {
+		spin_lock(&host_ui->ui_lock);
+		host_ui->synced_i_size = host_ui->ui_size;
+		spin_unlock(&host_ui->ui_lock);
+	}
 	mark_inode_clean(c, ui);
 	mark_inode_clean(c, host_ui);
 	return 0;
@@ -1283,11 +1288,10 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
 			       int *new_len)
 {
 	void *buf;
-	int err, compr_type;
-	u32 dlen, out_len, old_dlen;
+	int err, dlen, compr_type, out_len, old_dlen;
 
 	out_len = le32_to_cpu(dn->size);
-	buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
+	buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
 	if (!buf)
 		return -ENOMEM;
@@ -1389,7 +1393,16 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
 		else if (err)
 			goto out_free;
 		else {
-			if (le32_to_cpu(dn->size) <= dlen)
+			int dn_len = le32_to_cpu(dn->size);
+
+			if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
+				ubifs_err(c, "bad data node (block %u, inode %lu)",
+					  blk, inode->i_ino);
+				ubifs_dump_node(c, dn);
+				goto out_free;
+			}
+
+			if (dn_len <= dlen)
 				dlen = 0; /* Nothing to do */
 			else {
 				err = truncate_data_node(c, inode, blk, dn, &dlen);
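
The new check refuses to trust dn->size as read from the medium: a corrupted data node could otherwise feed a zero, negative, or oversized length into the truncation logic. The general rule, as a sketch around a hypothetical on-disk structure:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_node {
	__le32 size;	/* untrusted: comes from the medium */
	u8 data[];
};

static void *demo_copy_data(const struct demo_node *dn, int max_len)
{
	int len = le32_to_cpu(dn->size);

	/* Validate before the length sizes an allocation or copy. */
	if (len <= 0 || len > max_len)
		return ERR_PTR(-EUCLEAN);

	return kmemdup(dn->data, len, GFP_NOFS);
}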


@@ -1091,10 +1091,6 @@ static int scan_check_cb(struct ubifs_info *c,
 		}
 	}
 
-	buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
 	/*
 	 * After an unclean unmount, empty and freeable LEBs
 	 * may contain garbage - do not scan them.
@@ -1113,6 +1109,10 @@ static int scan_check_cb(struct ubifs_info *c,
 		return LPT_SCAN_CONTINUE;
 	}
 
+	buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
 	sleb = ubifs_scan(c, lnum, 0, buf, 0);
 	if (IS_ERR(sleb)) {
 		ret = PTR_ERR(sleb);


@@ -152,6 +152,12 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
 	ui->data_len = size;
 
 	mutex_lock(&host_ui->ui_mutex);
+
+	if (!host->i_nlink) {
+		err = -ENOENT;
+		goto out_noent;
+	}
+
 	host->i_ctime = current_time(host);
 	host_ui->xattr_cnt += 1;
 	host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
@@ -184,6 +190,7 @@ out_cancel:
 	host_ui->xattr_size -= CALC_XATTR_BYTES(size);
 	host_ui->xattr_names -= fname_len(nm);
 	host_ui->flags &= ~UBIFS_CRYPT_FL;
+out_noent:
 	mutex_unlock(&host_ui->ui_mutex);
 out_free:
 	make_bad_inode(inode);
@@ -235,6 +242,12 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
 	mutex_unlock(&ui->ui_mutex);
 
 	mutex_lock(&host_ui->ui_mutex);
+
+	if (!host->i_nlink) {
+		err = -ENOENT;
+		goto out_noent;
+	}
+
 	host->i_ctime = current_time(host);
 	host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
 	host_ui->xattr_size += CALC_XATTR_BYTES(size);
@@ -256,6 +269,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
 out_cancel:
 	host_ui->xattr_size -= CALC_XATTR_BYTES(size);
 	host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
+out_noent:
 	mutex_unlock(&host_ui->ui_mutex);
 	make_bad_inode(inode);
 out_free:
@@ -484,6 +498,12 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
 		return err;
 
 	mutex_lock(&host_ui->ui_mutex);
+
+	if (!host->i_nlink) {
+		err = -ENOENT;
+		goto out_noent;
+	}
+
 	host->i_ctime = current_time(host);
 	host_ui->xattr_cnt -= 1;
 	host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
@@ -503,6 +523,7 @@ out_cancel:
 	host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
 	host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
 	host_ui->xattr_names += fname_len(nm);
+out_noent:
 	mutex_unlock(&host_ui->ui_mutex);
 	ubifs_release_budget(c, &req);
 	make_bad_inode(inode);
@@ -542,6 +563,9 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
 
 	ubifs_assert(inode_is_locked(host));
 
+	if (!host->i_nlink)
+		return -ENOENT;
+
 	if (fname_len(&nm) > UBIFS_MAX_NLEN)
 		return -ENAMETOOLONG;


@@ -541,7 +541,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
 	if (error > 0) {
 		if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
 		    (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
-			posix_acl_fix_xattr_to_user(kvalue, size);
+			posix_acl_fix_xattr_to_user(kvalue, error);
 		if (size && copy_to_user(value, kvalue, error))
 			error = -EFAULT;
 	} else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {


@@ -1026,6 +1026,8 @@ extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
 extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
 				     u32 gpadl_handle);
 
+void vmbus_reset_channel_cb(struct vmbus_channel *channel);
+
 extern int vmbus_recvpacket(struct vmbus_channel *channel,
 				  void *buffer,
 				  u32 bufferlen,

@@ -112,6 +112,7 @@
 * Extended Capability Register
 */
+#define ecap_dit(e)	((e >> 41) & 0x1)
 #define ecap_pasid(e)	((e >> 40) & 0x1)
 #define ecap_pss(e)	((e >> 35) & 0x1f)
 #define ecap_eafs(e)	((e >> 34) & 0x1)
@@ -281,6 +282,7 @@ enum {
 #define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
 #define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
 #define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
 #define QI_DEV_IOTLB_SIZE	1
 #define QI_DEV_IOTLB_MAX_INVS	32
@@ -305,6 +307,7 @@ enum {
 #define QI_DEV_EIOTLB_PASID(p)	(((u64)p) << 32)
 #define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
 #define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
 #define QI_DEV_EIOTLB_MAX_INVS	32
 
 #define QI_PGRP_IDX(idx)	(((u64)(idx)) << 55)
@@ -450,9 +453,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
 			     u8 fm, u64 type);
 extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 			   unsigned int size_order, u64 type);
-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
-			       u64 addr, unsigned mask);
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+			       u16 qdep, u64 addr, unsigned mask);
 
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern int dmar_ir_support(void);


@@ -2292,4 +2292,16 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
 /* provide the legacy pci_dma_* API */
 #include <linux/pci-dma-compat.h>
 
+#define pci_printk(level, pdev, fmt, arg...) \
+	dev_printk(level, &(pdev)->dev, fmt, ##arg)
+
+#define pci_emerg(pdev, fmt, arg...)	dev_emerg(&(pdev)->dev, fmt, ##arg)
+#define pci_alert(pdev, fmt, arg...)	dev_alert(&(pdev)->dev, fmt, ##arg)
+#define pci_crit(pdev, fmt, arg...)	dev_crit(&(pdev)->dev, fmt, ##arg)
+#define pci_err(pdev, fmt, arg...)	dev_err(&(pdev)->dev, fmt, ##arg)
+#define pci_warn(pdev, fmt, arg...)	dev_warn(&(pdev)->dev, fmt, ##arg)
+#define pci_notice(pdev, fmt, arg...)	dev_notice(&(pdev)->dev, fmt, ##arg)
+#define pci_info(pdev, fmt, arg...)	dev_info(&(pdev)->dev, fmt, ##arg)
+#define pci_dbg(pdev, fmt, arg...)	dev_dbg(&(pdev)->dev, fmt, ##arg)
+
 #endif /* LINUX_PCI_H */
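
The wrappers simply forward to the corresponding dev_*() helpers on &pdev->dev, so drivers get consistently prefixed log lines without repeating the dereference. Illustrative use in a hypothetical probe function:

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret = pci_enable_device(pdev);

	if (ret) {
		pci_err(pdev, "enable failed: %d\n", ret);
		return ret;
	}

	pci_info(pdev, "probed, revision %#x\n", pdev->revision);
	return 0;
}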


@@ -156,6 +156,7 @@ int rpc_switch_client_transport(struct rpc_clnt *,
 
 void		rpc_shutdown_client(struct rpc_clnt *);
 void		rpc_release_client(struct rpc_clnt *);
+void		rpc_task_release_transport(struct rpc_task *);
 void		rpc_task_release_client(struct rpc_task *);
 
 int		rpcb_create_local(struct net *);


@@ -12,6 +12,12 @@
 #ifndef _LINUX_VERIFICATION_H
 #define _LINUX_VERIFICATION_H
 
+/*
+ * Indicate that both builtin trusted keys and secondary trusted keys
+ * should be used.
+ */
+#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
+
 /*
 * The use to which an asymmetric key is being put.
 */
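
Callers pass the sentinel where a trusted keyring pointer is expected, and the verification core then consults both the builtin and secondary trusted keyrings. A hedged sketch of how a caller might request that (the function and buffer names are stand-ins):

#include <linux/verification.h>

static int demo_verify_image(const void *image, unsigned long len)
{
	return verify_pkcs7_signature(image, len, NULL, 0,
				      VERIFY_USE_SECONDARY_KEYRING,
				      VERIFYING_KEXEC_PE_SIGNATURE,
				      NULL, NULL);
}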


@@ -88,7 +88,7 @@ struct dlfb_data {
 #define MIN_RAW_PIX_BYTES	2
 #define MIN_RAW_CMD_BYTES	(RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
 
-#define DL_DEFIO_WRITE_DELAY	5 /* fb_deferred_io.delay in jiffies */
+#define DL_DEFIO_WRITE_DELAY	msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
 #define DL_DEFIO_WRITE_DISABLE	(HZ*60) /* "disable" with long delay */
 
 /* remove these once align.h patch is taken into kernel */


@@ -605,6 +605,9 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 	if (!func->old_name || !func->new_func)
 		return -EINVAL;
 
+	if (strlen(func->old_name) >= KSYM_NAME_LEN)
+		return -EINVAL;
+
 	INIT_LIST_HEAD(&func->stack_node);
 	func->patched = false;
 	func->transition = false;
@@ -678,6 +681,9 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
 	if (!obj->funcs)
 		return -EINVAL;
 
+	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
+		return -EINVAL;
+
 	obj->patched = false;
 	obj->mod = NULL;


@@ -105,6 +105,7 @@ config PM_SLEEP
 	def_bool y
 	depends on SUSPEND || HIBERNATE_CALLBACKS
 	select PM
+	select SRCU
 
 config PM_SLEEP_SMP
 	def_bool y


@@ -309,12 +309,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
 	return printk_safe_log_store(s, fmt, args);
 }
 
-void printk_nmi_enter(void)
+void notrace printk_nmi_enter(void)
 {
 	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
 }
 
-void printk_nmi_exit(void)
+void notrace printk_nmi_exit(void)
 {
 	this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
 }


@@ -1178,18 +1178,19 @@ static int override_release(char __user *release, size_t len)
 
 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
 {
-	int errno = 0;
+	struct new_utsname tmp;
 
 	down_read(&uts_sem);
-	if (copy_to_user(name, utsname(), sizeof *name))
-		errno = -EFAULT;
+	memcpy(&tmp, utsname(), sizeof(tmp));
 	up_read(&uts_sem);
+	if (copy_to_user(name, &tmp, sizeof(tmp)))
+		return -EFAULT;
 
-	if (!errno && override_release(name->release, sizeof(name->release)))
-		errno = -EFAULT;
-	if (!errno && override_architecture(name))
-		errno = -EFAULT;
-	return errno;
+	if (override_release(name->release, sizeof(name->release)))
+		return -EFAULT;
+	if (override_architecture(name))
+		return -EFAULT;
+	return 0;
 }
#ifdef __ARCH_WANT_SYS_OLD_UNAME
@@ -1198,55 +1199,46 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
 */
 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
 {
-	int error = 0;
+	struct old_utsname tmp;
 
 	if (!name)
 		return -EFAULT;
 
 	down_read(&uts_sem);
-	if (copy_to_user(name, utsname(), sizeof(*name)))
-		error = -EFAULT;
+	memcpy(&tmp, utsname(), sizeof(tmp));
 	up_read(&uts_sem);
+	if (copy_to_user(name, &tmp, sizeof(tmp)))
+		return -EFAULT;
 
-	if (!error && override_release(name->release, sizeof(name->release)))
-		error = -EFAULT;
-	if (!error && override_architecture(name))
-		error = -EFAULT;
-	return error;
+	if (override_release(name->release, sizeof(name->release)))
+		return -EFAULT;
+	if (override_architecture(name))
+		return -EFAULT;
+	return 0;
 }
 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
 {
-	int error;
+	struct oldold_utsname tmp = {};
 
 	if (!name)
 		return -EFAULT;
 
-	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
-		return -EFAULT;
-
 	down_read(&uts_sem);
-	error = __copy_to_user(&name->sysname, &utsname()->sysname,
-			       __OLD_UTS_LEN);
-	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
-				__OLD_UTS_LEN);
-	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->release, &utsname()->release,
-				__OLD_UTS_LEN);
-	error |= __put_user(0, name->release + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->version, &utsname()->version,
-				__OLD_UTS_LEN);
-	error |= __put_user(0, name->version + __OLD_UTS_LEN);
-	error |= __copy_to_user(&name->machine, &utsname()->machine,
-				__OLD_UTS_LEN);
-	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
+	memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
+	memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
+	memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
+	memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
+	memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
 	up_read(&uts_sem);
+	if (copy_to_user(name, &tmp, sizeof(tmp)))
+		return -EFAULT;
 
-	if (!error && override_architecture(name))
-		error = -EFAULT;
-	if (!error && override_release(name->release, sizeof(name->release)))
-		error = -EFAULT;
-	return error ? -EFAULT : 0;
+	if (override_architecture(name))
+		return -EFAULT;
+	if (override_release(name->release, sizeof(name->release)))
+		return -EFAULT;
+	return 0;
 }
#endif
@@ -1260,17 +1252,18 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
 
 	if (len < 0 || len > __NEW_UTS_LEN)
 		return -EINVAL;
-	down_write(&uts_sem);
 	errno = -EFAULT;
 	if (!copy_from_user(tmp, name, len)) {
-		struct new_utsname *u = utsname();
+		struct new_utsname *u;
 
+		down_write(&uts_sem);
+		u = utsname();
 		memcpy(u->nodename, tmp, len);
 		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
 		errno = 0;
 		uts_proc_notify(UTS_PROC_HOSTNAME);
+		up_write(&uts_sem);
 	}
-	up_write(&uts_sem);
 	return errno;
 }
@@ -1278,8 +1271,9 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
 
 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
 {
-	int i, errno;
+	int i;
 	struct new_utsname *u;
+	char tmp[__NEW_UTS_LEN + 1];
 
 	if (len < 0)
 		return -EINVAL;
@@ -1288,11 +1282,11 @@ SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
 	i = 1 + strlen(u->nodename);
 	if (i > len)
 		i = len;
-	errno = 0;
-	if (copy_to_user(name, u->nodename, i))
-		errno = -EFAULT;
+	memcpy(tmp, u->nodename, i);
 	up_read(&uts_sem);
-	return errno;
+	if (copy_to_user(name, tmp, i))
+		return -EFAULT;
+	return 0;
 }
#endif
@@ -1311,17 +1305,18 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
 
 	if (len < 0 || len > __NEW_UTS_LEN)
 		return -EINVAL;
-	down_write(&uts_sem);
 	errno = -EFAULT;
 	if (!copy_from_user(tmp, name, len)) {
-		struct new_utsname *u = utsname();
+		struct new_utsname *u;
 
+		down_write(&uts_sem);
+		u = utsname();
 		memcpy(u->domainname, tmp, len);
 		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
 		errno = 0;
 		uts_proc_notify(UTS_PROC_DOMAINNAME);
+		up_write(&uts_sem);
 	}
-	up_write(&uts_sem);
 	return errno;
 }
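
All of the sys.c hunks above share one shape: snapshot the UTS data into a local buffer while holding uts_sem, release the semaphore, and only then touch userspace. copy_to_user() can fault and block (for example on a userfaultfd-armed page), so running it under uts_sem could stall every other UTS access. A condensed sketch with hypothetical names:

#include <linux/rwsem.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static DECLARE_RWSEM(demo_sem);
static char demo_data[64];

static int demo_read(char __user *ubuf)
{
	char tmp[sizeof(demo_data)];

	down_read(&demo_sem);
	memcpy(tmp, demo_data, sizeof(tmp));
	up_read(&demo_sem);

	/* The faultable copy runs with no locks held. */
	return copy_to_user(ubuf, tmp, sizeof(tmp)) ? -EFAULT : 0;
}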


@@ -1809,6 +1809,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
 	mutex_lock(&q->blk_trace_mutex);
 
 	if (attr == &dev_attr_enable) {
+		if (!!value == !!q->blk_trace) {
+			ret = 0;
+			goto out_unlock_bdev;
+		}
 		if (value)
 			ret = blk_trace_setup_queue(q, bdev);
 		else


@@ -7545,7 +7545,9 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
 
 	if (buffer) {
 		mutex_lock(&trace_types_lock);
-		if (val) {
+		if (!!val == tracer_tracing_is_on(tr)) {
+			val = 0; /* do nothing */
+		} else if (val) {
 			tracer_tracing_on(tr);
 			if (tr->current_trace->start)
 				tr->current_trace->start(tr);


@@ -967,7 +967,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
 
 		list_del_rcu(&link->list);
 		/* synchronize with u{,ret}probe_trace_func */
-		synchronize_sched();
+		synchronize_rcu();
 		kfree(link);
 
 		if (!list_empty(&tu->tp.files))


@@ -650,7 +650,16 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 	unsigned idx;
 	struct uid_gid_extent *extent = NULL;
 	char *kbuf = NULL, *pos, *next_line;
-	ssize_t ret = -EINVAL;
+	ssize_t ret;
+
+	/* Only allow < page size writes at the beginning of the file */
+	if ((*ppos != 0) || (count >= PAGE_SIZE))
+		return -EINVAL;
+
+	/* Slurp in the user data */
+	kbuf = memdup_user_nul(buf, count);
+	if (IS_ERR(kbuf))
+		return PTR_ERR(kbuf);
 
 	/*
 	 * The userns_state_mutex serializes all writes to any given map.
@@ -684,19 +693,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
 	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
 		goto out;
 
-	/* Only allow < page size writes at the beginning of the file */
-	ret = -EINVAL;
-	if ((*ppos != 0) || (count >= PAGE_SIZE))
-		goto out;
-
-	/* Slurp in the user data */
-	kbuf = memdup_user_nul(buf, count);
-	if (IS_ERR(kbuf)) {
-		ret = PTR_ERR(kbuf);
-		kbuf = NULL;
-		goto out;
-	}
-
 	/* Parse the user data */
 	ret = -EINVAL;
 	pos = kbuf;


@@ -18,7 +18,7 @@
 
 #ifdef CONFIG_PROC_SYSCTL
 
-static void *get_uts(struct ctl_table *table, int write)
+static void *get_uts(struct ctl_table *table)
 {
 	char *which = table->data;
 	struct uts_namespace *uts_ns;
@@ -26,21 +26,9 @@ static void *get_uts(struct ctl_table *table, int write)
 	uts_ns = current->nsproxy->uts_ns;
 	which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
 
-	if (!write)
-		down_read(&uts_sem);
-	else
-		down_write(&uts_sem);
 	return which;
 }
 
-static void put_uts(struct ctl_table *table, int write, void *which)
-{
-	if (!write)
-		up_read(&uts_sem);
-	else
-		up_write(&uts_sem);
-}
-
 /*
 * Special case of dostring for the UTS structure. This has locks
 * to observe. Should this be in kernel/sys.c ????
@@ -50,13 +38,34 @@ static int proc_do_uts_string(struct ctl_table *table, int write,
 {
 	struct ctl_table uts_table;
 	int r;
-	memcpy(&uts_table, table, sizeof(uts_table));
-	uts_table.data = get_uts(table, write);
-	r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
-	put_uts(table, write, uts_table.data);
+	char tmp_data[__NEW_UTS_LEN + 1];
 
-	if (write)
+	memcpy(&uts_table, table, sizeof(uts_table));
+	uts_table.data = tmp_data;
+
+	/*
+	 * Buffer the value in tmp_data so that proc_dostring() can be called
+	 * without holding any locks.
+	 * We also need to read the original value in the write==1 case to
+	 * support partial writes.
+	 */
+	down_read(&uts_sem);
+	memcpy(tmp_data, get_uts(table), sizeof(tmp_data));
+	up_read(&uts_sem);
+	r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
+
+	if (write) {
+		/*
+		 * Write back the new value.
+		 * Note that, since we dropped uts_sem, the result can
+		 * theoretically be incorrect if there are two parallel writes
+		 * at non-zero offsets to the same sysctl.
+		 */
+		down_write(&uts_sem);
+		memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
+		up_write(&uts_sem);
 		proc_sys_poll_notify(table->poll);
+	}
 
 	return r;
 }


@@ -392,15 +392,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
 	struct mmu_table_batch **batch = &tlb->batch;
 
-	/*
-	 * When there's less then two users of this mm there cannot be a
-	 * concurrent page-table walk.
-	 */
-	if (atomic_read(&tlb->mm->mm_users) < 2) {
-		__tlb_remove_table(table);
-		return;
-	}
-
 	if (*batch == NULL) {
 		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
 		if (*batch == NULL) {


@@ -380,6 +380,7 @@ ondemand_readahead(struct address_space *mapping,
 {
 	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	unsigned long max_pages = ra->ra_pages;
+	unsigned long add_pages;
 	pgoff_t prev_offset;
 
 	/*
@@ -469,10 +470,17 @@ readit:
 	 * Will this read hit the readahead marker made by itself?
 	 * If so, trigger the readahead marker hit now, and merge
 	 * the resulted next readahead window into the current one.
+	 * Take care of maximum IO pages as above.
 	 */
 	if (offset == ra->start && ra->size == ra->async_size) {
-		ra->async_size = get_next_ra_size(ra, max_pages);
-		ra->size += ra->async_size;
+		add_pages = get_next_ra_size(ra, max_pages);
+		if (ra->size + add_pages <= max_pages) {
+			ra->async_size = add_pages;
+			ra->size += add_pages;
+		} else {
+			ra->size = max_pages;
+			ra->async_size = max_pages >> 1;
+		}
 	}
 
 	return ra_submit(ra, mapping, filp);
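
Previously ra->size could grow past max_pages (the io_pages clamp from the bdi) whenever the async marker was hit. The new branch merges the next window only if it still fits; for example, with max_pages = 64, ra->size = 48 and a next increment of 32, the window is pinned at size = 64 with async_size = 32 instead of growing to 80. The clamp in isolation, as a sketch:

static void demo_merge_window(unsigned long *size, unsigned long *async_size,
			      unsigned long add_pages, unsigned long max_pages)
{
	if (*size + add_pages <= max_pages) {
		*async_size = add_pages;
		*size += add_pages;
	} else {
		*size = max_pages;		/* e.g. 48 + 32 pinned at 64 */
		*async_size = max_pages >> 1;	/* 32 */
	}
}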


@@ -955,7 +955,7 @@ static int p9_client_version(struct p9_client *c)
 {
 	int err = 0;
 	struct p9_req_t *req;
-	char *version;
+	char *version = NULL;
 	int msize;
 
 	p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",


@@ -185,6 +185,8 @@ static void p9_mux_poll_stop(struct p9_conn *m)
 	spin_lock_irqsave(&p9_poll_lock, flags);
 	list_del_init(&m->poll_pending_link);
 	spin_unlock_irqrestore(&p9_poll_lock, flags);
+
+	flush_work(&p9_poll_work);
 }
/**
@@ -951,7 +953,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 	if (err < 0)
 		return err;
 
-	if (valid_ipaddr4(addr) < 0)
+	if (addr == NULL || valid_ipaddr4(addr) < 0)
 		return -EINVAL;
 
 	csocket = NULL;
@@ -1001,6 +1003,9 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
 
 	csocket = NULL;
 
+	if (addr == NULL)
+		return -EINVAL;
+
 	if (strlen(addr) >= UNIX_PATH_MAX) {
 		pr_err("%s (%d): address too long: %s\n",
 		       __func__, task_pid_nr(current), addr);


@@ -646,6 +646,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
 	struct rdma_conn_param conn_param;
 	struct ib_qp_init_attr qp_attr;
 
+	if (addr == NULL)
+		return -EINVAL;
+
 	/* Parse the transport specific mount options */
 	err = parse_opts(args, &opts);
 	if (err < 0)


@@ -189,7 +189,7 @@ static int pack_sg_list(struct scatterlist *sg, int start,
 		s = rest_of_page(data);
 		if (s > count)
 			s = count;
-		BUG_ON(index > limit);
+		BUG_ON(index >= limit);
 		/* Make sure we don't terminate early. */
 		sg_unmark_end(&sg[index]);
 		sg_set_buf(&sg[index++], data, s);
@@ -234,6 +234,7 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
 		s = PAGE_SIZE - data_off;
 		if (s > count)
 			s = count;
+		BUG_ON(index >= limit);
 		/* Make sure we don't terminate early. */
 		sg_unmark_end(&sg[index]);
 		sg_set_page(&sg[index++], pdata[i++], s, data_off);
@@ -406,6 +407,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 	p9_debug(P9_DEBUG_TRANS, "virtio request\n");
 
 	if (uodata) {
+		__le32 sz;
 		int n = p9_get_mapped_pages(chan, &out_pages, uodata,
 					    outlen, &offs, &need_drop);
 		if (n < 0)
@@ -416,6 +418,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
 			memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
 			outlen = n;
 		}
+		/* The size field of the message must include the length of the
+		 * header and the length of the data. We didn't actually know
+		 * the length of the data until this point so add it in now.
+		 */
+		sz = cpu_to_le32(req->tc->size + outlen);
+		memcpy(&req->tc->sdata[0], &sz, sizeof(sz));
 	} else if (uidata) {
 		int n = p9_get_mapped_pages(chan, &in_pages, uidata,
 					    inlen, &offs, &need_drop);
@@ -643,6 +651,9 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
 	int ret = -ENOENT;
 	int found = 0;
 
+	if (devname == NULL)
+		return -EINVAL;
+
 	mutex_lock(&virtio_9p_lock);
 	list_for_each_entry(chan, &virtio_chan_list, chan_list) {
 		if (!strncmp(devname, chan->tag, chan->tag_len) &&


@@ -95,6 +95,9 @@ static int p9_xen_create(struct p9_client *client, const char *addr, char *args)
 {
 	struct xen_9pfs_front_priv *priv;
 
+	if (addr == NULL)
+		return -EINVAL;
+
 	read_lock(&xen_9pfs_lock);
 	list_for_each_entry(priv, &xen_9pfs_devs, list) {
 		if (!strcmp(priv->tag, addr)) {


@@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
 	/* We must take a copy of the skb before we modify/replace the ipv6
 	 * header as the header could be used elsewhere
 	 */
-	skb = skb_unshare(skb, GFP_ATOMIC);
-	if (!skb)
-		return NET_XMIT_DROP;
+	if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
+		     skb_tailroom(skb) < ldev->needed_tailroom)) {
+		struct sk_buff *nskb;
+
+		nskb = skb_copy_expand(skb, ldev->needed_headroom,
+				       ldev->needed_tailroom, GFP_ATOMIC);
+		if (likely(nskb)) {
+			consume_skb(skb);
+			skb = nskb;
+		} else {
+			kfree_skb(skb);
+			return NET_XMIT_DROP;
+		}
+	} else {
+		skb = skb_unshare(skb, GFP_ATOMIC);
+		if (!skb)
+			return NET_XMIT_DROP;
+	}
 
 	ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
 	if (ret < 0) {
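
Rather than assume the device left enough room, the transmit path now expands the skb on demand: skb_copy_expand() builds a copy with the requested head- and tailroom, consume_skb() releases the original on success, and kfree_skb() drops it on failure. The reusable shape, as a sketch (the room values would come from the net_device, as in the hunk above):

#include <linux/skbuff.h>

static struct sk_buff *demo_ensure_room(struct sk_buff *skb,
					unsigned int head, unsigned int tail)
{
	struct sk_buff *nskb;

	if (skb_headroom(skb) >= head && skb_tailroom(skb) >= tail)
		return skb;

	nskb = skb_copy_expand(skb, head, tail, GFP_ATOMIC);
	if (!nskb) {
		kfree_skb(skb);		/* dropped: caller sees NULL */
		return NULL;
	}
	consume_skb(skb);		/* replaced, not dropped */
	return nskb;
}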


@@ -63,8 +63,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
 	int ret;
 
 	if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
-		u16 crc = crc_ccitt(0, skb->data, skb->len);
+		struct sk_buff *nskb;
+		u16 crc;
+
+		if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
+			nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
+					       GFP_ATOMIC);
+			if (likely(nskb)) {
+				consume_skb(skb);
+				skb = nskb;
+			} else {
+				goto err_tx;
+			}
+		}
 
+		crc = crc_ccitt(0, skb->data, skb->len);
 		put_unaligned_le16(crc, skb_put(skb, 2));
 	}


@@ -965,10 +965,20 @@ out:
 }
 EXPORT_SYMBOL_GPL(rpc_bind_new_program);
 
+void rpc_task_release_transport(struct rpc_task *task)
+{
+	struct rpc_xprt *xprt = task->tk_xprt;
+
+	if (xprt) {
+		task->tk_xprt = NULL;
+		xprt_put(xprt);
+	}
+}
+EXPORT_SYMBOL_GPL(rpc_task_release_transport);
+
 void rpc_task_release_client(struct rpc_task *task)
 {
 	struct rpc_clnt *clnt = task->tk_client;
-	struct rpc_xprt *xprt = task->tk_xprt;
 
 	if (clnt != NULL) {
/* Remove from client task list */
@@ -979,12 +989,14 @@ void rpc_task_release_client(struct rpc_task *task)
 		rpc_release_client(clnt);
 	}
+	rpc_task_release_transport(task);
+}
 
-	if (xprt != NULL) {
-		task->tk_xprt = NULL;
-		xprt_put(xprt);
-	}
+static
+void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
+{
+	if (!task->tk_xprt)
+		task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
 }
static
@@ -992,8 +1004,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
 {
 	if (clnt != NULL) {
-		if (task->tk_xprt == NULL)
-			task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
+		rpc_task_set_transport(task, clnt);
 		task->tk_client = clnt;
 		atomic_inc(&clnt->cl_count);
 		if (clnt->cl_softrtry)
@@ -1529,6 +1540,7 @@ call_start(struct rpc_task *task)
 		clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
 	clnt->cl_stats->rpccnt++;
 	task->tk_action = call_reserve;
+	rpc_task_set_transport(task, clnt);
 }
/*


@@ -413,7 +413,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
 	if (strcmp(name, "capability") != 0)
 		return -EOPNOTSUPP;
 
-	dentry = d_find_alias(inode);
+	dentry = d_find_any_alias(inode);
 	if (!dentry)
 		return -EINVAL;


@@ -197,6 +197,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
 	for (i = 0; i < queues->nr_queues; i++) {
 		list_splice_tail(&queues->queue_array[i].head,
 				 &queue_array[i].head);
+		queue_array[i].tid = queues->queue_array[i].tid;
+		queue_array[i].cpu = queues->queue_array[i].cpu;
+		queue_array[i].set = queues->queue_array[i].set;
 		queue_array[i].priv = queues->queue_array[i].priv;
 	}