Merge android-4.14.115 (b5123fd) into msm-4.14

* refs/heads/tmp-b5123fd:
  Linux 4.14.115
  Documentation: Add nospectre_v1 parameter
  powerpc/fsl: Add FSL_PPC_BOOK3E as supported arch for nospectre_v2 boot arg
  ipv4: set the tcp_min_rtt_wlen range from 0 to one day
  net/rose: fix unbound loop in rose_loopback_timer()
  net/rose: Convert timers to use timer_setup()
  team: fix possible recursive locking when add slaves
  stmmac: pci: Adjust IOT2000 matching
  net: stmmac: move stmmac_check_ether_addr() to driver probe
  net: rds: exchange of 8K and 1M pool
  net/mlx5e: ethtool, Remove unsupported SFP EEPROM high pages query
  mlxsw: spectrum: Fix autoneg status in ethtool
  ipv4: add sanity checks in ipv4_link_failure()
  Revert "block/loop: Use global lock for ioctl() operation."
  mm: Fix warning in insert_pfn()
  x86/retpolines: Disable switch jump tables when retpolines are enabled
  x86, retpolines: Raise limit for generating indirect calls from switch-case
  dm integrity: change memcmp to strncmp in dm_integrity_ctr
  tipc: check link name with right length in tipc_nl_compat_link_set
  tipc: check bearer name with right length in tipc_nl_compat_bearer_enable
  fm10k: Fix a potential NULL pointer dereference
  netfilter: ebtables: CONFIG_COMPAT: drop a bogus WARN_ON
  NFS: Forbid setting AF_INET6 to "struct sockaddr_in"->sin_family.
  sched/deadline: Correctly handle active 0-lag timers
  binder: fix handling of misaligned binder object
  ipvs: fix warning on unused variable
  fs/proc/proc_sysctl.c: Fix a NULL pointer dereference
  intel_th: gth: Fix an off-by-one in output unassigning
  slip: make slhc_free() silently accept an error pointer
  tipc: handle the err returned from cmd header function
  vsock/virtio: fix kernel panic from virtio_transport_reset_no_sock
  ext4: fix some error pointer dereferences
  USB: Consolidate LPM checks to avoid enabling LPM twice
  USB: Add new USB LPM helpers
  drm/vc4: Fix compilation error reported by kbuild test bot
  Revert "drm/i915/fbdev: Actually configure untiled displays"
  drm/vc4: Fix memory leak during gpu reset.
  ARM: 8857/1: efi: enable CP15 DMB instructions before cleaning the cache
  dmaengine: sh: rcar-dmac: With cyclic DMA residue 0 is valid
  vfio/type1: Limit DMA mappings per container
  Input: synaptics-rmi4 - write config register values to the right offset
  sunrpc: don't mark uninitialised items as VALID.
  nfsd: Don't release the callback slot unless it was actually held
  ceph: fix ci->i_head_snapc leak
  ceph: ensure d_name stability in ceph_dentry_hash()
  ceph: only use d_name directly when parent is locked
  sched/numa: Fix a possible divide-by-zero
  IB/rdmavt: Fix frwr memory registration
  trace: Fix preempt_enable_no_resched() abuse
  MIPS: scall64-o32: Fix indirect syscall number load
  lib/Kconfig.debug: fix build error without CONFIG_BLOCK
  zram: pass down the bvec we need to read into in the work struct
  tracing: Fix buffer_ref pipe ops
  tracing: Fix a memory leak by early error exit in trace_pid_write()
  cifs: do not attempt cifs operation on smb2+ rename error
  kbuild: simplify ld-option implementation

Conflicts:
	net/ipv4/sysctl_net_ipv4.c

Change-Id: I2c01204927f3217ee955185c4526df777b5a4b42
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>

@ -2689,7 +2689,11 @@
nosmt=force: Force disable SMT, cannot be undone
via the sysfs control file.
nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds
check bypass). With this option data leaks are possible
in the system.
nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
(indirect branch prediction) vulnerability. System may
allow data leaks with this option, which is equivalent
to spectre_v2=off.

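Both switches are ordinary kernel command-line parameters. As a purely hypothetical illustration (the bootloader entry and device names are made up), a Freescale Book3E PowerPC box could opt out of both mitigations with:

    linux /vmlinuz-4.14.115 root=/dev/sda1 ro nospectre_v1 nospectre_v2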

@ -402,6 +402,7 @@ tcp_min_rtt_wlen - INTEGER
minimum RTT when it is moved to a longer path (e.g., due to traffic
engineering). A longer window makes the filter more resistant to RTT
inflations such as transient congestion. The unit is seconds.
Possible values: 0 - 86400 (1 day)
Default: 300
tcp_moderate_rcvbuf - BOOLEAN

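Together with the net/ipv4/sysctl_net_ipv4.c hunk further down, which switches the handler to proc_dointvec_minmax, out-of-range writes are now rejected instead of silently accepted. For example (values chosen arbitrarily):

    sysctl -w net.ipv4.tcp_min_rtt_wlen=3600      # accepted: within 0 - 86400
    sysctl -w net.ipv4.tcp_min_rtt_wlen=100000    # rejected: fails the new range check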

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 114
SUBLEVEL = 115
EXTRAVERSION =
NAME = Petit Gorille


@ -1395,7 +1395,21 @@ ENTRY(efi_stub_entry)
@ Preserve return value of efi_entry() in r4
mov r4, r0
bl cache_clean_flush
@ our cache maintenance code relies on CP15 barrier instructions
@ but since we arrived here with the MMU and caches configured
@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
@ the enable path will be executed on v7+ only.
mrc p15, 0, r1, c1, c0, 0 @ read SCTLR
tst r1, #(1 << 5) @ CP15BEN bit set?
bne 0f
orr r1, r1, #(1 << 5) @ CP15 barrier instructions
mcr p15, 0, r1, c1, c0, 0 @ write SCTLR
ARM( .inst 0xf57ff06f @ v7+ isb )
THUMB( isb )
0: bl cache_clean_flush
bl cache_off
@ Set parameters for booting zImage according to boot protocol


@ -125,7 +125,7 @@ trace_a_syscall:
subu t1, v0, __NR_O32_Linux
move a1, v0
bnez t1, 1f /* __NR_syscall at offset 0 */
lw a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
.set pop
1: jal syscall_trace_enter


@ -245,6 +245,15 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# Avoid indirect branches in kernel to deal with Spectre
ifdef CONFIG_RETPOLINE
KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
# Additionally, avoid generating expensive indirect jumps which
# are subject to retpolines for small number of switch cases.
# clang turns off jump table generation by default when under
# retpoline builds, however, gcc does not for x86. This has
# only been fixed starting from gcc stable version 8.4.0 and
# onwards, but not for older ones. See gcc bug #86952.
ifndef CONFIG_CC_IS_CLANG
KBUILD_CFLAGS += $(call cc-option,-fno-jump-tables)
endif
endif
archscripts: scripts_basic

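To see the indirect branch this avoids, here is a minimal sketch (illustration only, not code from the tree): with a handful of densely numbered cases, GCC at -O2 normally lowers the switch below to a jump table, i.e. an indirect jump through an array of code pointers, which is exactly what retpoline builds must not emit. Compiling the same file with -fno-jump-tables forces a compare-and-branch sequence instead.

    /* sketch.c: gcc -O2 -S sketch.c          -> jump table (indirect jmp)
     *           gcc -O2 -fno-jump-tables -S  -> conditional branches
     */
    int classify(int op)
    {
            switch (op) {
            case 0: return 10;
            case 1: return 11;
            case 2: return 12;
            case 3: return 13;
            case 4: return 14;
            case 5: return 15;
            case 6: return 16;
            default: return -1;
            }
    }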

@ -916,14 +916,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
mm = alloc->vma_vm_mm;
if (!mmget_not_zero(mm))
goto err_mmget;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
vma = binder_alloc_get_vma(alloc);
if (vma) {
if (!mmget_not_zero(alloc->vma_vm_mm))
goto err_mmget;
mm = alloc->vma_vm_mm;
if (!down_write_trylock(&mm->mmap_sem))
goto err_down_write_mmap_sem_failed;
}
list_lru_isolate(lru, item);
spin_unlock(lock);
@ -934,10 +933,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
zap_page_range(vma, page_addr, PAGE_SIZE);
trace_binder_unmap_user_end(alloc, index);
up_write(&mm->mmap_sem);
mmput(mm);
}
up_write(&mm->mmap_sem);
mmput(mm);
trace_binder_unmap_kernel_start(alloc, index);


@ -82,7 +82,6 @@
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_index_mutex);
static DEFINE_MUTEX(loop_ctl_mutex);
static int max_part;
static int part_shift;
@ -1019,7 +1018,7 @@ static int loop_clr_fd(struct loop_device *lo)
*/
if (atomic_read(&lo->lo_refcnt) > 1) {
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
@ -1071,12 +1070,12 @@ static int loop_clr_fd(struct loop_device *lo)
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
loop_unprepare_queue(lo);
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
/*
* Need not hold loop_ctl_mutex to fput backing file.
* Calling fput holding loop_ctl_mutex triggers a circular
* Need not hold lo_ctl_mutex to fput backing file.
* Calling fput holding lo_ctl_mutex triggers a circular
* lock dependency possibility warning as fput can take
* bd_mutex which is usually taken before loop_ctl_mutex.
* bd_mutex which is usually taken before lo_ctl_mutex.
*/
fput(filp);
return 0;
@ -1195,7 +1194,7 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
int ret;
if (lo->lo_state != Lo_bound) {
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
return -ENXIO;
}
@ -1214,10 +1213,10 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
lo->lo_encrypt_key_size);
}
/* Drop loop_ctl_mutex while we call into the filesystem. */
/* Drop lo_ctl_mutex while we call into the filesystem. */
path = lo->lo_backing_file->f_path;
path_get(&path);
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
if (!ret) {
info->lo_device = huge_encode_dev(stat.dev);
@ -1309,7 +1308,7 @@ loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
int err;
if (!arg) {
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
return -EINVAL;
}
err = loop_get_status(lo, &info64);
@ -1327,7 +1326,7 @@ loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
int err;
if (!arg) {
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
return -EINVAL;
}
err = loop_get_status(lo, &info64);
@ -1402,7 +1401,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
struct loop_device *lo = bdev->bd_disk->private_data;
int err;
mutex_lock_nested(&loop_ctl_mutex, 1);
mutex_lock_nested(&lo->lo_ctl_mutex, 1);
switch (cmd) {
case LOOP_SET_FD:
err = loop_set_fd(lo, mode, bdev, arg);
@ -1411,7 +1410,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
err = loop_change_fd(lo, bdev, arg);
break;
case LOOP_CLR_FD:
/* loop_clr_fd would have unlocked loop_ctl_mutex on success */
/* loop_clr_fd would have unlocked lo_ctl_mutex on success */
err = loop_clr_fd(lo);
if (!err)
goto out_unlocked;
@ -1424,7 +1423,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
break;
case LOOP_GET_STATUS:
err = loop_get_status_old(lo, (struct loop_info __user *) arg);
/* loop_get_status() unlocks loop_ctl_mutex */
/* loop_get_status() unlocks lo_ctl_mutex */
goto out_unlocked;
case LOOP_SET_STATUS64:
err = -EPERM;
@ -1434,7 +1433,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
break;
case LOOP_GET_STATUS64:
err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
/* loop_get_status() unlocks loop_ctl_mutex */
/* loop_get_status() unlocks lo_ctl_mutex */
goto out_unlocked;
case LOOP_SET_CAPACITY:
err = -EPERM;
@ -1454,7 +1453,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
default:
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
}
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
out_unlocked:
return err;
@ -1571,7 +1570,7 @@ loop_get_status_compat(struct loop_device *lo,
int err;
if (!arg) {
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
return -EINVAL;
}
err = loop_get_status(lo, &info64);
@ -1588,16 +1587,16 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
switch(cmd) {
case LOOP_SET_STATUS:
mutex_lock(&loop_ctl_mutex);
mutex_lock(&lo->lo_ctl_mutex);
err = loop_set_status_compat(
lo, (const struct compat_loop_info __user *) arg);
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
break;
case LOOP_GET_STATUS:
mutex_lock(&loop_ctl_mutex);
mutex_lock(&lo->lo_ctl_mutex);
err = loop_get_status_compat(
lo, (struct compat_loop_info __user *) arg);
/* loop_get_status() unlocks loop_ctl_mutex */
/* loop_get_status() unlocks lo_ctl_mutex */
break;
case LOOP_SET_CAPACITY:
case LOOP_CLR_FD:
@ -1641,7 +1640,7 @@ static void __lo_release(struct loop_device *lo)
if (atomic_dec_return(&lo->lo_refcnt))
return;
mutex_lock(&loop_ctl_mutex);
mutex_lock(&lo->lo_ctl_mutex);
if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
/*
* In autoclear mode, stop the loop thread
@ -1659,7 +1658,7 @@ static void __lo_release(struct loop_device *lo)
blk_mq_unfreeze_queue(lo->lo_queue);
}
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
}
static void lo_release(struct gendisk *disk, fmode_t mode)
@ -1705,10 +1704,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
struct loop_device *lo = ptr;
struct loop_func_table *xfer = data;
mutex_lock(&loop_ctl_mutex);
mutex_lock(&lo->lo_ctl_mutex);
if (lo->lo_encryption == xfer)
loop_release_xfer(lo);
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
return 0;
}
@ -1881,6 +1880,7 @@ static int loop_add(struct loop_device **l, int i)
if (!part_shift)
disk->flags |= GENHD_FL_NO_PART_SCAN;
disk->flags |= GENHD_FL_EXT_DEVT;
mutex_init(&lo->lo_ctl_mutex);
atomic_set(&lo->lo_refcnt, 0);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
@ -1993,19 +1993,19 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
ret = loop_lookup(&lo, parm);
if (ret < 0)
break;
mutex_lock(&loop_ctl_mutex);
mutex_lock(&lo->lo_ctl_mutex);
if (lo->lo_state != Lo_unbound) {
ret = -EBUSY;
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
break;
}
if (atomic_read(&lo->lo_refcnt) > 0) {
ret = -EBUSY;
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
break;
}
lo->lo_disk->private_data = NULL;
mutex_unlock(&loop_ctl_mutex);
mutex_unlock(&lo->lo_ctl_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
loop_remove(lo);
break;


@ -54,6 +54,7 @@ struct loop_device {
spinlock_t lo_lock;
int lo_state;
struct mutex lo_ctl_mutex;
struct kthread_worker worker;
struct task_struct *worker_task;
bool use_dio;


@ -782,18 +782,18 @@ struct zram_work {
struct zram *zram;
unsigned long entry;
struct bio *bio;
struct bio_vec bvec;
};
#if PAGE_SIZE != 4096
static void zram_sync_read(struct work_struct *work)
{
struct bio_vec bvec;
struct zram_work *zw = container_of(work, struct zram_work, work);
struct zram *zram = zw->zram;
unsigned long entry = zw->entry;
struct bio *bio = zw->bio;
read_from_bdev_async(zram, &bvec, entry, bio);
read_from_bdev_async(zram, &zw->bvec, entry, bio);
}
/*
@ -806,6 +806,7 @@ static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
{
struct zram_work work;
work.bvec = *bvec;
work.zram = zram;
work.entry = entry;
work.bio = bio;


@ -1332,6 +1332,7 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
enum dma_status status;
unsigned long flags;
unsigned int residue;
bool cyclic;
status = dma_cookie_status(chan, cookie, txstate);
if (status == DMA_COMPLETE || !txstate)
@ -1339,10 +1340,11 @@ static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&rchan->lock, flags);
residue = rcar_dmac_chan_get_residue(rchan, cookie);
cyclic = rchan->desc.running ? rchan->desc.running->cyclic : false;
spin_unlock_irqrestore(&rchan->lock, flags);
/* if there's no residue, the cookie is complete */
if (!residue)
if (!residue && !cyclic)
return DMA_COMPLETE;
dma_set_residue(txstate, residue);


@ -326,8 +326,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
unsigned long conn_configured, conn_seq, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
unsigned long conn_configured, conn_seq;
int i, j;
bool *save_enabled;
bool fallback = true, ret = true;
@ -345,9 +345,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
drm_modeset_backoff(&ctx);
memcpy(save_enabled, enabled, count);
conn_seq = GENMASK(count - 1, 0);
mask = GENMASK(count - 1, 0);
conn_configured = 0;
retry:
conn_seq = conn_configured;
for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@ -360,8 +361,7 @@ retry:
if (conn_configured & BIT(i))
continue;
/* First pass, only consider tiled connectors */
if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
if (conn_seq == 0 && !connector->has_tile)
continue;
if (connector->status == connector_status_connected)
@ -465,10 +465,8 @@ retry:
conn_configured |= BIT(i);
}
if (conn_configured != conn_seq) { /* repeat until no more are found */
conn_seq = conn_configured;
if ((conn_configured & mask) != mask && conn_configured != conn_seq)
goto retry;
}
/*
* If the BIOS didn't enable everything it could, fall back to have the


@ -867,7 +867,7 @@ static void
vc4_crtc_reset(struct drm_crtc *crtc)
{
if (crtc->state)
__drm_atomic_helper_crtc_destroy_state(crtc->state);
vc4_crtc_destroy_state(crtc, crtc->state);
crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
if (crtc->state)


@ -624,7 +624,7 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
othdev->output.port = -1;
othdev->output.active = false;
gth->output[port].output = NULL;
for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
for (master = 0; master <= TH_CONFIGURABLE_MASTERS; master++)
if (gth->master[master] == port)
gth->master[master] = -1;
spin_unlock(&gth->gth_lock);


@ -611,11 +611,6 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
if (unlikely(mapped_segs == mr->mr.max_segs))
return -ENOMEM;
if (mr->mr.length == 0) {
mr->mr.user_base = addr;
mr->mr.iova = addr;
}
m = mapped_segs / RVT_SEGSZ;
n = mapped_segs % RVT_SEGSZ;
mr->mr.map[m]->segs[n].vaddr = (void *)addr;
@ -633,17 +628,24 @@ static int rvt_set_page(struct ib_mr *ibmr, u64 addr)
* @sg_nents: number of entries in sg
* @sg_offset: offset in bytes into sg
*
* Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
*
* Return: number of sg elements mapped to the memory region
*/
int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
struct rvt_mr *mr = to_imr(ibmr);
int ret;
mr->mr.length = 0;
mr->mr.page_shift = PAGE_SHIFT;
return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
rvt_set_page);
ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
mr->mr.user_base = ibmr->iova;
mr->mr.iova = ibmr->iova;
mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
mr->mr.length = (size_t)ibmr->length;
return ret;
}
/**
@ -674,6 +676,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
ibmr->rkey = key;
mr->mr.lkey = key;
mr->mr.access_flags = access;
mr->mr.iova = ibmr->iova;
atomic_set(&mr->mr.lkey_invalid, 0);
return 0;


@ -1239,7 +1239,7 @@ static int rmi_f11_initialize(struct rmi_function *fn)
}
rc = f11_write_control_regs(fn, &f11->sens_query,
&f11->dev_controls, fn->fd.query_base_addr);
&f11->dev_controls, fn->fd.control_base_addr);
if (rc)
dev_warn(&fn->dev, "Failed to write control registers\n");


@ -2917,17 +2917,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
ic->sectors_per_block = val >> SECTOR_SHIFT;
} else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
"Invalid internal_hash argument");
if (r)
goto bad;
} else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
"Invalid journal_crypt argument");
if (r)
goto bad;
} else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
"Invalid journal_mac argument");
if (r)


@ -58,6 +58,8 @@ static int __init fm10k_init_module(void)
/* create driver workqueue */
fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
fm10k_driver_name);
if (!fm10k_workqueue)
return -ENOMEM;
fm10k_dbg_init();


@ -1622,7 +1622,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
break;
case MLX5_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
modinfo->eeprom_len = MLX5_EEPROM_PAGE_LENGTH;
break;
default:
netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",


@ -392,10 +392,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
i2c_addr = MLX5_I2C_ADDR_LOW;
if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
i2c_addr = MLX5_I2C_ADDR_HIGH;
offset -= MLX5_EEPROM_PAGE_LENGTH;
}
MLX5_SET(mcia_reg, in, l, 0);
MLX5_SET(mcia_reg, in, module, module_num);


@ -2521,11 +2521,11 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
if (err)
return err;
mlxsw_sp_port->link.autoneg = autoneg;
if (!netif_running(dev))
return 0;
mlxsw_sp_port->link.autoneg = autoneg;
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);


@ -2582,8 +2582,6 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
stmmac_check_ether_addr(priv);
if (priv->hw->pcs != STMMAC_PCS_RGMII &&
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
@ -4213,6 +4211,8 @@ int stmmac_dvr_probe(struct device *device,
if (ret)
goto error_hw_init;
stmmac_check_ether_addr(priv);
/* Configure real RX and TX queues */
netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);


@ -159,6 +159,12 @@ static const struct dmi_system_id quark_pci_dmi[] = {
},
.driver_data = (void *)&galileo_stmmac_dmi_data,
},
/*
* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
* The asset tag "6ES7647-0AA00-0YA2" is only for IOT2020 which
* has only one pci network device while other asset tags are
* for IOT2040 which has two.
*/
{
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
@ -170,8 +176,6 @@ static const struct dmi_system_id quark_pci_dmi[] = {
{
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
"6ES7647-0AA00-1YA2"),
},
.driver_data = (void *)&iot2040_stmmac_dmi_data,
},


@ -153,7 +153,7 @@ out_fail:
void
slhc_free(struct slcompress *comp)
{
if ( comp == NULLSLCOMPR )
if ( IS_ERR_OR_NULL(comp) )
return;
if ( comp->tstate != NULLSLSTATE )

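Why an error pointer can reach slhc_free() at all: slhc_init() returns ERR_PTR() values on failure, and the slip error paths hand that result straight back to the free routine. A sketch of the caller pattern being defended against (the function name is hypothetical and the slot counts arbitrary):

    static int example_attach(struct slip *sl)
    {
            struct slcompress *comp = slhc_init(16, 16);

            if (IS_ERR(comp)) {
                    slhc_free(comp);        /* used to dereference the error pointer */
                    return PTR_ERR(comp);
            }
            sl->slcomp = comp;
            return 0;
    }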

@ -1157,6 +1157,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return -EINVAL;
}
if (netdev_has_upper_dev(dev, port_dev)) {
netdev_err(dev, "Device %s is already an upper device of the team interface\n",
portname);
return -EBUSY;
}
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",


@ -1903,14 +1903,11 @@ int usb_runtime_idle(struct device *dev)
return -EBUSY;
}
int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
static int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
{
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
int ret = -EPERM;
if (enable && !udev->usb2_hw_lpm_allowed)
return 0;
if (hcd->driver->set_usb2_hw_lpm) {
ret = hcd->driver->set_usb2_hw_lpm(hcd, udev, enable);
if (!ret)
@ -1920,6 +1917,24 @@ int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
return ret;
}
int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
{
if (!udev->usb2_hw_lpm_capable ||
!udev->usb2_hw_lpm_allowed ||
udev->usb2_hw_lpm_enabled)
return 0;
return usb_set_usb2_hardware_lpm(udev, 1);
}
int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
{
if (!udev->usb2_hw_lpm_enabled)
return 0;
return usb_set_usb2_hardware_lpm(udev, 0);
}
#endif /* CONFIG_PM */
struct bus_type usb_bus_type = {


@ -3185,8 +3185,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
}
/* disable USB2 hardware LPM */
if (udev->usb2_hw_lpm_enabled == 1)
usb_set_usb2_hardware_lpm(udev, 0);
usb_disable_usb2_hardware_lpm(udev);
if (usb_disable_ltm(udev)) {
dev_err(&udev->dev, "Failed to disable LTM before suspend\n.");
@ -3224,8 +3223,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
usb_enable_ltm(udev);
err_ltm:
/* Try to enable USB2 hardware LPM again */
if (udev->usb2_hw_lpm_capable == 1)
usb_set_usb2_hardware_lpm(udev, 1);
usb_enable_usb2_hardware_lpm(udev);
if (udev->do_remote_wakeup)
(void) usb_disable_remote_wakeup(udev);
@ -3511,8 +3509,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
hub_port_logical_disconnect(hub, port1);
} else {
/* Try to enable USB2 hardware LPM */
if (udev->usb2_hw_lpm_capable == 1)
usb_set_usb2_hardware_lpm(udev, 1);
usb_enable_usb2_hardware_lpm(udev);
/* Try to enable USB3 LTM */
usb_enable_ltm(udev);
@ -4348,7 +4345,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
udev->usb2_hw_lpm_allowed = 1;
usb_set_usb2_hardware_lpm(udev, 1);
usb_enable_usb2_hardware_lpm(udev);
}
}
@ -5511,8 +5508,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
/* Disable USB2 hardware LPM.
* It will be re-enabled by the enumeration process.
*/
if (udev->usb2_hw_lpm_enabled == 1)
usb_set_usb2_hardware_lpm(udev, 0);
usb_disable_usb2_hardware_lpm(udev);
/* Disable LPM and LTM while we reset the device and reinstall the alt
* settings. Device-initiated LPM settings, and system exit latency
@ -5622,7 +5618,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
done:
/* Now that the alt settings are re-installed, enable LTM and LPM. */
usb_set_usb2_hardware_lpm(udev, 1);
usb_enable_usb2_hardware_lpm(udev);
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
usb_release_bos_descriptor(udev);


@ -1182,8 +1182,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
dev->actconfig->interface[i] = NULL;
}
if (dev->usb2_hw_lpm_enabled == 1)
usb_set_usb2_hardware_lpm(dev, 0);
usb_disable_usb2_hardware_lpm(dev);
usb_unlocked_disable_lpm(dev);
usb_disable_ltm(dev);


@ -508,7 +508,10 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
if (!ret) {
udev->usb2_hw_lpm_allowed = value;
ret = usb_set_usb2_hardware_lpm(udev, value);
if (value)
ret = usb_enable_usb2_hardware_lpm(udev);
else
ret = usb_disable_usb2_hardware_lpm(udev);
}
usb_unlock_device(udev);


@ -89,7 +89,8 @@ extern int usb_remote_wakeup(struct usb_device *dev);
extern int usb_runtime_suspend(struct device *dev);
extern int usb_runtime_resume(struct device *dev);
extern int usb_runtime_idle(struct device *dev);
extern int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable);
extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev);
extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev);
#else
@ -109,7 +110,12 @@ static inline int usb_autoresume_device(struct usb_device *udev)
return 0;
}
static inline int usb_set_usb2_hardware_lpm(struct usb_device *udev, int enable)
static inline int usb_enable_usb2_hardware_lpm(struct usb_device *udev)
{
return 0;
}
static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
{
return 0;
}


@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
MODULE_PARM_DESC(disable_hugepages,
"Disable VFIO IOMMU support for IOMMU hugepages.");
static unsigned int dma_entry_limit __read_mostly = U16_MAX;
module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
MODULE_PARM_DESC(dma_entry_limit,
"Maximum number of user DMA mappings per container (65535).");
struct vfio_iommu {
struct list_head domain_list;
struct vfio_domain *external_domain; /* domain for external user */
struct mutex lock;
struct rb_root dma_list;
struct blocking_notifier_head notifier;
unsigned int dma_avail;
bool v2;
bool nesting;
};
@ -732,6 +738,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
vfio_unlink_dma(iommu, dma);
put_task_struct(dma->task);
kfree(dma);
iommu->dma_avail++;
}
static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@ -1003,12 +1010,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
goto out_unlock;
}
if (!iommu->dma_avail) {
ret = -ENOSPC;
goto out_unlock;
}
dma = kzalloc(sizeof(*dma), GFP_KERNEL);
if (!dma) {
ret = -ENOMEM;
goto out_unlock;
}
iommu->dma_avail--;
dma->iova = iova;
dma->vaddr = vaddr;
dma->prot = prot;
@ -1504,6 +1517,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
INIT_LIST_HEAD(&iommu->domain_list);
iommu->dma_list = RB_ROOT;
iommu->dma_avail = dma_entry_limit;
mutex_init(&iommu->lock);
BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);

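The new cap is a module parameter (mode 0644, so it can also be changed after load under /sys/module/vfio_iommu_type1/parameters/). A hypothetical deployment wanting a tighter budget than the 65535 default could load the module with:

    modprobe vfio_iommu_type1 dma_entry_limit=4096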

@ -1454,6 +1454,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
struct ceph_inode_info *dci = ceph_inode(dir);
unsigned hash;
switch (dci->i_dir_layout.dl_dir_hash) {
case 0: /* for backward compat */
@ -1461,8 +1462,11 @@ unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
return dn->d_name.hash;
default:
return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
spin_lock(&dn->d_lock);
hash = ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
dn->d_name.name, dn->d_name.len);
spin_unlock(&dn->d_lock);
return hash;
}
}


@ -1219,6 +1219,15 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
ci->i_prealloc_cap_flush = NULL;
}
if (drop &&
ci->i_wrbuffer_ref_head == 0 &&
ci->i_wr_ref == 0 &&
ci->i_dirty_caps == 0 &&
ci->i_flushing_caps == 0) {
ceph_put_snap_context(ci->i_head_snapc);
ci->i_head_snapc = NULL;
}
}
spin_unlock(&ci->i_ceph_lock);
while (!list_empty(&to_remove)) {
@ -1863,10 +1872,39 @@ retry:
return path;
}
/* Duplicate the dentry->d_name.name safely */
static int clone_dentry_name(struct dentry *dentry, const char **ppath,
int *ppathlen)
{
u32 len;
char *name;
retry:
len = READ_ONCE(dentry->d_name.len);
name = kmalloc(len + 1, GFP_NOFS);
if (!name)
return -ENOMEM;
spin_lock(&dentry->d_lock);
if (dentry->d_name.len != len) {
spin_unlock(&dentry->d_lock);
kfree(name);
goto retry;
}
memcpy(name, dentry->d_name.name, len);
spin_unlock(&dentry->d_lock);
name[len] = '\0';
*ppath = name;
*ppathlen = len;
return 0;
}
static int build_dentry_path(struct dentry *dentry, struct inode *dir,
const char **ppath, int *ppathlen, u64 *pino,
int *pfreepath)
bool *pfreepath, bool parent_locked)
{
int ret;
char *path;
rcu_read_lock();
@ -1875,8 +1913,15 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
if (dir && ceph_snap(dir) == CEPH_NOSNAP) {
*pino = ceph_ino(dir);
rcu_read_unlock();
*ppath = dentry->d_name.name;
*ppathlen = dentry->d_name.len;
if (parent_locked) {
*ppath = dentry->d_name.name;
*ppathlen = dentry->d_name.len;
} else {
ret = clone_dentry_name(dentry, ppath, ppathlen);
if (ret)
return ret;
*pfreepath = true;
}
return 0;
}
rcu_read_unlock();
@ -1884,13 +1929,13 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
if (IS_ERR(path))
return PTR_ERR(path);
*ppath = path;
*pfreepath = 1;
*pfreepath = true;
return 0;
}
static int build_inode_path(struct inode *inode,
const char **ppath, int *ppathlen, u64 *pino,
int *pfreepath)
bool *pfreepath)
{
struct dentry *dentry;
char *path;
@ -1906,7 +1951,7 @@ static int build_inode_path(struct inode *inode,
if (IS_ERR(path))
return PTR_ERR(path);
*ppath = path;
*pfreepath = 1;
*pfreepath = true;
return 0;
}
@ -1917,7 +1962,7 @@ static int build_inode_path(struct inode *inode,
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
struct inode *rdiri, const char *rpath,
u64 rino, const char **ppath, int *pathlen,
u64 *ino, int *freepath)
u64 *ino, bool *freepath, bool parent_locked)
{
int r = 0;
@ -1927,7 +1972,7 @@ static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
ceph_snap(rinode));
} else if (rdentry) {
r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
freepath);
freepath, parent_locked);
dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
*ppath);
} else if (rpath || rino) {
@ -1953,7 +1998,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
const char *path2 = NULL;
u64 ino1 = 0, ino2 = 0;
int pathlen1 = 0, pathlen2 = 0;
int freepath1 = 0, freepath2 = 0;
bool freepath1 = false, freepath2 = false;
int len;
u16 releases;
void *p, *end;
@ -1961,16 +2006,19 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
ret = set_request_path_attr(req->r_inode, req->r_dentry,
req->r_parent, req->r_path1, req->r_ino1.ino,
&path1, &pathlen1, &ino1, &freepath1);
&path1, &pathlen1, &ino1, &freepath1,
test_bit(CEPH_MDS_R_PARENT_LOCKED,
&req->r_req_flags));
if (ret < 0) {
msg = ERR_PTR(ret);
goto out;
}
/* If r_old_dentry is set, then assume that its parent is locked */
ret = set_request_path_attr(NULL, req->r_old_dentry,
req->r_old_dentry_dir,
req->r_path2, req->r_ino2.ino,
&path2, &pathlen2, &ino2, &freepath2);
&path2, &pathlen2, &ino2, &freepath2, true);
if (ret < 0) {
msg = ERR_PTR(ret);
goto out_free1;


@ -568,7 +568,12 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
old_snapc = NULL;
update_snapc:
if (ci->i_head_snapc) {
if (ci->i_wrbuffer_ref_head == 0 &&
ci->i_wr_ref == 0 &&
ci->i_dirty_caps == 0 &&
ci->i_flushing_caps == 0) {
ci->i_head_snapc = NULL;
} else {
ci->i_head_snapc = ceph_get_snap_context(new_snapc);
dout(" new snapc is %p\n", new_snapc);
}


@ -1730,6 +1730,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
if (rc == 0 || rc != -EBUSY)
goto do_rename_exit;
/* Don't fall back to using SMB on SMB 2+ mount */
if (server->vals->protocol_id != 0)
goto do_rename_exit;
/* open-file renames don't work across directories */
if (to_dentry->d_parent != from_dentry->d_parent)
goto do_rename_exit;


@ -828,6 +828,7 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
if (IS_ERR(bh)) {
ret = PTR_ERR(bh);
bh = NULL;
goto out;
}
@ -2905,6 +2906,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
if (error == -EIO)
EXT4_ERROR_INODE(inode, "block %llu read error",
EXT4_I(inode)->i_file_acl);
bh = NULL;
goto cleanup;
}
error = ext4_xattr_check_block(inode, bh);
@ -3061,6 +3063,7 @@ ext4_xattr_block_cache_find(struct inode *inode,
if (IS_ERR(bh)) {
if (PTR_ERR(bh) == -ENOMEM)
return NULL;
bh = NULL;
EXT4_ERROR_INODE(inode, "block %lu read error",
(unsigned long)ce->e_value);
} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {


@ -2044,7 +2044,8 @@ static int nfs23_validate_mount_data(void *options,
memcpy(sap, &data->addr, sizeof(data->addr));
args->nfs_server.addrlen = sizeof(data->addr);
args->nfs_server.port = ntohs(data->addr.sin_port);
if (!nfs_verify_server_address(sap))
if (sap->sa_family != AF_INET ||
!nfs_verify_server_address(sap))
goto out_no_address;
if (!(data->flags & NFS_MOUNT_TCP))


@ -939,8 +939,9 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
cb->cb_seq_status = 1;
cb->cb_status = 0;
if (minorversion) {
if (!nfsd41_cb_get_slot(clp, task))
if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
return;
cb->cb_holds_slot = true;
}
rpc_call_start(task);
}
@ -967,6 +968,9 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
return true;
}
if (!cb->cb_holds_slot)
goto need_restart;
switch (cb->cb_seq_status) {
case 0:
/*
@ -1004,6 +1008,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
cb->cb_seq_status);
}
cb->cb_holds_slot = false;
clear_bit(0, &clp->cl_cb_slot_busy);
rpc_wake_up_next(&clp->cl_cb_waitq);
dprintk("%s: freed slot, new seqid=%d\n", __func__,
@ -1211,6 +1216,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
cb->cb_seq_status = 1;
cb->cb_status = 0;
cb->cb_need_restart = false;
cb->cb_holds_slot = false;
}
void nfsd4_run_cb(struct nfsd4_callback *cb)


@ -69,6 +69,7 @@ struct nfsd4_callback {
int cb_seq_status;
int cb_status;
bool cb_need_restart;
bool cb_holds_slot;
};
struct nfsd4_callback_ops {


@ -1620,9 +1620,11 @@ static void drop_sysctl_table(struct ctl_table_header *header)
if (--header->nreg)
return;
if (parent)
if (parent) {
put_links(header);
start_unregistering(header);
start_unregistering(header);
}
if (!--header->count)
kfree_rcu(header, rcu);


@ -332,8 +332,8 @@ const struct pipe_buf_operations default_pipe_buf_ops = {
.get = generic_pipe_buf_get,
};
static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
return 1;
}


@ -182,6 +182,7 @@ void free_pipe_info(struct pipe_inode_info *);
void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);


@ -220,7 +220,6 @@ static void task_non_contending(struct task_struct *p)
if (dl_se->dl_runtime == 0)
return;
WARN_ON(hrtimer_active(&dl_se->inactive_timer));
WARN_ON(dl_se->dl_non_contending);
zerolag_time = dl_se->deadline -
@ -237,7 +236,7 @@ static void task_non_contending(struct task_struct *p)
* If the "0-lag time" already passed, decrease the active
* utilization now, instead of starting a timer
*/
if (zerolag_time < 0) {
if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
if (dl_task(p))
sub_running_bw(dl_se->dl_bw, dl_rq);
if (!dl_task(p) || p->state == TASK_DEAD) {


@ -2097,6 +2097,10 @@ static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
if (p->last_task_numa_placement) {
delta = runtime - p->last_sum_exec_runtime;
*period = now - p->last_task_numa_placement;
/* Avoid time going backwards, prevent potential divide error: */
if (unlikely((s64)*period < 0))
*period = 0;
} else {
delta = p->se.avg.load_sum / p->se.load.weight;
*period = LOAD_AVG_MAX;


@ -700,7 +700,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
preempt_disable_notrace();
time = rb_time_stamp(buffer);
preempt_enable_no_resched_notrace();
preempt_enable_notrace();
return time;
}


@ -495,8 +495,10 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
* not modified.
*/
pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
if (!pid_list)
if (!pid_list) {
trace_parser_put(&parser);
return -ENOMEM;
}
pid_list->pid_max = READ_ONCE(pid_max);
@ -506,6 +508,7 @@ int trace_pid_write(struct trace_pid_list *filtered_pids,
pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
if (!pid_list->pids) {
trace_parser_put(&parser);
kfree(pid_list);
return -ENOMEM;
}
@ -6727,19 +6730,23 @@ struct buffer_ref {
struct ring_buffer *buffer;
void *page;
int cpu;
int ref;
refcount_t refcount;
};
static void buffer_ref_release(struct buffer_ref *ref)
{
if (!refcount_dec_and_test(&ref->refcount))
return;
ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
kfree(ref);
}
static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
if (--ref->ref)
return;
ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
kfree(ref);
buffer_ref_release(ref);
buf->private = 0;
}
@ -6748,7 +6755,7 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
{
struct buffer_ref *ref = (struct buffer_ref *)buf->private;
ref->ref++;
refcount_inc(&ref->refcount);
}
/* Pipe buffer operations for a buffer. */
@ -6756,7 +6763,7 @@ static const struct pipe_buf_operations buffer_pipe_buf_ops = {
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = buffer_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.steal = generic_pipe_buf_nosteal,
.get = buffer_pipe_buf_get,
};
@ -6769,11 +6776,7 @@ static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
struct buffer_ref *ref =
(struct buffer_ref *)spd->partial[i].private;
if (--ref->ref)
return;
ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
kfree(ref);
buffer_ref_release(ref);
spd->partial[i].private = 0;
}
@ -6828,7 +6831,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
break;
}
ref->ref = 1;
refcount_set(&ref->refcount, 1);
ref->buffer = iter->trace_buffer->buffer;
ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
if (IS_ERR(ref->page)) {

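The open-coded ->ref integer becomes a refcount_t, with the new buffer_ref_release() centralizing the put-and-free path. A minimal sketch of the idiom (illustration only, not code from the patch):

    refcount_set(&ref->refcount, 1);            /* creator takes the first reference */
    refcount_inc(&ref->refcount);               /* every additional holder */
    if (refcount_dec_and_test(&ref->refcount))  /* true only for the final put */
            kfree(ref);

Unlike a plain int, refcount_t saturates instead of wrapping on overflow, so a leaked get can no longer roll the counter back to zero and free the page while readers still hold it.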

@ -2006,6 +2006,7 @@ config TEST_KMOD
depends on m
depends on BLOCK && (64BIT || LBDAF) # for XFS, BTRFS
depends on NETDEVICES && NET_CORE && INET # for TUN
depends on BLOCK
select TEST_LKM
select XFS_FS
select TUN


@ -1818,10 +1818,15 @@ static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
* in may not match the PFN we have mapped if the
* mapped PFN is a writeable COW page. In the mkwrite
* case we are creating a writable PTE for a shared
* mapping and we expect the PFNs to match.
* mapping and we expect the PFNs to match. If they
* don't match, we are likely racing with block
* allocation and mapping invalidation so just skip the
* update.
*/
if (WARN_ON_ONCE(pte_pfn(*pte) != pfn_t_to_pfn(pfn)))
if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
goto out_unlock;
}
entry = *pte;
goto out_mkwrite;
} else


@ -2030,7 +2030,8 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
if (match_kern)
match_kern->match_size = ret;
if (WARN_ON(type == EBT_COMPAT_TARGET && size_left))
/* rule should have no remaining data after target */
if (type == EBT_COMPAT_TARGET && size_left)
return -EINVAL;
match32 = (struct compat_ebt_entry_mwt *) buf;


@ -1192,25 +1192,39 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
return dst;
}
static void ipv4_link_failure(struct sk_buff *skb)
static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
struct ip_options opt;
struct rtable *rt;
int res;
/* Recompile ip options since IPCB may not be valid anymore.
* Also check we have a reasonable ipv4 header.
*/
memset(&opt, 0, sizeof(opt));
opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
rcu_read_lock();
res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
rcu_read_unlock();
if (res)
if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
return;
memset(&opt, 0, sizeof(opt));
if (ip_hdr(skb)->ihl > 5) {
if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
return;
opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
rcu_read_lock();
res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
rcu_read_unlock();
if (res)
return;
}
__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
}
static void ipv4_link_failure(struct sk_buff *skb)
{
struct rtable *rt;
ipv4_send_dest_unreach(skb);
rt = skb_rtable(skb);
if (rt)


@ -49,6 +49,7 @@ static int tcp_delack_seg_min = TCP_DELACK_MIN;
static int tcp_delack_seg_max = 60;
static int tcp_use_userconfig_min;
static int tcp_use_userconfig_max = 1;
static int one_day_secs = 24 * 3600;
/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;
@ -571,7 +572,9 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_min_rtt_wlen,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &one_day_secs
},
{
.procname = "tcp_low_latency",


@ -889,12 +889,13 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
{
struct ip_vs_dest *dest;
unsigned int atype, i;
int ret = 0;
EnterFunction(2);
#ifdef CONFIG_IP_VS_IPV6
if (udest->af == AF_INET6) {
int ret;
atype = ipv6_addr_type(&udest->addr.in6);
if ((!(atype & IPV6_ADDR_UNICAST) ||
atype & IPV6_ADDR_LINKLOCAL) &&


@ -44,6 +44,17 @@ struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
else
pool = rds_ibdev->mr_1m_pool;
if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
/* Switch pools if one of the pool is reaching upper limit */
if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
if (pool->pool_type == RDS_IB_MR_8K_POOL)
pool = rds_ibdev->mr_1m_pool;
else
pool = rds_ibdev->mr_8k_pool;
}
ibmr = rds_ib_try_reuse_ibmr(pool);
if (ibmr)
return ibmr;


@ -442,9 +442,6 @@ struct rds_ib_mr *rds_ib_try_reuse_ibmr(struct rds_ib_mr_pool *pool)
struct rds_ib_mr *ibmr = NULL;
int iter = 0;
if (atomic_read(&pool->dirty_count) >= pool->max_items_soft / 10)
queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
while (1) {
ibmr = rds_ib_reuse_mr(pool);
if (ibmr)


@ -318,9 +318,11 @@ void rose_destroy_socket(struct sock *);
/*
* Handler for deferred kills.
*/
static void rose_destroy_timer(unsigned long data)
static void rose_destroy_timer(struct timer_list *t)
{
rose_destroy_socket((struct sock *)data);
struct sock *sk = from_timer(sk, t, sk_timer);
rose_destroy_socket(sk);
}
/*
@ -353,8 +355,7 @@ void rose_destroy_socket(struct sock *sk)
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
setup_timer(&sk->sk_timer, rose_destroy_timer,
(unsigned long)sk);
timer_setup(&sk->sk_timer, rose_destroy_timer, 0);
sk->sk_timer.expires = jiffies + 10 * HZ;
add_timer(&sk->sk_timer);
} else
@ -538,8 +539,8 @@ static int rose_create(struct net *net, struct socket *sock, int protocol,
sock->ops = &rose_proto_ops;
sk->sk_protocol = protocol;
init_timer(&rose->timer);
init_timer(&rose->idletimer);
timer_setup(&rose->timer, NULL, 0);
timer_setup(&rose->idletimer, NULL, 0);
rose->t1 = msecs_to_jiffies(sysctl_rose_call_request_timeout);
rose->t2 = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
@ -582,8 +583,8 @@ static struct sock *rose_make_new(struct sock *osk)
sk->sk_state = TCP_ESTABLISHED;
sock_copy_flags(sk, osk);
init_timer(&rose->timer);
init_timer(&rose->idletimer);
timer_setup(&rose->timer, NULL, 0);
timer_setup(&rose->idletimer, NULL, 0);
orose = rose_sk(osk);
rose->t1 = orose->t1;

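The net/rose changes here and in the files that follow are all the same timer_setup()/from_timer() conversion: the expiry callback now receives the timer_list pointer and recovers its payload from the enclosing structure, replacing the old unsigned long data cookie. A generic sketch of the idiom (struct and field names are hypothetical):

    struct foo {
            struct timer_list timer;
    };

    static void foo_expiry(struct timer_list *t)
    {
            struct foo *foo = from_timer(foo, t, timer);  /* container_of() underneath */
            /* ... handle expiry for 'foo' ... */
    }

    timer_setup(&foo->timer, foo_expiry, 0);
    mod_timer(&foo->timer, jiffies + HZ);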

@ -27,8 +27,8 @@
#include <linux/interrupt.h>
#include <net/rose.h>
static void rose_ftimer_expiry(unsigned long);
static void rose_t0timer_expiry(unsigned long);
static void rose_ftimer_expiry(struct timer_list *);
static void rose_t0timer_expiry(struct timer_list *);
static void rose_transmit_restart_confirmation(struct rose_neigh *neigh);
static void rose_transmit_restart_request(struct rose_neigh *neigh);
@ -37,8 +37,7 @@ void rose_start_ftimer(struct rose_neigh *neigh)
{
del_timer(&neigh->ftimer);
neigh->ftimer.data = (unsigned long)neigh;
neigh->ftimer.function = &rose_ftimer_expiry;
neigh->ftimer.function = (TIMER_FUNC_TYPE)rose_ftimer_expiry;
neigh->ftimer.expires =
jiffies + msecs_to_jiffies(sysctl_rose_link_fail_timeout);
@ -49,8 +48,7 @@ static void rose_start_t0timer(struct rose_neigh *neigh)
{
del_timer(&neigh->t0timer);
neigh->t0timer.data = (unsigned long)neigh;
neigh->t0timer.function = &rose_t0timer_expiry;
neigh->t0timer.function = (TIMER_FUNC_TYPE)rose_t0timer_expiry;
neigh->t0timer.expires =
jiffies + msecs_to_jiffies(sysctl_rose_restart_request_timeout);
@ -77,13 +75,13 @@ static int rose_t0timer_running(struct rose_neigh *neigh)
return timer_pending(&neigh->t0timer);
}
static void rose_ftimer_expiry(unsigned long param)
static void rose_ftimer_expiry(struct timer_list *t)
{
}
static void rose_t0timer_expiry(unsigned long param)
static void rose_t0timer_expiry(struct timer_list *t)
{
struct rose_neigh *neigh = (struct rose_neigh *)param;
struct rose_neigh *neigh = from_timer(neigh, t, t0timer);
rose_transmit_restart_request(neigh);


@ -16,15 +16,17 @@
#include <linux/init.h>
static struct sk_buff_head loopback_queue;
#define ROSE_LOOPBACK_LIMIT 1000
static struct timer_list loopback_timer;
static void rose_set_loopback_timer(void);
static void rose_loopback_timer(struct timer_list *unused);
void rose_loopback_init(void)
{
skb_queue_head_init(&loopback_queue);
init_timer(&loopback_timer);
timer_setup(&loopback_timer, rose_loopback_timer, 0);
}
static int rose_loopback_running(void)
@ -34,36 +36,30 @@ static int rose_loopback_running(void)
int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh)
{
struct sk_buff *skbn;
struct sk_buff *skbn = NULL;
skbn = skb_clone(skb, GFP_ATOMIC);
if (skb_queue_len(&loopback_queue) < ROSE_LOOPBACK_LIMIT)
skbn = skb_clone(skb, GFP_ATOMIC);
kfree_skb(skb);
if (skbn != NULL) {
if (skbn) {
consume_skb(skb);
skb_queue_tail(&loopback_queue, skbn);
if (!rose_loopback_running())
rose_set_loopback_timer();
} else {
kfree_skb(skb);
}
return 1;
}
static void rose_loopback_timer(unsigned long);
static void rose_set_loopback_timer(void)
{
del_timer(&loopback_timer);
loopback_timer.data = 0;
loopback_timer.function = &rose_loopback_timer;
loopback_timer.expires = jiffies + 10;
add_timer(&loopback_timer);
mod_timer(&loopback_timer, jiffies + 10);
}
static void rose_loopback_timer(unsigned long param)
static void rose_loopback_timer(struct timer_list *unused)
{
struct sk_buff *skb;
struct net_device *dev;
@ -71,8 +67,12 @@ static void rose_loopback_timer(unsigned long param)
struct sock *sk;
unsigned short frametype;
unsigned int lci_i, lci_o;
int count;
while ((skb = skb_dequeue(&loopback_queue)) != NULL) {
for (count = 0; count < ROSE_LOOPBACK_LIMIT; count++) {
skb = skb_dequeue(&loopback_queue);
if (!skb)
return;
if (skb->len < ROSE_MIN_LEN) {
kfree_skb(skb);
continue;
@ -109,6 +109,8 @@ static void rose_loopback_timer(unsigned long param)
kfree_skb(skb);
}
}
if (!skb_queue_empty(&loopback_queue))
mod_timer(&loopback_timer, jiffies + 1);
}
void __exit rose_loopback_clear(void)


@ -104,8 +104,8 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
skb_queue_head_init(&rose_neigh->queue);
init_timer(&rose_neigh->ftimer);
init_timer(&rose_neigh->t0timer);
timer_setup(&rose_neigh->ftimer, NULL, 0);
timer_setup(&rose_neigh->t0timer, NULL, 0);
if (rose_route->ndigis != 0) {
rose_neigh->digipeat =
@ -390,8 +390,8 @@ void rose_add_loopback_neigh(void)
skb_queue_head_init(&sn->queue);
init_timer(&sn->ftimer);
init_timer(&sn->t0timer);
timer_setup(&sn->ftimer, NULL, 0);
timer_setup(&sn->t0timer, NULL, 0);
spin_lock_bh(&rose_neigh_list_lock);
sn->next = rose_neigh_list;


@ -29,8 +29,8 @@
#include <net/rose.h>
static void rose_heartbeat_expiry(unsigned long);
static void rose_timer_expiry(unsigned long);
static void rose_idletimer_expiry(unsigned long);
static void rose_timer_expiry(struct timer_list *);
static void rose_idletimer_expiry(struct timer_list *);
void rose_start_heartbeat(struct sock *sk)
{
@ -49,8 +49,7 @@ void rose_start_t1timer(struct sock *sk)
del_timer(&rose->timer);
rose->timer.data = (unsigned long)sk;
rose->timer.function = &rose_timer_expiry;
rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
rose->timer.expires = jiffies + rose->t1;
add_timer(&rose->timer);
@ -62,8 +61,7 @@ void rose_start_t2timer(struct sock *sk)
del_timer(&rose->timer);
rose->timer.data = (unsigned long)sk;
rose->timer.function = &rose_timer_expiry;
rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
rose->timer.expires = jiffies + rose->t2;
add_timer(&rose->timer);
@ -75,8 +73,7 @@ void rose_start_t3timer(struct sock *sk)
del_timer(&rose->timer);
rose->timer.data = (unsigned long)sk;
rose->timer.function = &rose_timer_expiry;
rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
rose->timer.expires = jiffies + rose->t3;
add_timer(&rose->timer);
@ -88,8 +85,7 @@ void rose_start_hbtimer(struct sock *sk)
del_timer(&rose->timer);
rose->timer.data = (unsigned long)sk;
rose->timer.function = &rose_timer_expiry;
rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
rose->timer.expires = jiffies + rose->hb;
add_timer(&rose->timer);
@ -102,8 +98,7 @@ void rose_start_idletimer(struct sock *sk)
del_timer(&rose->idletimer);
if (rose->idle > 0) {
rose->idletimer.data = (unsigned long)sk;
rose->idletimer.function = &rose_idletimer_expiry;
rose->idletimer.function = (TIMER_FUNC_TYPE)rose_idletimer_expiry;
rose->idletimer.expires = jiffies + rose->idle;
add_timer(&rose->idletimer);
@ -163,10 +158,10 @@ static void rose_heartbeat_expiry(unsigned long param)
bh_unlock_sock(sk);
}
static void rose_timer_expiry(unsigned long param)
static void rose_timer_expiry(struct timer_list *t)
{
struct sock *sk = (struct sock *)param;
struct rose_sock *rose = rose_sk(sk);
struct rose_sock *rose = from_timer(rose, t, timer);
struct sock *sk = &rose->sock;
bh_lock_sock(sk);
switch (rose->state) {
@ -192,9 +187,10 @@ static void rose_timer_expiry(unsigned long param)
bh_unlock_sock(sk);
}
static void rose_idletimer_expiry(unsigned long param)
static void rose_idletimer_expiry(struct timer_list *t)
{
struct sock *sk = (struct sock *)param;
struct rose_sock *rose = from_timer(rose, t, idletimer);
struct sock *sk = &rose->sock;
bh_lock_sock(sk);
rose_clear_queues(sk);


@ -54,6 +54,7 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
h->last_refresh = now;
}
static inline int cache_is_valid(struct cache_head *h);
static void cache_fresh_locked(struct cache_head *head, time_t expiry,
struct cache_detail *detail);
static void cache_fresh_unlocked(struct cache_head *head,
@ -100,6 +101,8 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
if (cache_is_expired(detail, tmp)) {
hlist_del_init(&tmp->cache_list);
detail->entries --;
if (cache_is_valid(tmp) == -EAGAIN)
set_bit(CACHE_NEGATIVE, &tmp->flags);
cache_fresh_locked(tmp, 0, detail);
freeme = tmp;
break;


@ -262,8 +262,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
if (msg->rep_type)
tipc_tlv_init(msg->rep, msg->rep_type);
if (cmd->header)
(*cmd->header)(msg);
if (cmd->header) {
err = (*cmd->header)(msg);
if (err) {
kfree_skb(msg->rep);
msg->rep = NULL;
return err;
}
}
arg = nlmsg_new(0, GFP_KERNEL);
if (!arg) {
@ -388,7 +394,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
if (!bearer)
return -EMSGSIZE;
len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
len = TLV_GET_DATA_LEN(msg->req);
len -= offsetof(struct tipc_bearer_config, name);
if (len <= 0)
return -EINVAL;
len = min_t(int, len, TIPC_MAX_BEARER_NAME);
if (!string_is_valid(b->name, len))
return -EINVAL;
@ -757,7 +768,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
len = TLV_GET_DATA_LEN(msg->req);
len -= offsetof(struct tipc_link_config, name);
if (len <= 0)
return -EINVAL;
len = min_t(int, len, TIPC_MAX_LINK_NAME);
if (!string_is_valid(lc->name, len))
return -EINVAL;


@ -662,6 +662,8 @@ static int virtio_transport_reset(struct vsock_sock *vsk,
*/
static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
{
const struct virtio_transport *t;
struct virtio_vsock_pkt *reply;
struct virtio_vsock_pkt_info info = {
.op = VIRTIO_VSOCK_OP_RST,
.type = le16_to_cpu(pkt->hdr.type),
@ -672,15 +674,21 @@ static int virtio_transport_reset_no_sock(struct virtio_vsock_pkt *pkt)
if (le16_to_cpu(pkt->hdr.op) == VIRTIO_VSOCK_OP_RST)
return 0;
pkt = virtio_transport_alloc_pkt(&info, 0,
le64_to_cpu(pkt->hdr.dst_cid),
le32_to_cpu(pkt->hdr.dst_port),
le64_to_cpu(pkt->hdr.src_cid),
le32_to_cpu(pkt->hdr.src_port));
if (!pkt)
reply = virtio_transport_alloc_pkt(&info, 0,
le64_to_cpu(pkt->hdr.dst_cid),
le32_to_cpu(pkt->hdr.dst_port),
le64_to_cpu(pkt->hdr.src_cid),
le32_to_cpu(pkt->hdr.src_port));
if (!reply)
return -ENOMEM;
return virtio_transport_get_ops()->send_pkt(pkt);
t = virtio_transport_get_ops();
if (!t) {
virtio_transport_free_pkt(reply);
return -ENOTCONN;
}
return t->send_pkt(reply);
}
static void virtio_transport_wait_close(struct sock *sk, long timeout)


@ -196,9 +196,7 @@ cc-ldoption = $(call try-run,\
# ld-option
# Usage: LDFLAGS += $(call ld-option, -X)
ld-option = $(call try-run,\
$(CC) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -x c /dev/null -c -o "$$TMPO"; \
$(LD) $(LDFLAGS) $(1) "$$TMPO" -o "$$TMP",$(1),$(2))
ld-option = $(call try-run, $(LD) $(LDFLAGS) $(1) -v,$(1),$(2))
# ar-option
# Usage: KBUILD_ARFLAGS := $(call ar-option,D)