Merge android-4.14-p.90 (a5e7b25) into msm-4.14

* refs/heads/tmp-a5e7b25:
  Linux 4.14.90
  bpf, arm: fix emit_ldx_r and emit_mov_i using TMP_REG_1
  rtc: snvs: Add timeouts to avoid kernel lockups
  nvmet-rdma: fix response use after free
  i2c: uniphier-f: fix violation of tLOW requirement for Fast-mode
  i2c: uniphier: fix violation of tLOW requirement for Fast-mode
  i2c: scmi: Fix probe error on devices with an empty SMB0001 ACPI device node
  i2c: axxia: properly handle master timeout
  mlxsw: spectrum_switchdev: Fix VLAN device deletion via ioctl
  vhost/vsock: fix reset orphans race with close timeout
  cifs: In Kconfig CONFIG_CIFS_POSIX needs depends on legacy (insecure cifs)
  drm/ast: Fix connector leak during driver unload
  ethernet: fman: fix wrong of_node_put() in probe function
  ARM: 8815/1: V7M: align v7m_dma_inv_range() with v7 counterpart
  ARM: 8814/1: mm: improve/fix ARM v7_dma_inv_range() unaligned address handling
  net/mlx4_en: Fix build break when CONFIG_INET is off
  mv88e6060: disable hardware level MAC learning
  libata: whitelist all SAMSUNG MZ7KM* solid-state disks
  Input: omap-keypad - fix keyboard debounce configuration
  Input: synaptics - enable SMBus for HP 15-ay000
  clk: mmp: Off by one in mmp_clk_add()
  clk: mvebu: Off by one bugs in cp110_of_clk_get()
  drm/msm: Fix error return checking
  ide: pmac: add of_node_put()
  drivers/tty: add missing of_node_put()
  drivers/sbus/char: add of_node_put()
  sbus: char: add of_node_put()
  SUNRPC: Fix a potential race in xprt_connect()
  nfs: don't dirty kernel pages read by direct-io
  bpf: Fix verifier log string check for bad alignment.
  bonding: fix 802.3ad state sent to partner when unbinding slave
  ARC: io.h: Implement reads{x}()/writes{x}()
  drm/msm: Grab a vblank reference when waiting for commit_done
  x86/earlyprintk/efi: Fix infinite loop on some screen widths
  scsi: vmw_pscsi: Rearrange code to avoid multiple calls to free_irq during unload
  scsi: libiscsi: Fix NULL pointer dereference in iscsi_eh_session_reset
  Input: hyper-v - fix wakeup from suspend-to-idle
  mac80211_hwsim: fix module init error paths for netlink
  locking/qspinlock: Fix build for anonymous union in older GCC compilers
  IB/hfi1: Remove race conditions in user_sdma send path
  mac80211: Fix condition validating WMM IE
  mac80211: don't WARN on bad WMM parameters from buggy APs
  netfilter: ipset: Fix wraparound in hash:*net* types
  elevator: lookup mq vs non-mq elevators
  locking/qspinlock, x86: Provide liveness guarantee
  locking/qspinlock/x86: Increase _Q_PENDING_LOOPS upper bound
  locking/qspinlock: Re-order code
  locking/qspinlock: Kill cmpxchg() loop when claiming lock from head of queue
  locking/qspinlock: Remove duplicate clear_pending() function from PV code
  locking/qspinlock: Remove unbounded cmpxchg() loop from locking slowpath
  locking/qspinlock: Merge 'struct __qspinlock' into 'struct qspinlock'
  locking/qspinlock: Bound spinning on pending->locked transition in slowpath
  locking/qspinlock: Ensure node is initialised before updating prev->next
  locking: Remove smp_read_barrier_depends() from queued_spin_lock_slowpath()
  x86/build: Fix compiler support check for CONFIG_RETPOLINE
  drm/amdgpu: update SMC firmware image for polaris10 variants
  drm/i915/execlists: Apply a full mb before execution for Braswell
  Revert "drm/rockchip: Allow driver to be shutdown on reboot/kexec"
  drm/nouveau/kms: Fix memory leak in nv50_mstm_del()
  powerpc/msi: Fix NULL pointer access in teardown code
  tracing: Fix memory leak of instance function hash filters
  tracing: Fix memory leak in set_trigger_filter()
  dm cache metadata: verify cache has blocks in blocks_are_clean_separate_dirty()
  dm thin: send event about thin-pool state change _after_ making it
  ARM: mmp/mmp2: fix cpu_is_mmp2() on mmp2-dt
  fuse: continue to send FUSE_RELEASEDIR when FUSE_OPEN returns ENOSYS
  mmc: sdhci: fix the timeout check window for clock and reset
  MMC: OMAP: fix broken MMC on OMAP15XX/OMAP5910/OMAP310
  arm64: dma-mapping: Fix FORCE_CONTIGUOUS buffer clearing
  userfaultfd: check VM_MAYWRITE was set after verifying the uffd is registered
  aio: fix spectre gadget in lookup_ioctx
  pinctrl: sunxi: a83t: Fix IRQ offset typo for PH11
  timer/debug: Change /proc/timer_list from 0444 to 0400

Change-Id: I9c3c7020caccfea06edafeb33c2740560bb6cc12
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
Blagovest Kolenichev 2019-01-28 06:16:26 -08:00
commit 1ed79e1650
73 changed files with 680 additions and 417 deletions


@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 89
+SUBLEVEL = 90
 EXTRAVERSION =
 NAME = Petit Gorille


@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <asm/page.h>
+#include <asm/unaligned.h>
 
 #ifdef CONFIG_ISA_ARCV2
 #include <asm/barrier.h>
@@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
     return w;
 }
 
+/*
+ * {read,write}s{b,w,l}() repeatedly access the same IO address in
+ * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
+ * @count times
+ */
+#define __raw_readsx(t,f) \
+static inline void __raw_reads##f(const volatile void __iomem *addr, \
+                                  void *ptr, unsigned int count) \
+{ \
+    bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
+    u##t *buf = ptr; \
+ \
+    if (!count) \
+        return; \
+ \
+    /* Some ARC CPU's don't support unaligned accesses */ \
+    if (is_aligned) { \
+        do { \
+            u##t x = __raw_read##f(addr); \
+            *buf++ = x; \
+        } while (--count); \
+    } else { \
+        do { \
+            u##t x = __raw_read##f(addr); \
+            put_unaligned(x, buf++); \
+        } while (--count); \
+    } \
+}
+
+#define __raw_readsb __raw_readsb
+__raw_readsx(8, b)
+#define __raw_readsw __raw_readsw
+__raw_readsx(16, w)
+#define __raw_readsl __raw_readsl
+__raw_readsx(32, l)
+
 #define __raw_writeb __raw_writeb
 static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
 {
@@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 }
 
+#define __raw_writesx(t,f) \
+static inline void __raw_writes##f(volatile void __iomem *addr, \
+                                   const void *ptr, unsigned int count) \
+{ \
+    bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
+    const u##t *buf = ptr; \
+ \
+    if (!count) \
+        return; \
+ \
+    /* Some ARC CPU's don't support unaligned accesses */ \
+    if (is_aligned) { \
+        do { \
+            __raw_write##f(*buf++, addr); \
+        } while (--count); \
+    } else { \
+        do { \
+            __raw_write##f(get_unaligned(buf++), addr); \
+        } while (--count); \
+    } \
+}
+
+#define __raw_writesb __raw_writesb
+__raw_writesx(8, b)
+#define __raw_writesw __raw_writesw
+__raw_writesx(16, w)
+#define __raw_writesl __raw_writesl
+__raw_writesx(32, l)
+
 /*
  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
  * Based on ARM model for the typical use case
@@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 #define readb(c)        ({ u8  __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)        ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
 #define readl(c)        ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readsb(p,d,l)   ({ __raw_readsb(p,d,l); __iormb(); })
+#define readsw(p,d,l)   ({ __raw_readsw(p,d,l); __iormb(); })
+#define readsl(p,d,l)   ({ __raw_readsl(p,d,l); __iormb(); })
 
 #define writeb(v,c)     ({ __iowmb(); writeb_relaxed(v,c); })
 #define writew(v,c)     ({ __iowmb(); writew_relaxed(v,c); })
 #define writel(v,c)     ({ __iowmb(); writel_relaxed(v,c); })
+#define writesb(p,d,l)  ({ __iowmb(); __raw_writesb(p,d,l); })
+#define writesw(p,d,l)  ({ __iowmb(); __raw_writesw(p,d,l); })
+#define writesl(p,d,l)  ({ __iowmb(); __raw_writesl(p,d,l); })
 
 /*
  * Relaxed API for drivers which can handle barrier ordering themselves


@@ -44,10 +44,12 @@ static inline int cpu_is_pxa910(void)
 #define cpu_is_pxa910()	(0)
 #endif
 
-#ifdef CONFIG_CPU_MMP2
+#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
 static inline int cpu_is_mmp2(void)
 {
-    return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
+    return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
+        (((mmp_chip_id & 0xfff) == 0x410) ||
+         ((mmp_chip_id & 0xfff) == 0x610));
 }
 #else
 #define cpu_is_mmp2()	(0)


@@ -359,14 +359,16 @@ ENTRY(v7_dma_inv_range)
         ALT_UP(W(nop))
 #endif
         mcrne   p15, 0, r0, c7, c14, 1      @ clean & invalidate D / U line
+        addne   r0, r0, r2
 
         tst     r1, r3
         bic     r1, r1, r3
         mcrne   p15, 0, r1, c7, c14, 1      @ clean & invalidate D / U line
-1:
-        mcr     p15, 0, r0, c7, c6, 1       @ invalidate D / U line
-        add     r0, r0, r2
         cmp     r0, r1
+1:
+        mcrlo   p15, 0, r0, c7, c6, 1       @ invalidate D / U line
+        addlo   r0, r0, r2
+        cmplo   r0, r1
         blo     1b
         dsb     st
         ret     lr


@@ -73,9 +73,11 @@
 /*
  * dcimvac: Invalidate data cache line by MVA to PoC
  */
-.macro dcimvac, rt, tmp
-        v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
+.irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+.macro dcimvac\c, rt, tmp
+        v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
 .endm
+.endr
 
 /*
  * dccmvau: Clean data cache line by MVA to PoU
@@ -369,14 +371,16 @@ v7m_dma_inv_range:
         tst     r0, r3
         bic     r0, r0, r3
         dccimvacne r0, r3
+        addne   r0, r0, r2
         subne   r3, r2, #1  @ restore r3, corrupted by v7m's dccimvac
         tst     r1, r3
         bic     r1, r1, r3
         dccimvacne r1, r3
-1:
-        dcimvac r0, r3
-        add     r0, r0, r2
         cmp     r0, r1
+1:
+        dcimvaclo r0, r3
+        addlo   r0, r0, r2
+        cmplo   r0, r1
         blo     1b
         dsb     st
         ret     lr


@@ -915,7 +915,7 @@ static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
 /* dst = *(size*)(src + off) */
 static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
                               s32 off, struct jit_ctx *ctx, const u8 sz){
-    const u8 *tmp = bpf2a32[TMP_REG_1];
+    const u8 *tmp = bpf2a32[TMP_REG_2];
     const u8 *rd = dstk ? tmp : dst;
     u8 rm = src;
     s32 off_max;


@@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
 {
     struct pci_controller *phb = pci_bus_to_host(dev->bus);
 
-    phb->controller_ops.teardown_msi_irqs(dev);
+    /*
+     * We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
+     * so check the pointer again.
+     */
+    if (phb->controller_ops.teardown_msi_irqs)
+        phb->controller_ops.teardown_msi_irqs(dev);
 }


@@ -244,9 +244,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 
 # Avoid indirect branches in kernel to deal with Spectre
 ifdef CONFIG_RETPOLINE
-ifeq ($(RETPOLINE_CFLAGS),)
-  $(error You are building kernel with non-retpoline compiler, please update your compiler.)
-endif
   KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
 endif
 
@@ -263,6 +260,13 @@ archprepare:
 ifeq ($(CONFIG_KEXEC_FILE),y)
 	$(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
 endif
+ifdef CONFIG_RETPOLINE
+ifeq ($(RETPOLINE_CFLAGS),)
+	@echo "You are building kernel with non-retpoline compiler." >&2
+	@echo "Please update your compiler." >&2
+	@false
+endif
+endif
 
 ###
 # Kernel objects


@@ -5,6 +5,29 @@
 #include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
 #include <asm/paravirt.h>
+#include <asm/rmwcc.h>
+
+#define _Q_PENDING_LOOPS	(1 << 9)
+
+#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
+
+static __always_inline bool __queued_RMW_btsl(struct qspinlock *lock)
+{
+    GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter,
+                     "I", _Q_PENDING_OFFSET, "%0", c);
+}
+
+static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
+{
+    u32 val = 0;
+
+    if (__queued_RMW_btsl(lock))
+        val |= _Q_PENDING_VAL;
+
+    val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
+
+    return val;
+}
 
 #define	queued_spin_unlock queued_spin_unlock
 /**
@@ -15,7 +38,7 @@
  */
 static inline void native_queued_spin_unlock(struct qspinlock *lock)
 {
-    smp_store_release((u8 *)lock, 0);
+    smp_store_release(&lock->locked, 0);
 }
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS


@@ -22,8 +22,7 @@ PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath);
  *
  * void __pv_queued_spin_unlock(struct qspinlock *lock)
  * {
- *	struct __qspinlock *l = (void *)lock;
- *	u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
+ *	u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
  *
  *	if (likely(lockval == _Q_LOCKED_VAL))
  *		return;


@@ -179,7 +179,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
             num--;
         }
 
-        if (efi_x >= si->lfb_width) {
+        if (efi_x + font->width > si->lfb_width) {
             efi_x = 0;
             efi_y += font->height;
         }


@@ -83,12 +83,15 @@ bool elv_bio_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_bio_merge_ok);
 
-static struct elevator_type *elevator_find(const char *name)
+/*
+ * Return scheduler with name 'name' and with matching 'mq capability
+ */
+static struct elevator_type *elevator_find(const char *name, bool mq)
 {
     struct elevator_type *e;
 
     list_for_each_entry(e, &elv_list, list) {
-        if (!strcmp(e->elevator_name, name))
+        if (!strcmp(e->elevator_name, name) && (mq == e->uses_mq))
             return e;
     }
 
@@ -100,25 +103,25 @@ static void elevator_put(struct elevator_type *e)
     module_put(e->elevator_owner);
 }
 
-static struct elevator_type *elevator_get(const char *name, bool try_loading)
+static struct elevator_type *elevator_get(struct request_queue *q,
+                                          const char *name, bool try_loading)
 {
     struct elevator_type *e;
 
     spin_lock(&elv_list_lock);
 
-    e = elevator_find(name);
+    e = elevator_find(name, q->mq_ops != NULL);
     if (!e && try_loading) {
         spin_unlock(&elv_list_lock);
         request_module("%s-iosched", name);
         spin_lock(&elv_list_lock);
-        e = elevator_find(name);
+        e = elevator_find(name, q->mq_ops != NULL);
     }
 
     if (e && !try_module_get(e->elevator_owner))
         e = NULL;
 
     spin_unlock(&elv_list_lock);
     return e;
 }
@@ -144,8 +147,12 @@ void __init load_default_elevator_module(void)
     if (!chosen_elevator[0])
         return;
 
+    /*
+     * Boot parameter is deprecated, we haven't supported that for MQ.
+     * Only look for non-mq schedulers from here.
+     */
     spin_lock(&elv_list_lock);
-    e = elevator_find(chosen_elevator);
+    e = elevator_find(chosen_elevator, false);
     spin_unlock(&elv_list_lock);
 
     if (!e)
@@ -202,7 +209,7 @@ int elevator_init(struct request_queue *q, char *name)
     q->boundary_rq = NULL;
 
     if (name) {
-        e = elevator_get(name, true);
+        e = elevator_get(q, name, true);
         if (!e)
             return -EINVAL;
     }
@@ -214,7 +221,7 @@ int elevator_init(struct request_queue *q, char *name)
      * allowed from async.
      */
     if (!e && !q->mq_ops && *chosen_elevator) {
-        e = elevator_get(chosen_elevator, false);
+        e = elevator_get(q, chosen_elevator, false);
         if (!e)
             printk(KERN_ERR "I/O scheduler %s not found\n",
                             chosen_elevator);
@@ -229,17 +236,17 @@ int elevator_init(struct request_queue *q, char *name)
          */
         if (q->mq_ops) {
             if (q->nr_hw_queues == 1)
-                e = elevator_get("mq-deadline", false);
+                e = elevator_get(q, "mq-deadline", false);
             if (!e)
                 return 0;
         } else
-            e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
+            e = elevator_get(q, CONFIG_DEFAULT_IOSCHED, false);
 
         if (!e) {
             printk(KERN_ERR
                 "Default I/O scheduler not found. " \
                     "Using noop.\n");
-            e = elevator_get("noop", false);
+            e = elevator_get(q, "noop", false);
         }
     }
@@ -907,7 +914,7 @@ int elv_register(struct elevator_type *e)
     /* register, don't allow duplicate names */
     spin_lock(&elv_list_lock);
-    if (elevator_find(e->elevator_name)) {
+    if (elevator_find(e->elevator_name, e->uses_mq)) {
         spin_unlock(&elv_list_lock);
         if (e->icq_cache)
             kmem_cache_destroy(e->icq_cache);
@@ -1068,7 +1075,7 @@ static int __elevator_change(struct request_queue *q, const char *name)
         return elevator_switch(q, NULL);
 
     strlcpy(elevator_name, name, sizeof(elevator_name));
-    e = elevator_get(strstrip(elevator_name), true);
+    e = elevator_get(q, strstrip(elevator_name), true);
     if (!e)
         return -EINVAL;
@@ -1078,15 +1085,6 @@ static int __elevator_change(struct request_queue *q, const char *name)
         return 0;
     }
 
-    if (!e->uses_mq && q->mq_ops) {
-        elevator_put(e);
-        return -EINVAL;
-    }
-    if (e->uses_mq && !q->mq_ops) {
-        elevator_put(e);
-        return -EINVAL;
-    }
-
     return elevator_switch(q, e);
 }


@@ -4593,6 +4593,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
     { "SSD*INTEL*",             NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
     { "Samsung*SSD*",           NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
     { "SAMSUNG*SSD*",           NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
+    { "SAMSUNG*MZ7KM*",         NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
     { "ST[1248][0248]0[FH]*",   NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
 
     /*


@@ -183,7 +183,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
         pr_err("CLK %d has invalid pointer %p\n", id, clk);
         return;
     }
-    if (id > unit->nr_clks) {
+    if (id >= unit->nr_clks) {
         pr_err("CLK %d is invalid\n", id);
         return;
     }


@@ -203,11 +203,11 @@ static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec,
     unsigned int idx = clkspec->args[1];
 
     if (type == CP110_CLK_TYPE_CORE) {
-        if (idx > CP110_MAX_CORE_CLOCKS)
+        if (idx >= CP110_MAX_CORE_CLOCKS)
             return ERR_PTR(-EINVAL);
         return clk_data->hws[idx];
     } else if (type == CP110_CLK_TYPE_GATABLE) {
-        if (idx > CP110_MAX_GATABLE_CLOCKS)
+        if (idx >= CP110_MAX_GATABLE_CLOCKS)
             return ERR_PTR(-EINVAL);
         return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx];
     }


@@ -723,7 +723,8 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                      (adev->pdev->revision == 0xe7) ||
                      (adev->pdev->revision == 0xef))) ||
                     ((adev->pdev->device == 0x6fdf) &&
-                     (adev->pdev->revision == 0xef))) {
+                     ((adev->pdev->revision == 0xef) ||
+                      (adev->pdev->revision == 0xff)))) {
                     info->is_kicker = true;
                     strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
                 } else


@@ -263,6 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
 {
     struct ast_framebuffer *afb = &afbdev->afb;
 
+    drm_crtc_force_disable_all(dev);
     drm_fb_helper_unregister_fbi(&afbdev->helper);
 
     if (afb->obj) {


@@ -343,8 +343,13 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
      * may not be visible to the HW prior to the completion of the UC
      * register write and that we may begin execution from the context
      * before its image is complete leading to invalid PD chasing.
+     *
+     * Furthermore, Braswell, at least, wants a full mb to be sure that
+     * the writes are coherent in memory (visible to the GPU) prior to
+     * execution, and not just visible to other CPUs (as is the result of
+     * wmb).
      */
-    wmb();
+    mb();
 
     return ce->lrc_desc;
 }


@@ -194,7 +194,12 @@ static void msm_atomic_wait_for_commit_done(
         if (!new_crtc_state->active)
             continue;
 
+        if (drm_crtc_vblank_get(crtc))
+            continue;
+
         kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+
+        drm_crtc_vblank_put(crtc);
     }
 }


@@ -66,7 +66,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
 //	pm_runtime_get_sync(mmu->dev);
     ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
 //	pm_runtime_put_sync(mmu->dev);
-    WARN_ON(ret < 0);
+    WARN_ON(!ret);
 
     return (ret == len) ? 0 : -EINVAL;
 }


@@ -3378,6 +3378,7 @@ nv50_mstm_del(struct nv50_mstm **pmstm)
 {
     struct nv50_mstm *mstm = *pmstm;
     if (mstm) {
+        drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
         kfree(*pmstm);
         *pmstm = NULL;
     }


@@ -425,11 +425,6 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
     return 0;
 }
 
-static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
-{
-    rockchip_drm_platform_remove(pdev);
-}
-
 static const struct of_device_id rockchip_drm_dt_ids[] = {
     { .compatible = "rockchip,display-subsystem", },
     { /* sentinel */ },
@@ -439,7 +434,6 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
 static struct platform_driver rockchip_drm_platform_driver = {
     .probe = rockchip_drm_platform_probe,
     .remove = rockchip_drm_platform_remove,
-    .shutdown = rockchip_drm_platform_shutdown,
     .driver = {
         .name = "rockchip-drm",
         .of_match_table = rockchip_drm_dt_ids,


@@ -309,7 +309,7 @@ static void mousevsc_on_receive(struct hv_device *device,
         hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
                          input_dev->input_buf, len, 1);
 
-        pm_wakeup_event(&input_dev->device->device, 0);
+        pm_wakeup_hard_event(&input_dev->device->device);
 
         break;
     default:


@@ -74,8 +74,7 @@
                              MST_STATUS_ND)
 #define   MST_STATUS_ERR    (MST_STATUS_NAK | \
                              MST_STATUS_AL  | \
-                             MST_STATUS_IP  | \
-                             MST_STATUS_TSS)
+                             MST_STATUS_IP)
 #define MST_TX_BYTES_XFRD   0x50
 #define MST_RX_BYTES_XFRD   0x54
 #define SCL_HIGH_PERIOD     0x80
@@ -241,7 +240,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev)
              */
            if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) {
                idev->msg_err = -EPROTO;
-               i2c_int_disable(idev, ~0);
+               i2c_int_disable(idev, ~MST_STATUS_TSS);
                complete(&idev->msg_complete);
                break;
            }
@@ -299,14 +298,19 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
 
     if (status & MST_STATUS_SCC) {
         /* Stop completed */
-        i2c_int_disable(idev, ~0);
+        i2c_int_disable(idev, ~MST_STATUS_TSS);
         complete(&idev->msg_complete);
     } else if (status & MST_STATUS_SNS) {
         /* Transfer done */
-        i2c_int_disable(idev, ~0);
+        i2c_int_disable(idev, ~MST_STATUS_TSS);
         if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
             axxia_i2c_empty_rx_fifo(idev);
         complete(&idev->msg_complete);
+    } else if (status & MST_STATUS_TSS) {
+        /* Transfer timeout */
+        idev->msg_err = -ETIMEDOUT;
+        i2c_int_disable(idev, ~MST_STATUS_TSS);
+        complete(&idev->msg_complete);
     } else if (unlikely(status & MST_STATUS_ERR)) {
         /* Transfer error */
         i2c_int_disable(idev, ~0);
@@ -339,10 +343,10 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
     u32 rx_xfer, tx_xfer;
     u32 addr_1, addr_2;
     unsigned long time_left;
+    unsigned int wt_value;
 
     idev->msg = msg;
     idev->msg_xfrd = 0;
-    idev->msg_err = 0;
     reinit_completion(&idev->msg_complete);
 
     if (i2c_m_ten(msg)) {
@@ -382,9 +386,18 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
     else if (axxia_i2c_fill_tx_fifo(idev) != 0)
         int_mask |= MST_STATUS_TFL;
 
+    wt_value = WT_VALUE(readl(idev->base + WAIT_TIMER_CONTROL));
+    /* Disable wait timer temporarly */
+    writel(wt_value, idev->base + WAIT_TIMER_CONTROL);
+    /* Check if timeout error happened */
+    if (idev->msg_err)
+        goto out;
+
     /* Start manual mode */
     writel(CMD_MANUAL, idev->base + MST_COMMAND);
 
+    writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL);
+
     i2c_int_enable(idev, int_mask);
 
     time_left = wait_for_completion_timeout(&idev->msg_complete,
@@ -395,13 +408,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
     if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
         dev_warn(idev->dev, "busy after xfer\n");
 
-    if (time_left == 0)
+    if (time_left == 0) {
         idev->msg_err = -ETIMEDOUT;
-
-    if (idev->msg_err == -ETIMEDOUT)
         i2c_recover_bus(&idev->adapter);
+        axxia_i2c_init(idev);
+    }
 
-    if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO)
+out:
+    if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO &&
+        idev->msg_err != -ETIMEDOUT)
         axxia_i2c_init(idev);
 
     return idev->msg_err;
@@ -409,7 +424,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
 
 static int axxia_i2c_stop(struct axxia_i2c_dev *idev)
 {
-    u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC;
+    u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS;
     unsigned long time_left;
 
     reinit_completion(&idev->msg_complete);
@@ -436,6 +451,9 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
     int i;
     int ret = 0;
 
+    idev->msg_err = 0;
+    i2c_int_enable(idev, MST_STATUS_TSS);
+
     for (i = 0; ret == 0 && i < num; ++i)
         ret = axxia_i2c_xfer_msg(idev, &msgs[i]);


@@ -364,6 +364,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
 {
     struct acpi_smbus_cmi *smbus_cmi;
     const struct acpi_device_id *id;
+    int ret;
 
     smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
     if (!smbus_cmi)
@@ -385,8 +386,10 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
     acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
                         acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
 
-    if (smbus_cmi->cap_info == 0)
+    if (smbus_cmi->cap_info == 0) {
+        ret = -ENODEV;
         goto err;
+    }
 
     snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
         "SMBus CMI adapter %s",
@@ -397,7 +400,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
     smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
     smbus_cmi->adapter.dev.parent = &device->dev;
 
-    if (i2c_add_adapter(&smbus_cmi->adapter)) {
+    ret = i2c_add_adapter(&smbus_cmi->adapter);
+    if (ret) {
         dev_err(&device->dev, "Couldn't register adapter!\n");
         goto err;
     }
@@ -407,7 +411,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
 err:
     kfree(smbus_cmi);
     device->driver_data = NULL;
-    return -EIO;
+    return ret;
 }
 
 static int acpi_smbus_cmi_remove(struct acpi_device *device)


@@ -470,9 +470,26 @@ static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv)
 
     uniphier_fi2c_reset(priv);
 
+    /*
+     * Standard-mode: tLOW + tHIGH = 10 us
+     * Fast-mode: tLOW + tHIGH = 2.5 us
+     */
     writel(cyc, priv->membase + UNIPHIER_FI2C_CYC);
-    writel(cyc / 2, priv->membase + UNIPHIER_FI2C_LCTL);
+    /*
+     * Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us, tBUF = 4.7 us
+     * Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us, tBUF = 1.3 us
+     * "tLow/tHIGH = 5/4" meets both.
+     */
+    writel(cyc * 5 / 9, priv->membase + UNIPHIER_FI2C_LCTL);
+    /*
+     * Standard-mode: tHD;STA = 4.0 us, tSU;STA = 4.7 us, tSU;STO = 4.0 us
+     * Fast-mode: tHD;STA = 0.6 us, tSU;STA = 0.6 us, tSU;STO = 0.6 us
+     */
     writel(cyc / 2, priv->membase + UNIPHIER_FI2C_SSUT);
+    /*
+     * Standard-mode: tSU;DAT = 250 ns
+     * Fast-mode: tSU;DAT = 100 ns
+     */
     writel(cyc / 16, priv->membase + UNIPHIER_FI2C_DSUT);
 
     uniphier_fi2c_prepare_operation(priv);


@@ -320,7 +320,13 @@ static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv)
 
     uniphier_i2c_reset(priv, true);
 
-    writel((cyc / 2 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK);
+    /*
+     * Bit30-16: clock cycles of tLOW.
+     *  Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us
+     *  Fast-mode: tLOW = 1.3 us, tHIGH = 0.6 us
+     * "tLow/tHIGH = 5/4" meets both.
+     */
+    writel((cyc * 5 / 9 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK);
 
     uniphier_i2c_reset(priv, false);
 }


@@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
     struct device_node *root = of_find_node_by_path("/");
     const char *model = of_get_property(root, "model", NULL);
 
+    of_node_put(root);
     /* Get cable type from device-tree. */
     if (cable && !strncmp(cable, "80-", 3)) {
         /* Some drives fail to detect 80c cable in PowerBook */


@@ -187,7 +187,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
     pq->ctxt = uctxt->ctxt;
     pq->subctxt = fd->subctxt;
     pq->n_max_reqs = hfi1_sdma_comp_ring_size;
-    pq->state = SDMA_PKT_Q_INACTIVE;
     atomic_set(&pq->n_reqs, 0);
     init_waitqueue_head(&pq->wait);
     atomic_set(&pq->n_locked, 0);
@@ -276,7 +275,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
         /* Wait until all requests have been freed. */
         wait_event_interruptible(
             pq->wait,
-            (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+            !atomic_read(&pq->n_reqs));
         kfree(pq->reqs);
         kfree(pq->req_in_use);
         kmem_cache_destroy(pq->txreq_cache);
@@ -312,6 +311,13 @@ static u8 dlid_to_selector(u16 dlid)
     return mapping[hash];
 }
 
+/**
+ * hfi1_user_sdma_process_request() - Process and start a user sdma request
+ * @fd: valid file descriptor
+ * @iovec: array of io vectors to process
+ * @dim: overall iovec array size
+ * @count: number of io vector array entries processed
+ */
 int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
                                    struct iovec *iovec, unsigned long dim,
                                    unsigned long *count)
@@ -560,20 +566,12 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
         req->ahg_idx = sdma_ahg_alloc(req->sde);
 
     set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
+    pq->state = SDMA_PKT_Q_ACTIVE;
     /* Send the first N packets in the request to buy us some time */
     ret = user_sdma_send_pkts(req, pcount);
     if (unlikely(ret < 0 && ret != -EBUSY))
         goto free_req;
 
-    /*
-     * It is possible that the SDMA engine would have processed all the
-     * submitted packets by the time we get here. Therefore, only set
-     * packet queue state to ACTIVE if there are still uncompleted
-     * requests.
-     */
-    if (atomic_read(&pq->n_reqs))
-        xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
-
     /*
      * This is a somewhat blocking send implementation.
      * The driver will block the caller until all packets of the
@@ -1391,10 +1389,8 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
 
 static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
 {
-    if (atomic_dec_and_test(&pq->n_reqs)) {
-        xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
+    if (atomic_dec_and_test(&pq->n_reqs))
         wake_up(&pq->wait);
-    }
 }
 
 static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)


@@ -94,9 +94,10 @@
 #define TXREQ_FLAGS_REQ_ACK        BIT(0) /* Set the ACK bit in the header */
 #define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
 
-#define SDMA_PKT_Q_INACTIVE BIT(0)
-#define SDMA_PKT_Q_ACTIVE   BIT(1)
-#define SDMA_PKT_Q_DEFERRED BIT(2)
+enum pkt_q_sdma_state {
+    SDMA_PKT_Q_ACTIVE,
+    SDMA_PKT_Q_DEFERRED,
+};
 
 /*
  * Maximum retry attempts to submit a TX request
@@ -124,7 +125,7 @@ struct hfi1_user_sdma_pkt_q {
     struct user_sdma_request *reqs;
     unsigned long *req_in_use;
     struct iowait busy;
-    unsigned state;
+    enum pkt_q_sdma_state state;
     wait_queue_head_t wait;
     unsigned long unpinned;
     struct mmu_rb_handler *handler;


@@ -60,8 +60,18 @@
 
 /* OMAP4 values */
 #define OMAP4_VAL_IRQDISABLE		0x0
-#define OMAP4_VAL_DEBOUNCINGTIME	0x7
-#define OMAP4_VAL_PVT			0x7
+
+/*
+ * Errata i689: If a key is released for a time shorter than debounce time,
+ * the keyboard will idle and never detect the key release. The workaround
+ * is to use at least a 12ms debounce time. See omap5432 TRM chapter
+ * "26.4.6.2 Keyboard Controller Timer" for more information.
+ */
+#define OMAP4_KEYPAD_PTV_DIV_128	0x6
+#define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv) \
+	((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)
+#define OMAP4_VAL_DEBOUNCINGTIME_16MS \
+	OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128)
 
 enum {
     KBD_REVISION_OMAP4 = 0,
@@ -181,9 +191,9 @@ static int omap4_keypad_open(struct input_dev *input)
 
     kbd_writel(keypad_data, OMAP4_KBD_CTRL,
             OMAP4_DEF_CTRL_NOSOFTMODE |
-            (OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT));
+            (OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT));
     kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME,
-            OMAP4_VAL_DEBOUNCINGTIME);
+            OMAP4_VAL_DEBOUNCINGTIME_16MS);
     /* clear pending interrupts */
     kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
              kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));


@@ -178,6 +178,7 @@ static const char * const smbus_pnp_ids[] = {
     "LEN0096", /* X280 */
     "LEN0097", /* X280 -> ALPS trackpoint */
     "LEN200f", /* T450s */
+    "SYN3221", /* HP 15-ay000 */
     NULL
 };


@@ -177,7 +177,7 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev,
          * state because the Enter-UP can trigger a wakeup at once.
          */
         if (!(info & IS_BREAK))
-            pm_wakeup_event(&hv_dev->device, 0);
+            pm_wakeup_hard_event(&hv_dev->device);
 
         break;


@@ -929,6 +929,10 @@ static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
     bool dirty_flag;
     *result = true;
 
+    if (from_cblock(cmd->cache_blocks) == 0)
+        /* Nothing to do */
+        return 0;
+
     r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
                                from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
     if (r) {


@@ -195,7 +195,7 @@ static void throttle_unlock(struct throttle *t)
 struct dm_thin_new_mapping;
 
 /*
- * The pool runs in 4 modes.  Ordered in degraded order for comparisons.
+ * The pool runs in various modes.  Ordered in degraded order for comparisons.
  */
 enum pool_mode {
     PM_WRITE,		/* metadata may be changed */
@@ -281,9 +281,38 @@ struct pool {
     struct dm_bio_prison_cell **cell_sort_array;
 };
 
-static enum pool_mode get_pool_mode(struct pool *pool);
 static void metadata_operation_failed(struct pool *pool, const char *op, int r);
 
+static enum pool_mode get_pool_mode(struct pool *pool)
+{
+    return pool->pf.mode;
+}
+
+static void notify_of_pool_mode_change(struct pool *pool)
+{
+    const char *descs[] = {
+        "write",
+        "out-of-data-space",
+        "read-only",
+        "read-only",
+        "fail"
+    };
+    const char *extra_desc = NULL;
+    enum pool_mode mode = get_pool_mode(pool);
+
+    if (mode == PM_OUT_OF_DATA_SPACE) {
+        if (!pool->pf.error_if_no_space)
+            extra_desc = " (queue IO)";
+        else
+            extra_desc = " (error IO)";
+    }
+
+    dm_table_event(pool->ti->table);
+    DMINFO("%s: switching pool to %s%s mode",
+           dm_device_name(pool->pool_md),
+           descs[(int)mode], extra_desc ? : "");
+}
+
 /*
  * Target context for a pool.
  */
@@ -2362,8 +2391,6 @@ static void do_waker(struct work_struct *ws)
     queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
 }
 
-static void notify_of_pool_mode_change_to_oods(struct pool *pool);
-
 /*
  * We're holding onto IO to allow userland time to react.  After the
  * timeout either the pool will have been resized (and thus back in
@@ -2376,7 +2403,7 @@ static void do_no_space_timeout(struct work_struct *ws)
 
     if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
         pool->pf.error_if_no_space = true;
-        notify_of_pool_mode_change_to_oods(pool);
+        notify_of_pool_mode_change(pool);
         error_retry_list_with_code(pool, BLK_STS_NOSPC);
     }
 }
@@ -2444,26 +2471,6 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 
 /*----------------------------------------------------------------*/
 
-static enum pool_mode get_pool_mode(struct pool *pool)
-{
-    return pool->pf.mode;
-}
-
-static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
-{
-    dm_table_event(pool->ti->table);
-    DMINFO("%s: switching pool to %s mode",
-           dm_device_name(pool->pool_md), new_mode);
-}
-
-static void notify_of_pool_mode_change_to_oods(struct pool *pool)
-{
-    if (!pool->pf.error_if_no_space)
-        notify_of_pool_mode_change(pool, "out-of-data-space (queue IO)");
-    else
-        notify_of_pool_mode_change(pool, "out-of-data-space (error IO)");
-}
-
 static bool passdown_enabled(struct pool_c *pt)
 {
     return pt->adjusted_pf.discard_passdown;
@@ -2512,8 +2519,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 
     switch (new_mode) {
     case PM_FAIL:
-        if (old_mode != new_mode)
-            notify_of_pool_mode_change(pool, "failure");
         dm_pool_metadata_read_only(pool->pmd);
         pool->process_bio = process_bio_fail;
         pool->process_discard = process_bio_fail;
@@ -2527,8 +2532,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
 
     case PM_OUT_OF_METADATA_SPACE:
     case PM_READ_ONLY:
-        if (!is_read_only_pool_mode(old_mode))
-            notify_of_pool_mode_change(pool, "read-only");
         dm_pool_metadata_read_only(pool->pmd);
         pool->process_bio = process_bio_read_only;
         pool->process_discard = process_bio_success;
@@ -2549,8 +2552,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
          * alarming rate.  Adjust your low water mark if you're
          * frequently seeing this mode.
          */
-        if (old_mode != new_mode)
-            notify_of_pool_mode_change_to_oods(pool);
         pool->out_of_data_space = true;
         pool->process_bio = process_bio_read_only;
         pool->process_discard = process_discard_bio;
@@ -2563,8 +2564,6 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
         break;
 
     case PM_WRITE:
-        if (old_mode != new_mode)
-            notify_of_pool_mode_change(pool, "write");
         if (old_mode == PM_OUT_OF_DATA_SPACE)
             cancel_delayed_work_sync(&pool->no_space_timeout);
         pool->out_of_data_space = false;
@@ -2584,6 +2583,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
      * doesn't cause an unexpected mode transition on resume.
      */
     pt->adjusted_pf.mode = new_mode;
+
+    if (old_mode != new_mode)
+        notify_of_pool_mode_change(pool);
 }
 
 static void abort_transaction(struct pool *pool)


@@ -104,6 +104,7 @@ struct mmc_omap_slot {
     unsigned int		vdd;
     u16			saved_con;
     u16			bus_mode;
+    u16			power_mode;
     unsigned int		fclk_freq;
 
     struct tasklet_struct	cover_tasklet;
@@ -1157,7 +1158,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
     struct mmc_omap_slot *slot = mmc_priv(mmc);
     struct mmc_omap_host *host = slot->host;
     int i, dsor;
-    int clk_enabled;
+    int clk_enabled, init_stream;
 
     mmc_omap_select_slot(slot, 0);
@@ -1167,6 +1168,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
     slot->vdd = ios->vdd;
 
     clk_enabled = 0;
+    init_stream = 0;
     switch (ios->power_mode) {
     case MMC_POWER_OFF:
         mmc_omap_set_power(slot, 0, ios->vdd);
@@ -1174,13 +1176,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
     case MMC_POWER_UP:
         /* Cannot touch dsor yet, just power up MMC */
         mmc_omap_set_power(slot, 1, ios->vdd);
+        slot->power_mode = ios->power_mode;
         goto exit;
     case MMC_POWER_ON:
         mmc_omap_fclk_enable(host, 1);
         clk_enabled = 1;
         dsor |= 1 << 11;
+        if (slot->power_mode != MMC_POWER_ON)
+            init_stream = 1;
         break;
     }
+    slot->power_mode = ios->power_mode;
 
     if (slot->bus_mode != ios->bus_mode) {
         if (slot->pdata->set_bus_mode != NULL)
@@ -1196,7 +1202,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
     for (i = 0; i < 2; i++)
         OMAP_MMC_WRITE(host, CON, dsor);
     slot->saved_con = dsor;
-    if (ios->power_mode == MMC_POWER_ON) {
+    if (init_stream) {
         /* worst case at 400kHz, 80 cycles makes 200 microsecs */
         int usecs = 250;
@@ -1234,6 +1240,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
     slot->host = host;
     slot->mmc = mmc;
     slot->id = id;
+    slot->power_mode = MMC_POWER_UNDEFINED;
     slot->pdata = &host->pdata->slots[id];
 
     host->slots[id] = slot;


@@ -2086,6 +2086,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
            aggregator->aggregator_identifier);
 
     /* Tell the partner that this port is not suitable for aggregation */
+    port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+    port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
+    port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
     port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
     __update_lacpdu_from_port(port);
     ad_lacpdu_send(port);


@@ -114,8 +114,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
     /* Reset the switch. */
     REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
               GLOBAL_ATU_CONTROL_SWRESET |
-              GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
-              GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
+              GLOBAL_ATU_CONTROL_LEARNDIS);
 
     /* Wait up to one second for reset to complete. */
     timeout = jiffies + 1 * HZ;
@@ -140,13 +139,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
      */
     REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
 
-    /* Enable automatic address learning, set the address
-     * database size to 1024 entries, and set the default aging
-     * time to 5 minutes.
+    /* Disable automatic address learning.
      */
     REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
-              GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
-              GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
+              GLOBAL_ATU_CONTROL_LEARNDIS);
 
     return 0;
 }


@@ -2786,7 +2786,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
     if (!muram_node) {
         dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
             __func__);
-        goto fman_node_put;
+        goto fman_free;
     }
 
     err = of_address_to_resource(muram_node, 0,
@@ -2795,11 +2795,10 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
         of_node_put(muram_node);
         dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
             __func__, err);
-        goto fman_node_put;
+        goto fman_free;
     }
 
     of_node_put(muram_node);
-    of_node_put(fm_node);
 
     err = devm_request_irq(&of_dev->dev, irq, fman_irq, 0, "fman", fman);
     if (err < 0) {


@@ -5,7 +5,7 @@
 config MLX4_EN
 	tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
 	depends on MAY_USE_DEVLINK
-	depends on PCI
+	depends on PCI && NETDEVICES && ETHERNET && INET
 	select MLX4_CORE
 	imply PTP_1588_CLOCK
 	---help---


@@ -295,7 +295,13 @@ static bool
 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
                                     bridge_port)
 {
-    struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
+    struct net_device *dev = bridge_port->dev;
+    struct mlxsw_sp *mlxsw_sp;
+
+    if (is_vlan_dev(dev))
+        mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
+    else
+        mlxsw_sp = mlxsw_sp_lower_get(dev);
 
     /* In case ports were pulled from out of a bridged LAG, then
      * it's possible the reference count isn't zero, yet the bridge
@@ -1646,7 +1652,7 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
     u16 vid = vlan_dev_vlan_id(bridge_port->dev);
 
     mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
-    if (WARN_ON(!mlxsw_sp_port_vlan))
+    if (!mlxsw_sp_port_vlan)
         return;
 
     mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);


@@ -3472,16 +3472,16 @@ static int __init init_mac80211_hwsim(void)
     if (err)
         goto out_unregister_pernet;
 
+    err = hwsim_init_netlink();
+    if (err)
+        goto out_unregister_driver;
+
     hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
     if (IS_ERR(hwsim_class)) {
         err = PTR_ERR(hwsim_class);
-        goto out_unregister_driver;
+        goto out_exit_netlink;
     }
 
-    err = hwsim_init_netlink();
-    if (err < 0)
-        goto out_unregister_driver;
-
     for (i = 0; i < radios; i++) {
         struct hwsim_new_radio_params param = { 0 };
@@ -3587,6 +3587,8 @@ out_free_mon:
     free_netdev(hwsim_mon);
 out_free_radios:
     mac80211_hwsim_free();
+out_exit_netlink:
+    hwsim_exit_netlink();
 out_unregister_driver:
     platform_driver_unregister(&mac80211_hwsim_driver);
 out_unregister_pernet:


@@ -524,6 +524,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
     struct nvmet_rdma_rsp *rsp =
         container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+    struct nvmet_rdma_queue *queue = cq->cq_context;
 
     nvmet_rdma_release_rsp(rsp);
 
@@ -531,7 +532,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
              wc->status != IB_WC_WR_FLUSH_ERR)) {
         pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
             wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
-        nvmet_rdma_error_comp(rsp->queue);
+        nvmet_rdma_error_comp(queue);
     }
 }


@@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
     SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
           SUNXI_FUNCTION(0x0, "gpio_in"),
           SUNXI_FUNCTION(0x1, "gpio_out"),
-          SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)),	/* PH_EINT11 */
+          SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)),	/* PH_EINT11 */
 };
 
 static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {


@@ -47,49 +47,83 @@ struct snvs_rtc_data {
 	struct clk *clk;
 };
 
+/* Read 64 bit timer register, which could be in inconsistent state */
+static u64 rtc_read_lpsrt(struct snvs_rtc_data *data)
+{
+	u32 msb, lsb;
+
+	regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &msb);
+	regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &lsb);
+	return (u64)msb << 32 | lsb;
+}
+
+/* Read the secure real time counter, taking care to deal with the cases of the
+ * counter updating while being read.
+ */
 static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
 {
 	u64 read1, read2;
-	u32 val;
+	unsigned int timeout = 100;
 
+	/* As expected, the registers might update between the read of the LSB
+	 * reg and the MSB reg. It's also possible that one register might be
+	 * in partially modified state as well.
+	 */
+	read1 = rtc_read_lpsrt(data);
 	do {
-		regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
-		read1 = val;
-		read1 <<= 32;
-		regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
-		read1 |= val;
-
-		regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
-		read2 = val;
-		read2 <<= 32;
-		regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
-		read2 |= val;
-	} while (read1 != read2);
+		read2 = read1;
+		read1 = rtc_read_lpsrt(data);
+	} while (read1 != read2 && --timeout);
+	if (!timeout)
+		dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
 
 	/* Convert 47-bit counter to 32-bit raw second count */
 	return (u32) (read1 >> CNTR_TO_SECS_SH);
 }
 
-static void rtc_write_sync_lp(struct snvs_rtc_data *data)
+/* Just read the lsb from the counter, dealing with inconsistent state */
+static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb)
 {
-	u32 count1, count2, count3;
-	int i;
-
-	/* Wait for 3 CKIL cycles */
-	for (i = 0; i < 3; i++) {
-		do {
-			regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
-			regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
-		} while (count1 != count2);
-
-		/* Now wait until counter value changes */
-		do {
-			do {
-				regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
-				regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count3);
-			} while (count2 != count3);
-		} while (count3 == count1);
+	u32 count1, count2;
+	unsigned int timeout = 100;
+
+	regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+	do {
+		count2 = count1;
+		regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+	} while (count1 != count2 && --timeout);
+	if (!timeout) {
+		dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
+		return -ETIMEDOUT;
 	}
+	*lsb = count1;
+	return 0;
+}
+
+static int rtc_write_sync_lp(struct snvs_rtc_data *data)
+{
+	u32 count1, count2;
+	u32 elapsed;
+	unsigned int timeout = 1000;
+	int ret;
+
+	ret = rtc_read_lp_counter_lsb(data, &count1);
+	if (ret)
+		return ret;
+
+	/* Wait for 3 CKIL cycles, about 61.0-91.5 µs */
+	do {
+		ret = rtc_read_lp_counter_lsb(data, &count2);
+		if (ret)
+			return ret;
+		elapsed = count2 - count1; /* wrap around _is_ handled! */
+	} while (elapsed < 3 && --timeout);
+	if (!timeout) {
+		dev_err(&data->rtc->dev, "Timeout waiting for LPSRT Counter to change\n");
+		return -ETIMEDOUT;
+	}
+	return 0;
 }
 
 static int snvs_rtc_enable(struct snvs_rtc_data *data, bool enable)
@@ -173,9 +207,7 @@ static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
 			   (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN),
 			   enable ? (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN) : 0);
 
-	rtc_write_sync_lp(data);
-
-	return 0;
+	return rtc_write_sync_lp(data);
 }
 
 static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -183,11 +215,14 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 	struct snvs_rtc_data *data = dev_get_drvdata(dev);
 	struct rtc_time *alrm_tm = &alrm->time;
 	unsigned long time;
+	int ret;
 
 	rtc_tm_to_time(alrm_tm, &time);
 
 	regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0);
-	rtc_write_sync_lp(data);
+	ret = rtc_write_sync_lp(data);
+	if (ret)
+		return ret;
 	regmap_write(data->regmap, data->offset + SNVS_LPTAR, time);
 
 	/* Clear alarm interrupt status bit */
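
The driver change above relies on a generic idiom: a 64-bit counter split across two 32-bit registers is read repeatedly until two consecutive samples agree, with a bounded retry count so a stuck counter cannot lock up the caller. A minimal userspace sketch of that idiom, with a fake register pair standing in for SNVS_LPSRTCMR/SNVS_LPSRTCLR (the simulation and its names are assumptions, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Fake hardware: a 64-bit counter exposed as two 32-bit registers.  To model
 * the race the driver guards against, the counter ticks once during the first
 * read pass, right between the MSB and LSB accesses.
 */
static uint64_t hw_counter = 0xffffffffULL;	/* about to carry into the MSB */
static int ticks_left = 1;

static uint32_t read_msb(void)
{
	return (uint32_t)(hw_counter >> 32);
}

static uint32_t read_lsb(void)
{
	if (ticks_left) {			/* tick between MSB and LSB reads */
		ticks_left--;
		hw_counter++;
	}
	return (uint32_t)hw_counter;
}

static uint64_t read_counter_once(void)
{
	uint32_t msb = read_msb();
	uint32_t lsb = read_lsb();

	return (uint64_t)msb << 32 | lsb;
}

/* Read until two consecutive samples agree, but give up after a bounded
 * number of attempts instead of spinning forever.
 */
static uint64_t read_counter_stable(void)
{
	uint64_t read1, read2;
	unsigned int timeout = 100;

	read1 = read_counter_once();
	do {
		read2 = read1;
		read1 = read_counter_once();
	} while (read1 != read2 && --timeout);

	if (!timeout)
		fprintf(stderr, "counter never settled\n");
	return read1;
}

int main(void)
{
	uint64_t torn = read_counter_once();	/* sees old MSB with new LSB */
	uint64_t good = read_counter_stable();

	printf("torn read  : 0x%016llx\n", (unsigned long long)torn);
	printf("stable read: 0x%016llx\n", (unsigned long long)good);
	return 0;
}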


@@ -221,6 +221,7 @@ static int d7s_probe(struct platform_device *op)
 	dev_set_drvdata(&op->dev, p);
 	d7s_device = p;
 	err = 0;
+	of_node_put(opts);
 
 out:
 	return err;


@@ -910,8 +910,10 @@ static void envctrl_init_i2c_child(struct device_node *dp,
 			for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) {
 				pchild->mon_type[len] = ENVCTRL_NOMON;
 			}
+			of_node_put(root_node);
 			return;
 		}
+		of_node_put(root_node);
 	}
 
 	/* Get the monitor channels. */


@@ -2416,8 +2416,8 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
 failed:
 	ISCSI_DBG_EH(session,
 		     "failing session reset: Could not log back into "
-		     "%s, %s [age %d]\n", session->targetname,
-		     conn->persistent_address, session->age);
+		     "%s [age %d]\n", session->targetname,
+		     session->age);
 	spin_unlock_bh(&session->frwd_lock);
 	mutex_unlock(&session->eh_mutex);
 	return FAILED;


@@ -1202,8 +1202,6 @@ static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
 
 static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
 {
-	pvscsi_shutdown_intr(adapter);
-
 	if (adapter->workqueue)
 		destroy_workqueue(adapter->workqueue);
 
@@ -1535,6 +1533,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 out_reset_adapter:
 	ll_adapter_reset(adapter);
 out_release_resources:
+	pvscsi_shutdown_intr(adapter);
 	pvscsi_release_resources(adapter);
 	scsi_host_put(host);
 out_disable_device:
@@ -1543,6 +1542,7 @@ out_disable_device:
 	return error;
 
 out_release_resources_and_disable:
+	pvscsi_shutdown_intr(adapter);
 	pvscsi_release_resources(adapter);
 	goto out_disable_device;
 }


@@ -111,6 +111,7 @@ void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
 		mode = of_get_property(dp, mode_prop, NULL);
 		if (!mode)
 			mode = "9600,8,n,1,-";
+		of_node_put(dp);
 	}
 
 	cflag = CREAD | HUPCL | CLOCAL;


@@ -561,13 +561,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
 	 * executing.
 	 */
 
-	if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
-		sock_set_flag(sk, SOCK_DONE);
-		vsk->peer_shutdown = SHUTDOWN_MASK;
-		sk->sk_state = SS_UNCONNECTED;
-		sk->sk_err = ECONNRESET;
-		sk->sk_error_report(sk);
-	}
+	/* If the peer is still valid, no need to reset connection */
+	if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+		return;
+
+	/* If the close timeout is pending, let it expire. This avoids races
+	 * with the timeout callback.
+	 */
+	if (vsk->close_work_scheduled)
+		return;
+
+	sock_set_flag(sk, SOCK_DONE);
+	vsk->peer_shutdown = SHUTDOWN_MASK;
+	sk->sk_state = SS_UNCONNECTED;
+	sk->sk_err = ECONNRESET;
+	sk->sk_error_report(sk);
 }
 
 static int vhost_vsock_dev_release(struct inode *inode, struct file *file)


@@ -43,6 +43,7 @@
 #include <asm/kmap_types.h>
 #include <linux/uaccess.h>
+#include <linux/nospec.h>
 
 #include "internal.h"
 
@@ -1084,6 +1085,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 	if (!table || id >= table->nr)
 		goto out;
 
+	id = array_index_nospec(id, table->nr);
 	ctx = rcu_dereference(table->table[id]);
 	if (ctx && ctx->user_id == ctx_id) {
 		if (percpu_ref_tryget_live(&ctx->users))
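
array_index_nospec() clamps the index so that even a mis-speculated execution of the following table dereference stays in bounds. As a rough illustration of the masking idea only (the real helper in linux/nospec.h uses an architecture-specific mask and is speculation-safe; this userspace sketch is not):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the idea behind array_index_nospec(): build an
 * all-ones mask when idx < size and a zero mask otherwise, so the clamped
 * index can never exceed the bound.  This sketch is NOT speculation-safe by
 * itself; it only illustrates the masking scheme.
 */
static size_t clamp_index(size_t idx, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(idx < size);

	return idx & mask;
}

int main(void)
{
	printf("%zu\n", clamp_index(3, 8));	/* 3: in range, unchanged */
	printf("%zu\n", clamp_index(9, 8));	/* 0: out of range, clamped */
	return 0;
}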


@@ -121,7 +121,7 @@ config CIFS_XATTR
 
 config CIFS_POSIX
 	bool "CIFS POSIX Extensions"
-	depends on CIFS_XATTR
+	depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
 	help
 	  Enabling this option will cause the cifs client to attempt to
 	  negotiate a newer dialect with servers, such as Samba 3.0.5


@@ -1470,7 +1470,7 @@ static int fuse_dir_open(struct inode *inode, struct file *file)
 
 static int fuse_dir_release(struct inode *inode, struct file *file)
 {
-	fuse_release_common(file, FUSE_RELEASEDIR);
+	fuse_release_common(file, true);
 
 	return 0;
 }


@@ -86,12 +86,12 @@ static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
 	iput(req->misc.release.inode);
 }
 
-static void fuse_file_put(struct fuse_file *ff, bool sync)
+static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
 {
 	if (refcount_dec_and_test(&ff->count)) {
 		struct fuse_req *req = ff->reserved_req;
 
-		if (ff->fc->no_open) {
+		if (ff->fc->no_open && !isdir) {
 			/*
 			 * Drop the release request when client does not
 			 * implement 'open'
@@ -244,10 +244,11 @@ static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
 	req->in.args[0].value = inarg;
 }
 
-void fuse_release_common(struct file *file, int opcode)
+void fuse_release_common(struct file *file, bool isdir)
 {
 	struct fuse_file *ff = file->private_data;
 	struct fuse_req *req = ff->reserved_req;
+	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
 
 	fuse_prepare_release(ff, file->f_flags, opcode);
 
@@ -269,7 +270,7 @@ void fuse_release_common(struct file *file, int opcode)
 	 * synchronous RELEASE is allowed (and desirable) in this case
 	 * because the server can be trusted not to screw up.
 	 */
-	fuse_file_put(ff, ff->fc->destroy_req != NULL);
+	fuse_file_put(ff, ff->fc->destroy_req != NULL, isdir);
 }
 
 static int fuse_open(struct inode *inode, struct file *file)
@@ -285,7 +286,7 @@ static int fuse_release(struct inode *inode, struct file *file)
 	if (fc->writeback_cache)
 		write_inode_now(inode, 1);
 
-	fuse_release_common(file, FUSE_RELEASE);
+	fuse_release_common(file, false);
 
 	/* return value is ignored by VFS */
 	return 0;
@@ -299,7 +300,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags)
 	 * iput(NULL) is a no-op and since the refcount is 1 and everything's
 	 * synchronous, we are fine with not doing igrab() here"
 	 */
-	fuse_file_put(ff, true);
+	fuse_file_put(ff, true, false);
 }
 EXPORT_SYMBOL_GPL(fuse_sync_release);
 
@@ -804,7 +805,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
 		put_page(page);
 	}
 	if (req->ff)
-		fuse_file_put(req->ff, false);
+		fuse_file_put(req->ff, false, false);
 }
 
 static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1458,7 +1459,7 @@ static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
 		__free_page(req->pages[i]);
 	if (req->ff)
-		fuse_file_put(req->ff, false);
+		fuse_file_put(req->ff, false, false);
 }
 
 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
@@ -1615,7 +1616,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
 	ff = __fuse_write_file_get(fc, fi);
 	err = fuse_flush_times(inode, ff);
 	if (ff)
-		fuse_file_put(ff, 0);
+		fuse_file_put(ff, false, false);
 
 	return err;
 }
@@ -1929,7 +1930,7 @@ static int fuse_writepages(struct address_space *mapping,
 		err = 0;
 	}
 	if (data.ff)
-		fuse_file_put(data.ff, false);
+		fuse_file_put(data.ff, false, false);
 
 	kfree(data.orig_pages);
 out:


@@ -742,7 +742,7 @@ void fuse_sync_release(struct fuse_file *ff, int flags);
 /**
  * Send RELEASE or RELEASEDIR request
  */
-void fuse_release_common(struct file *file, int opcode);
+void fuse_release_common(struct file *file, bool isdir);
 
 /**
  * Send FSYNC or FSYNCDIR request


@@ -98,8 +98,11 @@ struct nfs_direct_req {
 	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
 	struct work_struct	work;
 	int			flags;
+	/* for write */
 #define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
 #define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
+	/* for read */
+#define NFS_ODIRECT_SHOULD_DIRTY	(3)	/* dirty user-space page after read */
 	struct nfs_writeverf	verf;		/* unstable write verifier */
 };
 
@@ -412,7 +415,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
 		struct page *page = req->wb_page;
 
-		if (!PageCompound(page) && bytes < hdr->good_bytes)
+		if (!PageCompound(page) && bytes < hdr->good_bytes &&
+		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
 			set_page_dirty(page);
 		bytes += req->wb_bytes;
 		nfs_list_remove_request(req);
@@ -587,6 +591,9 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
 	if (!is_sync_kiocb(iocb))
 		dreq->iocb = iocb;
 
+	if (iter_is_iovec(iter))
+		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
+
 	nfs_start_io_direct(inode);
 
 	NFS_I(inode)->read_io += count;


@@ -1576,7 +1576,6 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
 		cond_resched();
 
 		BUG_ON(!vma_can_userfault(vma));
-		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
 
 		/*
 		 * Nothing to do: this vma is already registered into this
@@ -1585,6 +1584,8 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
 		if (!vma->vm_userfaultfd_ctx.ctx)
 			goto skip;
 
+		WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
+
 		if (vma->vm_start > start)
 			start = vma->vm_start;
 		vma_end = min(end, vma->vm_end);


@@ -29,13 +29,41 @@
 #endif
 
 typedef struct qspinlock {
-	atomic_t	val;
+	union {
+		atomic_t	val;
+
+		/*
+		 * By using the whole 2nd least significant byte for the
+		 * pending bit, we can allow better optimization of the lock
+		 * acquisition for the pending bit holder.
+		 */
+#ifdef __LITTLE_ENDIAN
+		struct {
+			u8	locked;
+			u8	pending;
+		};
+		struct {
+			u16	locked_pending;
+			u16	tail;
+		};
+#else
+		struct {
+			u16	tail;
+			u16	locked_pending;
+		};
+		struct {
+			u8	reserved[2];
+			u8	pending;
+			u8	locked;
+		};
+#endif
+	};
 } arch_spinlock_t;
 
 /*
  * Initializier
  */
-#define	__ARCH_SPIN_LOCK_UNLOCKED	{ ATOMIC_INIT(0) }
+#define	__ARCH_SPIN_LOCK_UNLOCKED	{ { .val = ATOMIC_INIT(0) } }
 
 /*
  * Bitfields in the atomic value:
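
The union above lets the same 32-bit lock word be accessed either as one atomic value or as its locked/pending/tail fields. A standalone sketch of that aliasing, assuming a little-endian host and plain integers in place of the kernel's atomic_t:

#include <stdint.h>
#include <stdio.h>

/* Same idea as the qspinlock layout, minus the atomics: one 32-bit word
 * aliased with byte and halfword views.  Field order assumes little endian.
 */
union lockword {
	uint32_t val;
	struct {
		uint8_t  locked;	/* bits  0-7  */
		uint8_t  pending;	/* bits  8-15 */
		uint16_t tail;		/* bits 16-31 */
	};
};

int main(void)
{
	union lockword w = { .val = 0 };

	w.pending = 1;		/* set only the pending byte */
	w.tail = 0x00c2;	/* encode a queue tail */
	w.locked = 1;		/* take the lock */

	printf("val = 0x%08x (locked=%u pending=%u tail=0x%04x)\n",
	       (unsigned int)w.val, w.locked, w.pending, (unsigned int)w.tail);
	return 0;
}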


@@ -76,6 +76,18 @@
 #define MAX_NODES	4
 #endif
 
+/*
+ * The pending bit spinning loop count.
+ * This heuristic is used to limit the number of lockword accesses
+ * made by atomic_cond_read_relaxed when waiting for the lock to
+ * transition out of the "== _Q_PENDING_VAL" state. We don't spin
+ * indefinitely because there's no guarantee that we'll make forward
+ * progress.
+ */
+#ifndef _Q_PENDING_LOOPS
+#define _Q_PENDING_LOOPS	1
+#endif
+
 /*
  * Per-CPU queue node structures; we can never have more than 4 nested
  * contexts: task, softirq, hardirq, nmi.
@@ -114,41 +126,18 @@ static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
 
 #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
 
-/*
- * By using the whole 2nd least significant byte for the pending bit, we
- * can allow better optimization of the lock acquisition for the pending
- * bit holder.
- *
- * This internal structure is also used by the set_locked function which
- * is not restricted to _Q_PENDING_BITS == 8.
- */
-struct __qspinlock {
-	union {
-		atomic_t val;
-#ifdef __LITTLE_ENDIAN
-		struct {
-			u8	locked;
-			u8	pending;
-		};
-		struct {
-			u16	locked_pending;
-			u16	tail;
-		};
-#else
-		struct {
-			u16	tail;
-			u16	locked_pending;
-		};
-		struct {
-			u8	reserved[2];
-			u8	pending;
-			u8	locked;
-		};
-#endif
-	};
-};
-
 #if _Q_PENDING_BITS == 8
+/**
+ * clear_pending - clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,* -> *,0,*
+ */
+static __always_inline void clear_pending(struct qspinlock *lock)
+{
+	WRITE_ONCE(lock->pending, 0);
+}
+
 /**
  * clear_pending_set_locked - take ownership and clear the pending bit.
  * @lock: Pointer to queued spinlock structure
@@ -159,9 +148,7 @@ struct __qspinlock {
  */
 static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
 {
-	struct __qspinlock *l = (void *)lock;
-
-	WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
+	WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
 }
 
 /*
@@ -170,24 +157,33 @@ static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
  * @tail : The new queue tail code word
  * Return: The previous queue tail code word
  *
- * xchg(lock, tail)
+ * xchg(lock, tail), which heads an address dependency
  *
 * p,*,* -> n,*,* ; prev = xchg(lock, node)
 */
 static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 {
-	struct __qspinlock *l = (void *)lock;
-
 	/*
 	 * Use release semantics to make sure that the MCS node is properly
 	 * initialized before changing the tail code.
 	 */
-	return (u32)xchg_release(&l->tail,
+	return (u32)xchg_release(&lock->tail,
 				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
 }
 
 #else /* _Q_PENDING_BITS == 8 */
 
+/**
+ * clear_pending - clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,* -> *,0,*
+ */
+static __always_inline void clear_pending(struct qspinlock *lock)
+{
+	atomic_andnot(_Q_PENDING_VAL, &lock->val);
+}
+
 /**
  * clear_pending_set_locked - take ownership and clear the pending bit.
  * @lock: Pointer to queued spinlock structure
@@ -229,6 +225,20 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 }
 #endif /* _Q_PENDING_BITS == 8 */
 
+/**
+ * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
+ * @lock : Pointer to queued spinlock structure
+ * Return: The previous lock value
+ *
+ * *,*,* -> *,1,*
+ */
+#ifndef queued_fetch_set_pending_acquire
+static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
+{
+	return atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
+}
+#endif
+
 /**
  * set_locked - Set the lock bit and own the lock
  * @lock: Pointer to queued spinlock structure
@@ -237,9 +247,7 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
  */
 static __always_inline void set_locked(struct qspinlock *lock)
 {
-	struct __qspinlock *l = (void *)lock;
-
-	WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
+	WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
 }
 
@@ -294,7 +302,7 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
 void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
 	struct mcs_spinlock *prev, *next, *node;
-	u32 new, old, tail;
+	u32 old, tail;
 	int idx;
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
@@ -306,65 +314,58 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 		return;
 
 	/*
-	 * wait for in-progress pending->locked hand-overs
+	 * Wait for in-progress pending->locked hand-overs with a bounded
+	 * number of spins so that we guarantee forward progress.
 	 *
 	 * 0,1,0 -> 0,0,1
 	 */
 	if (val == _Q_PENDING_VAL) {
-		while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
-			cpu_relax();
+		int cnt = _Q_PENDING_LOOPS;
+		val = smp_cond_load_acquire(&lock->val.counter,
+					    (VAL != _Q_PENDING_VAL) || !cnt--);
 	}
 
+	/*
+	 * If we observe any contention; queue.
+	 */
+	if (val & ~_Q_LOCKED_MASK)
+		goto queue;
+
 	/*
 	 * trylock || pending
 	 *
 	 * 0,0,0 -> 0,0,1 ; trylock
 	 * 0,0,1 -> 0,1,1 ; pending
 	 */
-	for (;;) {
-		/*
-		 * If we observe any contention; queue.
-		 */
-		if (val & ~_Q_LOCKED_MASK)
-			goto queue;
-
-		new = _Q_LOCKED_VAL;
-		if (val == new)
-			new |= _Q_PENDING_VAL;
-
-		/*
-		 * Acquire semantic is required here as the function may
-		 * return immediately if the lock was free.
-		 */
-		old = atomic_cmpxchg_acquire(&lock->val, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	val = queued_fetch_set_pending_acquire(lock);
 
 	/*
-	 * we won the trylock
+	 * If we observe any contention; undo and queue.
 	 */
-	if (new == _Q_LOCKED_VAL)
-		return;
+	if (unlikely(val & ~_Q_LOCKED_MASK)) {
+		if (!(val & _Q_PENDING_MASK))
+			clear_pending(lock);
+		goto queue;
+	}
 
 	/*
-	 * we're pending, wait for the owner to go away.
+	 * We're pending, wait for the owner to go away.
 	 *
-	 * *,1,1 -> *,1,0
+	 * 0,1,1 -> 0,1,0
 	 *
 	 * this wait loop must be a load-acquire such that we match the
 	 * store-release that clears the locked bit and create lock
-	 * sequentiality; this is because not all clear_pending_set_locked()
-	 * implementations imply full barriers.
+	 * sequentiality; this is because not all
+	 * clear_pending_set_locked() implementations imply full
+	 * barriers.
 	 */
-	smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));
+	if (val & _Q_LOCKED_MASK)
+		smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));
 
 	/*
 	 * take ownership and clear the pending bit.
 	 *
-	 * *,1,0 -> *,0,1
+	 * 0,1,0 -> 0,0,1
 	 */
 	clear_pending_set_locked(lock);
 	return;
@@ -416,16 +417,15 @@ queue:
 	 */
 	if (old & _Q_TAIL_MASK) {
 		prev = decode_tail(old);
-		/*
-		 * The above xchg_tail() is also a load of @lock which generates,
-		 * through decode_tail(), a pointer.
-		 *
-		 * The address dependency matches the RELEASE of xchg_tail()
-		 * such that the access to @prev must happen after.
-		 */
-		smp_read_barrier_depends();
 
-		WRITE_ONCE(prev->next, node);
+		/*
+		 * We must ensure that the stores to @node are observed before
+		 * the write to prev->next. The address dependency from
+		 * xchg_tail is not sufficient to ensure this because the read
+		 * component of xchg_tail is unordered with respect to the
+		 * initialisation of @node.
+		 */
+		smp_store_release(&prev->next, node);
 
 		pv_wait_node(node, prev);
 		arch_mcs_spin_lock_contended(&node->locked);
@@ -472,30 +472,27 @@ locked:
 	 * claim the lock:
 	 *
 	 * n,0,0 -> 0,0,1 : lock, uncontended
-	 * *,0,0 -> *,0,1 : lock, contended
+	 * *,*,0 -> *,*,1 : lock, contended
 	 *
-	 * If the queue head is the only one in the queue (lock value == tail),
-	 * clear the tail code and grab the lock. Otherwise, we only need
-	 * to grab the lock.
+	 * If the queue head is the only one in the queue (lock value == tail)
+	 * and nobody is pending, clear the tail code and grab the lock.
+	 * Otherwise, we only need to grab the lock.
 	 */
-	for (;;) {
-		/* In the PV case we might already have _Q_LOCKED_VAL set */
-		if ((val & _Q_TAIL_MASK) != tail) {
-			set_locked(lock);
-			break;
-		}
+
+	/* In the PV case we might already have _Q_LOCKED_VAL set */
+	if ((val & _Q_TAIL_MASK) == tail) {
 		/*
 		 * The smp_cond_load_acquire() call above has provided the
-		 * necessary acquire semantics required for locking. At most
-		 * two iterations of this loop may be ran.
+		 * necessary acquire semantics required for locking.
 		 */
 		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
 		if (old == val)
 			goto release; /* No contention */
-
-		val = old;
 	}
 
+	/* Either somebody is queued behind us or _Q_PENDING_VAL is set */
+	set_locked(lock);
+
 	/*
 	 * contended path; wait for next if not observed yet, release.
 	 */


@@ -70,10 +70,8 @@ struct pv_node {
 #define queued_spin_trylock(l)	pv_queued_spin_steal_lock(l)
 static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
 {
-	struct __qspinlock *l = (void *)lock;
-
 	if (!(atomic_read(&lock->val) & _Q_LOCKED_PENDING_MASK) &&
-	    (cmpxchg_acquire(&l->locked, 0, _Q_LOCKED_VAL) == 0)) {
+	    (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
 		qstat_inc(qstat_pv_lock_stealing, true);
 		return true;
 	}
@@ -88,16 +86,7 @@ static inline bool pv_queued_spin_steal_lock(struct qspinlock *lock)
 #if _Q_PENDING_BITS == 8
 static __always_inline void set_pending(struct qspinlock *lock)
 {
-	struct __qspinlock *l = (void *)lock;
-
-	WRITE_ONCE(l->pending, 1);
-}
-
-static __always_inline void clear_pending(struct qspinlock *lock)
-{
-	struct __qspinlock *l = (void *)lock;
-
-	WRITE_ONCE(l->pending, 0);
+	WRITE_ONCE(lock->pending, 1);
 }
 
 /*
@@ -107,10 +96,8 @@ static __always_inline void clear_pending(struct qspinlock *lock)
  */
 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 {
-	struct __qspinlock *l = (void *)lock;
-
-	return !READ_ONCE(l->locked) &&
-	       (cmpxchg_acquire(&l->locked_pending, _Q_PENDING_VAL,
+	return !READ_ONCE(lock->locked) &&
+	       (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
 			_Q_LOCKED_VAL) == _Q_PENDING_VAL);
 }
 #else /* _Q_PENDING_BITS == 8 */
@@ -119,11 +106,6 @@ static __always_inline void set_pending(struct qspinlock *lock)
 	atomic_or(_Q_PENDING_VAL, &lock->val);
 }
 
-static __always_inline void clear_pending(struct qspinlock *lock)
-{
-	atomic_andnot(_Q_PENDING_VAL, &lock->val);
-}
-
 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 {
 	int val = atomic_read(&lock->val);
@@ -355,7 +337,6 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 {
 	struct pv_node *pn = (struct pv_node *)node;
-	struct __qspinlock *l = (void *)lock;
 
 	/*
 	 * If the vCPU is indeed halted, advance its state to match that of
@@ -384,7 +365,7 @@ static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
 	 * the hash table later on at unlock time, no atomic instruction is
 	 * needed.
 	 */
-	WRITE_ONCE(l->locked, _Q_SLOW_VAL);
+	WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
 	(void)pv_hash(lock, pn);
 }
 
@@ -399,7 +380,6 @@ static u32
 pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 {
 	struct pv_node *pn = (struct pv_node *)node;
-	struct __qspinlock *l = (void *)lock;
 	struct qspinlock **lp = NULL;
 	int waitcnt = 0;
 	int loop;
@@ -450,13 +430,13 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 			 *
 			 * Matches the smp_rmb() in __pv_queued_spin_unlock().
 			 */
-			if (xchg(&l->locked, _Q_SLOW_VAL) == 0) {
+			if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
 				/*
 				 * The lock was free and now we own the lock.
 				 * Change the lock value back to _Q_LOCKED_VAL
 				 * and unhash the table.
 				 */
-				WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
+				WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
 				WRITE_ONCE(*lp, NULL);
 				goto gotlock;
 			}
@@ -464,7 +444,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 		WRITE_ONCE(pn->state, vcpu_hashed);
 		qstat_inc(qstat_pv_wait_head, true);
 		qstat_inc(qstat_pv_wait_again, waitcnt);
-		pv_wait(&l->locked, _Q_SLOW_VAL);
+		pv_wait(&lock->locked, _Q_SLOW_VAL);
 
 		/*
 		 * Because of lock stealing, the queue head vCPU may not be
@@ -489,7 +469,6 @@ gotlock:
 __visible void
 __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 {
-	struct __qspinlock *l = (void *)lock;
 	struct pv_node *node;
 
 	if (unlikely(locked != _Q_SLOW_VAL)) {
@@ -518,7 +497,7 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 	 * Now that we have a reference to the (likely) blocked pv_node,
 	 * release the lock.
 	 */
-	smp_store_release(&l->locked, 0);
+	smp_store_release(&lock->locked, 0);
 
 	/*
 	 * At this point the memory pointed at by lock can be freed/reused,
@@ -544,7 +523,6 @@ __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
 #ifndef __pv_queued_spin_unlock
 __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 {
-	struct __qspinlock *l = (void *)lock;
 	u8 locked;
 
 	/*
@@ -552,7 +530,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
 	 * unhash. Otherwise it would be possible to have multiple @lock
 	 * entries, which would be BAD.
 	 */
-	locked = cmpxchg_release(&l->locked, _Q_LOCKED_VAL, 0);
+	locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
 	if (likely(locked == _Q_LOCKED_VAL))
 		return;


@@ -389,7 +389,7 @@ static int __init init_timer_list_procfs(void)
 {
 	struct proc_dir_entry *pe;
 
-	pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
+	pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
 	if (!pe)
 		return -ENOMEM;
 	return 0;


@@ -5534,6 +5534,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
 	if (ops->flags & FTRACE_OPS_FL_ENABLED)
 		ftrace_shutdown(ops, 0);
 	ops->flags |= FTRACE_OPS_FL_DELETED;
+	ftrace_free_filter(ops);
 	mutex_unlock(&ftrace_lock);
 }


@@ -744,8 +744,10 @@ int set_trigger_filter(char *filter_str,
 
 	/* The filter is for the 'trigger' event, not the triggered event */
 	ret = create_event_filter(file->event_call, filter_str, false, &filter);
-	if (ret)
-		goto out;
+	/*
+	 * If create_event_filter() fails, filter still needs to be freed.
+	 * Which the calling code will do with data->filter.
+	 */
 
  assign:
 	tmp = rcu_access_pointer(data->filter);


@@ -1861,7 +1861,8 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
 		params[ac].acm = acm;
 		params[ac].uapsd = uapsd;
 
-		if (params[ac].cw_min > params[ac].cw_max) {
+		if (params[ac].cw_min == 0 ||
+		    params[ac].cw_min > params[ac].cw_max) {
 			sdata_info(sdata,
 				   "AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n",
 				   params[ac].cw_min, params[ac].cw_max, aci);


@@ -168,7 +168,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
 	u32 ip = 0, ip_to = 0, p = 0, port, port_to;
-	u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
+	u32 ip2_from = 0, ip2_to = 0, ip2;
 	bool with_ports = false;
 	u8 cidr;
 	int ret;
@@ -269,22 +269,21 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1);
 	}
 
-	if (retried)
+	if (retried) {
 		ip = ntohl(h->next.ip);
+		p = ntohs(h->next.port);
+		ip2 = ntohl(h->next.ip2);
+	} else {
+		p = port;
+		ip2 = ip2_from;
+	}
 	for (; ip <= ip_to; ip++) {
 		e.ip = htonl(ip);
-		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
-						       : port;
 		for (; p <= port_to; p++) {
 			e.port = htons(p);
-			ip2 = retried &&
-			      ip == ntohl(h->next.ip) &&
-			      p == ntohs(h->next.port)
-				? ntohl(h->next.ip2) : ip2_from;
-			while (ip2 <= ip2_to) {
+			do {
 				e.ip2 = htonl(ip2);
-				ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
-								&cidr);
+				ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr);
 				e.cidr = cidr - 1;
 				ret = adtfn(set, &e, &ext, &ext, flags);
 
@@ -292,9 +291,10 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 					return ret;
 
 				ret = 0;
-				ip2 = ip2_last + 1;
-			}
+			} while (ip2++ < ip2_to);
+			ip2 = ip2_from;
 		}
+		p = port;
 	}
 	return ret;
 }


@@ -143,7 +143,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net4_elem e = { .cidr = HOST_MASK };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 ip = 0, ip_to = 0, last;
+	u32 ip = 0, ip_to = 0;
 	int ret;
 
 	if (tb[IPSET_ATTR_LINENO])
@@ -193,16 +193,15 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 	}
 	if (retried)
 		ip = ntohl(h->next.ip);
-	while (ip <= ip_to) {
+	do {
 		e.ip = htonl(ip);
-		last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
+		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
 		ret = adtfn(set, &e, &ext, &ext, flags);
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
 
 		ret = 0;
-		ip = last + 1;
-	}
+	} while (ip++ < ip_to);
 	return ret;
 }
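
The loop rewrite matters when the range ends at 0xffffffff: with the old form, ip = last + 1 wraps to 0 and the test ip <= ip_to never becomes false, so the kernel spins forever. The do/while form compares before incrementing and terminates. A tiny standalone illustration of the safe form, with a stub range_step() standing in for ip_set_range_to_cidr() (stub name is an assumption for the example):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for ip_set_range_to_cidr(): cover [ip, ip_to] in small blocks and
 * return the last address of the block just covered.
 */
static uint32_t range_step(uint32_t ip, uint32_t ip_to)
{
	return (ip_to - ip >= 3) ? ip + 3 : ip_to;
}

int main(void)
{
	uint32_t ip = UINT32_MAX - 9, ip_to = UINT32_MAX;
	unsigned int blocks = 0;

	/* Post-increment test: the comparison uses the pre-increment value,
	 * so reaching ip_to == 0xffffffff ends the loop instead of wrapping
	 * to 0 the way "ip = last + 1; while (ip <= ip_to)" would.
	 */
	do {
		ip = range_step(ip, ip_to);
		blocks++;
	} while (ip++ < ip_to);

	printf("covered the range in %u blocks\n", blocks);
	return 0;
}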


@@ -200,7 +200,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 ip = 0, ip_to = 0, last;
+	u32 ip = 0, ip_to = 0;
 	int ret;
 
 	if (tb[IPSET_ATTR_LINENO])
@@ -255,17 +255,16 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 	if (retried)
 		ip = ntohl(h->next.ip);
-	while (ip <= ip_to) {
+	do {
 		e.ip = htonl(ip);
-		last = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
+		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
 		ret = adtfn(set, &e, &ext, &ext, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
 
 		ret = 0;
-		ip = last + 1;
-	}
+	} while (ip++ < ip_to);
 	return ret;
 }


@@ -169,8 +169,8 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netnet4_elem e = { };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 ip = 0, ip_to = 0, last;
-	u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
+	u32 ip = 0, ip_to = 0;
+	u32 ip2 = 0, ip2_from = 0, ip2_to = 0;
 	int ret;
 
 	if (tb[IPSET_ATTR_LINENO])
@@ -247,27 +247,27 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
 	}
 
-	if (retried)
+	if (retried) {
 		ip = ntohl(h->next.ip[0]);
+		ip2 = ntohl(h->next.ip[1]);
+	} else {
+		ip2 = ip2_from;
+	}
 
-	while (ip <= ip_to) {
+	do {
 		e.ip[0] = htonl(ip);
-		last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
-		ip2 = (retried &&
-		       ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1])
-						   : ip2_from;
-		while (ip2 <= ip2_to) {
+		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
+		do {
 			e.ip[1] = htonl(ip2);
-			last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
+			ip2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
 			ret = adtfn(set, &e, &ext, &ext, flags);
 			if (ret && !ip_set_eexist(ret, flags))
 				return ret;
 
 			ret = 0;
-			ip2 = last2 + 1;
-		}
-		ip = last + 1;
-	}
+		} while (ip2++ < ip2_to);
+		ip2 = ip2_from;
+	} while (ip++ < ip_to);
 	return ret;
 }


@@ -161,7 +161,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 port, port_to, p = 0, ip = 0, ip_to = 0, last;
+	u32 port, port_to, p = 0, ip = 0, ip_to = 0;
 	bool with_ports = false;
 	u8 cidr;
 	int ret;
@@ -239,25 +239,26 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
 	}
 
-	if (retried)
+	if (retried) {
 		ip = ntohl(h->next.ip);
-	while (ip <= ip_to) {
+		p = ntohs(h->next.port);
+	} else {
+		p = port;
+	}
+	do {
 		e.ip = htonl(ip);
-		last = ip_set_range_to_cidr(ip, ip_to, &cidr);
+		ip = ip_set_range_to_cidr(ip, ip_to, &cidr);
 		e.cidr = cidr - 1;
-		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
-						       : port;
 		for (; p <= port_to; p++) {
 			e.port = htons(p);
 			ret = adtfn(set, &e, &ext, &ext, flags);
 
 			if (ret && !ip_set_eexist(ret, flags))
 				return ret;
 
 			ret = 0;
 		}
-		ip = last + 1;
-	}
+		p = port;
+	} while (ip++ < ip_to);
 	return ret;
 }


@@ -184,8 +184,8 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netportnet4_elem e = { };
 	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-	u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
-	u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
+	u32 ip = 0, ip_to = 0, p = 0, port, port_to;
+	u32 ip2_from = 0, ip2_to = 0, ip2;
 	bool with_ports = false;
 	int ret;
 
@@ -288,33 +288,34 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 		ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
 	}
 
-	if (retried)
+	if (retried) {
 		ip = ntohl(h->next.ip[0]);
+		p = ntohs(h->next.port);
+		ip2 = ntohl(h->next.ip[1]);
+	} else {
+		p = port;
+		ip2 = ip2_from;
+	}
 
-	while (ip <= ip_to) {
+	do {
 		e.ip[0] = htonl(ip);
-		ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
-		p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port)
-							  : port;
+		ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
 		for (; p <= port_to; p++) {
 			e.port = htons(p);
-			ip2 = (retried && ip == ntohl(h->next.ip[0]) &&
-			       p == ntohs(h->next.port)) ? ntohl(h->next.ip[1])
-							 : ip2_from;
-			while (ip2 <= ip2_to) {
+			do {
 				e.ip[1] = htonl(ip2);
-				ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
-								&e.cidr[1]);
+				ip2 = ip_set_range_to_cidr(ip2, ip2_to,
+							   &e.cidr[1]);
 				ret = adtfn(set, &e, &ext, &ext, flags);
 				if (ret && !ip_set_eexist(ret, flags))
 					return ret;
 
 				ret = 0;
-				ip2 = ip2_last + 1;
-			}
+			} while (ip2++ < ip2_to);
+			ip2 = ip2_from;
 		}
-		ip = ip_last + 1;
-	}
+		p = port;
+	} while (ip++ < ip_to);
 	return ret;
 }


@@ -780,8 +780,15 @@ void xprt_connect(struct rpc_task *task)
 			return;
 		if (xprt_test_and_set_connecting(xprt))
 			return;
-		xprt->stat.connect_start = jiffies;
-		xprt->ops->connect(xprt, task);
+		/* Race breaker */
+		if (!xprt_connected(xprt)) {
+			xprt->stat.connect_start = jiffies;
+			xprt->ops->connect(xprt, task);
+		} else {
+			xprt_clear_connecting(xprt);
+			task->tk_status = 0;
+			rpc_wake_up_queued_task(&xprt->pending, task);
+		}
 	}
 	xprt_release_write(xprt, task);
 }
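
The added branch is a classic race breaker: after winning the "connecting" flag, the connection state is checked again, because another task may have completed the connect between the first check and the flag acquisition. A rough userspace sketch of the same shape, using C11 atomics and hypothetical names (not the SUNRPC API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical connection state, loosely modelled on the idea in the patch:
 * only one thread may run the (slow) connect, and the winner of the
 * "connecting" flag re-checks "connected" before doing the expensive work.
 */
static atomic_bool connected;
static atomic_flag connecting = ATOMIC_FLAG_INIT;

static void slow_connect(void)
{
	usleep(1000);			/* stand-in for a real handshake */
	atomic_store(&connected, true);
}

static void *try_connect(void *arg)
{
	(void)arg;

	if (atomic_load(&connected))
		return NULL;		/* fast path: already connected */

	if (atomic_flag_test_and_set(&connecting))
		return NULL;		/* somebody else is connecting */

	/* Race breaker: the state may have flipped between the first check
	 * and winning the flag, so test it again before reconnecting.
	 */
	if (!atomic_load(&connected))
		slow_connect();

	atomic_flag_clear(&connecting);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, try_connect, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	printf("connected: %d\n", (int)atomic_load(&connected));
	return 0;
}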


@@ -8070,7 +8070,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 
 	reject_from_alignment = fd_prog < 0 &&
 				(test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
-				strstr(bpf_vlog, "Unknown alignment.");
+				strstr(bpf_vlog, "misaligned");
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
 	if (reject_from_alignment) {
 		printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",