mirror of https://github.com/rd-stuffs/msm-4.14.git
synced 2025-02-20 11:45:48 +08:00
Merge android-4.14.132 (0dcd8eb) into msm-4.14
* refs/heads/tmp-0dcd8eb:
  Linux 4.14.132
  arm64: insn: Fix ldadd instruction encoding
  tipc: pass tunnel dev as NULL to udp_tunnel(6)_xmit_skb
  futex: Update comments and docs about return values of arch futex code
  bpf, arm64: use more scalable stadd over ldxr / stxr loop in xadd
  arm64: futex: Avoid copying out uninitialised stack in failed cmpxchg()
  bpf: udp: ipv6: Avoid running reuseport's bpf_prog from __udp6_lib_err
  bpf: udp: Avoid calling reuseport's bpf_prog from udp_gro
  bonding: Always enable vlan tx offload
  team: Always enable vlan tx offload
  tun: wake up waitqueues after IFF_UP is set
  tipc: check msg->req data len in tipc_nl_compat_bearer_disable
  tipc: change to use register_pernet_device
  sctp: change to hold sk after auth shkey is created successfully
  net: stmmac: fixed new system time seconds value calculation
  net: remove duplicate fetch in sock_getsockopt
  net/packet: fix memory leak in packet_set_ring()
  ipv4: Use return value of inet_iif() for __raw_v4_lookup in the while loop
  af_packet: Block execution of tasks waiting for transmit to complete in AF_PACKET
  eeprom: at24: fix unexpected timeout under high load
  cpu/speculation: Warn on unsupported mitigations= parameter
  NFS/flexfiles: Use the correct TCP timeout for flexfiles I/O
  x86/microcode: Fix the microcode load on CPU hotplug for real
  x86/speculation: Allow guests to use SSBD even if host does not
  scsi: vmw_pscsi: Fix use-after-free in pvscsi_queue_lck()
  dm log writes: make sure super sector log updates are written in order
  mm/page_idle.c: fix oops because end_pfn is larger than max_pfn
  fs/binfmt_flat.c: make load_flat_shared_library() work
  mm/mempolicy.c: fix an incorrect rebind node in mpol_rebind_nodemask
  fs/proc/array.c: allow reporting eip/esp for all coredumping threads
  Revert "compiler.h: update definition of unreachable()"
  qmi_wwan: Fix out-of-bounds read
  net/9p: include trans_common.h to fix missing prototype warning.
  9p: p9dirent_read: check network-provided name length
  9p/rdma: remove useless check in cm_event_handler
  9p: acl: fix uninitialized iattr access
  9p/rdma: do not disconnect on down_interruptible EAGAIN
  9p/xen: fix check for xenbus_read error in front_probe
  block: bio_iov_iter_get_pages: pin more pages for multi-segment IOs
  block: add a lower-level bio_add_page interface
  IB/hfi1: Close PSM sdma_progress sleep window
  Revert "x86/uaccess, ftrace: Fix ftrace_likely_update() vs. SMAP"
  perf header: Fix unchecked usage of strncpy()
  perf help: Remove needless use of strncpy()
  perf ui helpline: Use strlcpy() as a shorter form of strncpy() + explicit set nul

Change-Id: I253fc7ffebfad129b8c2165dd2d5aa5af221fd4b
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
This commit is contained in: commit dbc4aced9e
Documentation/robust-futexes.txt

@@ -218,5 +218,4 @@ All other architectures should build just fine too - but they won't have
 the new syscalls yet.

 Architectures need to implement the new futex_atomic_cmpxchg_inatomic()
-inline function before writing up the syscalls (that function returns
--ENOSYS right now).
+inline function before writing up the syscalls.

Makefile | 2

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 131
+SUBLEVEL = 132
 EXTRAVERSION =
 NAME = Petit Gorille

arch/arm64/include/asm/futex.h

@@ -134,7 +134,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr,
     : "memory");
    uaccess_disable();

-   *uval = val;
+   if (!ret)
+       *uval = val;
+
    return ret;
 }

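The point of the fix above: on any failure, the kernel must not copy the uninitialised stack variable val back to userspace. A minimal userspace sketch of the same write-the-out-param-only-on-success rule, using GCC/Clang atomic builtins (all names below are hypothetical, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Swap *slot from 'expected' to 'desired'; report the observed value
 * through *oldval ONLY on success, so a failed call can never leak an
 * uninitialised caller buffer. */
static int try_exchange(int *slot, int expected, int desired, int *oldval)
{
    int val = expected;

    if (!__atomic_compare_exchange_n(slot, &val, desired, false,
                     __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
        return -1;  /* caller's *oldval deliberately untouched */

    *oldval = val;  /* written only on the success path */
    return 0;
}

int main(void)
{
    int slot = 3, old = 0;

    if (!try_exchange(&slot, 3, 7, &old))
        printf("swapped, old=%d new=%d\n", old, slot);
    return 0;
}
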
arch/arm64/include/asm/insn.h

@@ -271,6 +271,7 @@ __AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
 __AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
 __AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
 __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
+__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
 __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
 __AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
 __AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)

@@ -383,6 +384,13 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
                    enum aarch64_insn_register state,
                    enum aarch64_insn_size_type size,
                    enum aarch64_insn_ldst_type type);
+u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
+              enum aarch64_insn_register address,
+              enum aarch64_insn_register value,
+              enum aarch64_insn_size_type size);
+u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
+              enum aarch64_insn_register value,
+              enum aarch64_insn_size_type size);
 u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
                  enum aarch64_insn_register src,
                  int imm, enum aarch64_insn_variant variant,

arch/arm64/kernel/insn.c

@@ -793,6 +793,46 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
                        state);
 }

+u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
+              enum aarch64_insn_register address,
+              enum aarch64_insn_register value,
+              enum aarch64_insn_size_type size)
+{
+   u32 insn = aarch64_insn_get_ldadd_value();
+
+   switch (size) {
+   case AARCH64_INSN_SIZE_32:
+   case AARCH64_INSN_SIZE_64:
+       break;
+   default:
+       pr_err("%s: unimplemented size encoding %d\n", __func__, size);
+       return AARCH64_BREAK_FAULT;
+   }
+
+   insn = aarch64_insn_encode_ldst_size(size, insn);
+
+   insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+                       result);
+
+   insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+                       address);
+
+   return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
+                       value);
+}
+
+u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
+              enum aarch64_insn_register value,
+              enum aarch64_insn_size_type size)
+{
+   /*
+    * STADD is simply encoded as an alias for LDADD with XZR as
+    * the destination register.
+    */
+   return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
+                     value, size);
+}
+
 static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
                    enum aarch64_insn_prfm_target target,
                    enum aarch64_insn_prfm_policy policy,

arch/arm64/net/bpf_jit.h

@@ -100,6 +100,10 @@
 #define A64_STXR(sf, Rt, Rn, Rs) \
    A64_LSX(sf, Rt, Rn, Rs, STORE_EX)

+/* LSE atomics */
+#define A64_STADD(sf, Rn, Rs) \
+   aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))
+
 /* Add/subtract (immediate) */
 #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
    aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \

arch/arm64/net/bpf_jit_comp.c

@@ -330,7 +330,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
    const int i = insn - ctx->prog->insnsi;
    const bool is64 = BPF_CLASS(code) == BPF_ALU64;
    const bool isdw = BPF_SIZE(code) == BPF_DW;
-   u8 jmp_cond;
+   u8 jmp_cond, reg;
    s32 jmp_offset;

 #define check_imm(bits, imm) do { \

@@ -706,18 +706,28 @@ emit_cond_jmp:
            break;
        }
        break;

    /* STX XADD: lock *(u32 *)(dst + off) += src */
    case BPF_STX | BPF_XADD | BPF_W:
    /* STX XADD: lock *(u64 *)(dst + off) += src */
    case BPF_STX | BPF_XADD | BPF_DW:
-       emit_a64_mov_i(1, tmp, off, ctx);
-       emit(A64_ADD(1, tmp, tmp, dst), ctx);
-       emit(A64_LDXR(isdw, tmp2, tmp), ctx);
-       emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
-       emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
-       jmp_offset = -3;
-       check_imm19(jmp_offset);
-       emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+       if (!off) {
+           reg = dst;
+       } else {
+           emit_a64_mov_i(1, tmp, off, ctx);
+           emit(A64_ADD(1, tmp, tmp, dst), ctx);
+           reg = tmp;
+       }
+       if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
+           emit(A64_STADD(isdw, reg, src), ctx);
+       } else {
+           emit(A64_LDXR(isdw, tmp2, reg), ctx);
+           emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
+           emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
+           jmp_offset = -3;
+           check_imm19(jmp_offset);
+           emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
+       }
        break;

    /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */

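The JIT change above emits a single STADD (an LDADD whose destination is XZR) when the CPU has LSE atomics, instead of an LDXR/STXR/CBNZ retry loop. As a rough illustration, the same two code shapes fall out of an ordinary atomic-add builtin, assuming GCC or Clang targeting arm64: -march=armv8-a produces the exclusive-load loop, -march=armv8.1-a produces stadd.

#include <stdint.h>
#include <stdio.h>

/* BPF's STX XADD is a fetchless atomic add, i.e. exactly this: */
void xadd64(uint64_t *ptr, uint64_t val)
{
    (void)__atomic_fetch_add(ptr, val, __ATOMIC_RELAXED);
}

int main(void)
{
    uint64_t counter = 0;

    xadd64(&counter, 5);
    printf("%llu\n", (unsigned long long)counter);
    return 0;
}
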
arch/x86/kernel/cpu/bugs.c

@@ -820,6 +820,16 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
        break;
    }

+   /*
+    * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
+    * bit in the mask to allow guests to use the mitigation even in the
+    * case where the host does not enable it.
+    */
+   if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+       static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+       x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+   }
+
    /*
     * We have three CPU feature flags that are in play here:
     *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.

@@ -837,7 +847,6 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
            x86_amd_ssb_disable();
        } else {
            x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-           x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
            wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
        }
    }

arch/x86/kernel/cpu/microcode/core.c

@@ -790,13 +790,16 @@ static struct syscore_ops mc_syscore_ops = {
    .resume = mc_bp_resume,
 };

-static int mc_cpu_online(unsigned int cpu)
+static int mc_cpu_starting(unsigned int cpu)
 {
-   struct device *dev;
-
-   dev = get_cpu_device(cpu);
    microcode_update_cpu(cpu);
    pr_debug("CPU%d added\n", cpu);
+   return 0;
+}
+
+static int mc_cpu_online(unsigned int cpu)
+{
+   struct device *dev = get_cpu_device(cpu);

    if (sysfs_create_group(&dev->kobj, &mc_attr_group))
        pr_err("Failed to create group for CPU%d\n", cpu);

@@ -873,7 +876,9 @@ int __init microcode_init(void)
        goto out_ucode_group;

    register_syscore_ops(&mc_syscore_ops);
-   cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
+   cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
+                 mc_cpu_starting, NULL);
+   cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
                  mc_cpu_online, mc_cpu_down_prep);

    pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

block/bio.c | 133

@@ -786,7 +786,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
            return 0;
    }

-   if (bio->bi_vcnt >= bio->bi_max_vecs)
+   if (bio_full(bio))
        return 0;

    /*

@@ -833,6 +833,65 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
 }
 EXPORT_SYMBOL(bio_add_pc_page);

+/**
+ * __bio_try_merge_page - try appending data to an existing bvec.
+ * @bio: destination bio
+ * @page: page to add
+ * @len: length of the data to add
+ * @off: offset of the data in @page
+ *
+ * Try to add the data at @page + @off to the last bvec of @bio. This is
+ * a useful optimisation for file systems with a block size smaller than
+ * the page size.
+ *
+ * Return %true on success or %false on failure.
+ */
+bool __bio_try_merge_page(struct bio *bio, struct page *page,
+       unsigned int len, unsigned int off)
+{
+   if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
+       return false;
+
+   if (bio->bi_vcnt > 0) {
+       struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+       if (page == bv->bv_page && off == bv->bv_offset + bv->bv_len) {
+           bv->bv_len += len;
+           bio->bi_iter.bi_size += len;
+           return true;
+       }
+   }
+   return false;
+}
+EXPORT_SYMBOL_GPL(__bio_try_merge_page);
+
+/**
+ * __bio_add_page - add page to a bio in a new segment
+ * @bio: destination bio
+ * @page: page to add
+ * @len: length of the data to add
+ * @off: offset of the data in @page
+ *
+ * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
+ * that @bio has space for another bvec.
+ */
+void __bio_add_page(struct bio *bio, struct page *page,
+       unsigned int len, unsigned int off)
+{
+   struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt];
+
+   WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+   WARN_ON_ONCE(bio_full(bio));
+
+   bv->bv_page = page;
+   bv->bv_offset = off;
+   bv->bv_len = len;
+
+   bio->bi_iter.bi_size += len;
+   bio->bi_vcnt++;
+}
+EXPORT_SYMBOL_GPL(__bio_add_page);
+
 /**
  * bio_add_page - attempt to add page to bio
  * @bio: destination bio

@@ -846,53 +905,26 @@ EXPORT_SYMBOL(bio_add_pc_page);
 int bio_add_page(struct bio *bio, struct page *page,
         unsigned int len, unsigned int offset)
 {
-   struct bio_vec *bv;
-
-   /*
-    * cloned bio must not modify vec list
-    */
-   if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
-       return 0;
-
-   /*
-    * For filesystems with a blocksize smaller than the pagesize
-    * we will often be called with the same page as last time and
-    * a consecutive offset.  Optimize this special case.
-    */
-   if (bio->bi_vcnt > 0) {
-       bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
-
-       if (page == bv->bv_page &&
-           offset == bv->bv_offset + bv->bv_len) {
-           bv->bv_len += len;
-           goto done;
-       }
+   if (!__bio_try_merge_page(bio, page, len, offset)) {
+       if (bio_full(bio))
+           return 0;
+       __bio_add_page(bio, page, len, offset);
    }
-
-   if (bio->bi_vcnt >= bio->bi_max_vecs)
-       return 0;
-
-   bv = &bio->bi_io_vec[bio->bi_vcnt];
-   bv->bv_page = page;
-   bv->bv_len = len;
-   bv->bv_offset = offset;
-
-   bio->bi_vcnt++;
-done:
-   bio->bi_iter.bi_size += len;
    return len;
 }
 EXPORT_SYMBOL(bio_add_page);

 /**
- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
  * @bio: bio to add pages to
  * @iter: iov iterator describing the region to be mapped
  *
- * Pins as many pages from *iter and appends them to @bio's bvec array. The
+ * Pins pages from *iter and appends them to @bio's bvec array. The
  * pages will have to be released using put_page() when done.
+ * For multi-segment *iter, this function only adds pages from the
+ * next non-empty segment of the iov iterator.
  */
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 {
    unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
    struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;

@@ -929,6 +961,33 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
    iov_iter_advance(iter, size);
    return 0;
 }
+
+/**
+ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * @bio: bio to add pages to
+ * @iter: iov iterator describing the region to be mapped
+ *
+ * Pins pages from *iter and appends them to @bio's bvec array. The
+ * pages will have to be released using put_page() when done.
+ * The function tries, but does not guarantee, to pin as many pages as
+ * fit into the bio, or are requested in *iter, whatever is smaller.
+ * If MM encounters an error pinning the requested pages, it stops.
+ * Error is returned only if 0 pages could be pinned.
+ */
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+{
+   unsigned short orig_vcnt = bio->bi_vcnt;
+
+   do {
+       int ret = __bio_iov_iter_get_pages(bio, iter);
+
+       if (unlikely(ret))
+           return bio->bi_vcnt > orig_vcnt ? 0 : ret;
+
+   } while (iov_iter_count(iter) && !bio_full(bio));
+
+   return 0;
+}
 EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);

 struct submit_bio_ret {

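After the refactor, bio_add_page is just "try to merge into the last bvec, otherwise append a new one if there is room". A self-contained userspace sketch of that merge-or-append shape on a plain segment array — every name below is invented for illustration, only the control flow mirrors the kernel code:

#include <stdbool.h>
#include <stdio.h>

struct seg { char *base; unsigned int len; };
struct segvec { struct seg v[8]; unsigned int cnt; };

static bool segvec_full(struct segvec *s)      /* like bio_full() */
{
    return s->cnt >= sizeof(s->v) / sizeof(s->v[0]);
}

/* like __bio_try_merge_page: extend the last segment if contiguous */
static bool try_merge(struct segvec *s, char *base, unsigned int len)
{
    if (s->cnt > 0) {
        struct seg *last = &s->v[s->cnt - 1];

        if (base == last->base + last->len) {
            last->len += len;
            return true;
        }
    }
    return false;
}

/* like __bio_add_page: caller must ensure there is room */
static void add_seg(struct segvec *s, char *base, unsigned int len)
{
    s->v[s->cnt].base = base;
    s->v[s->cnt].len = len;
    s->cnt++;
}

/* like the rewritten bio_add_page */
static unsigned int add(struct segvec *s, char *base, unsigned int len)
{
    if (!try_merge(s, base, len)) {
        if (segvec_full(s))
            return 0;
        add_seg(s, base, len);
    }
    return len;
}

int main(void)
{
    static char buf[4096];
    struct segvec s = { .cnt = 0 };

    add(&s, buf, 512);
    add(&s, buf + 512, 512);    /* contiguous: merged, cnt stays 1 */
    add(&s, buf + 2048, 512);   /* gap: starts a new segment */
    printf("segments: %u\n", s.cnt);
    return 0;
}
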
drivers/infiniband/hw/hfi1/user_sdma.c

@@ -132,25 +132,22 @@ static int defer_packet_queue(
    struct hfi1_user_sdma_pkt_q *pq =
        container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
    struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
-   struct user_sdma_txreq *tx =
-       container_of(txreq, struct user_sdma_txreq, txreq);

-   if (sdma_progress(sde, seq, txreq)) {
-       if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
-           goto eagain;
-   }
+   write_seqlock(&dev->iowait_lock);
+   if (sdma_progress(sde, seq, txreq))
+       goto eagain;
    /*
     * We are assuming that if the list is enqueued somewhere, it
     * is to the dmawait list since that is the only place where
     * it is supposed to be enqueued.
     */
    xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
-   write_seqlock(&dev->iowait_lock);
    if (list_empty(&pq->busy.list))
        iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
    write_sequnlock(&dev->iowait_lock);
    return -EBUSY;
 eagain:
+   write_sequnlock(&dev->iowait_lock);
    return -EAGAIN;
 }

@@ -803,7 +800,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)

        tx->flags = 0;
        tx->req = req;
-       tx->busycount = 0;
        INIT_LIST_HEAD(&tx->list);

        /*

drivers/infiniband/hw/hfi1/user_sdma.h

@@ -236,7 +236,6 @@ struct user_sdma_txreq {
    struct list_head list;
    struct user_sdma_request *req;
    u16 flags;
-   unsigned int busycount;
    u64 seqnum;
 };

drivers/md/dm-log-writes.c

@@ -57,6 +57,7 @@

 #define WRITE_LOG_VERSION 1ULL
 #define WRITE_LOG_MAGIC 0x6a736677736872ULL
+#define WRITE_LOG_SUPER_SECTOR 0

 /*
  * The disk format for this is braindead simple.

@@ -112,6 +113,7 @@ struct log_writes_c {
    struct list_head logging_blocks;
    wait_queue_head_t wait;
    struct task_struct *log_kthread;
+   struct completion super_done;
 };

 struct pending_block {

@@ -177,6 +179,14 @@ static void log_end_io(struct bio *bio)
    bio_put(bio);
 }

+static void log_end_super(struct bio *bio)
+{
+   struct log_writes_c *lc = bio->bi_private;
+
+   complete(&lc->super_done);
+   log_end_io(bio);
+}
+
 /*
  * Meant to be called if there is an error, it will free all the pages
  * associated with the block.

@@ -212,7 +222,8 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
    bio->bi_iter.bi_size = 0;
    bio->bi_iter.bi_sector = sector;
    bio_set_dev(bio, lc->logdev->bdev);
-   bio->bi_end_io = log_end_io;
+   bio->bi_end_io = (sector == WRITE_LOG_SUPER_SECTOR) ?
+             log_end_super : log_end_io;
    bio->bi_private = lc;
    bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

@@ -334,11 +345,18 @@ static int log_super(struct log_writes_c *lc)
    super.nr_entries = cpu_to_le64(lc->logged_entries);
    super.sectorsize = cpu_to_le32(lc->sectorsize);

-   if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) {
+   if (write_metadata(lc, &super, sizeof(super), NULL, 0,
+              WRITE_LOG_SUPER_SECTOR)) {
        DMERR("Couldn't write super");
        return -1;
    }

+   /*
+    * Super sector should be written in-order, otherwise the
+    * nr_entries could be rewritten incorrectly by an old bio.
+    */
+   wait_for_completion_io(&lc->super_done);
+
    return 0;
 }

@@ -447,6 +465,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
    INIT_LIST_HEAD(&lc->unflushed_blocks);
    INIT_LIST_HEAD(&lc->logging_blocks);
    init_waitqueue_head(&lc->wait);
+   init_completion(&lc->super_done);
    atomic_set(&lc->io_blocks, 0);
    atomic_set(&lc->pending_blocks, 0);

|
||||
((1 << AT24_SIZE_FLAGS | (_flags)) \
|
||||
<< AT24_SIZE_BYTELEN | ilog2(_len))
|
||||
|
||||
/*
|
||||
* Both reads and writes fail if the previous write didn't complete yet. This
|
||||
* macro loops a few times waiting at least long enough for one entire page
|
||||
* write to work while making sure that at least one iteration is run before
|
||||
* checking the break condition.
|
||||
*
|
||||
* It takes two parameters: a variable in which the future timeout in jiffies
|
||||
* will be stored and a temporary variable holding the time of the last
|
||||
* iteration of processing the request. Both should be unsigned integers
|
||||
* holding at least 32 bits.
|
||||
*/
|
||||
#define loop_until_timeout(tout, op_time) \
|
||||
for (tout = jiffies + msecs_to_jiffies(write_timeout), op_time = 0; \
|
||||
op_time ? time_before(op_time, tout) : true; \
|
||||
usleep_range(1000, 1500), op_time = jiffies)
|
||||
|
||||
static const struct i2c_device_id at24_ids[] = {
|
||||
/* needs 8 addresses as A0-A2 are ignored */
|
||||
{ "24c00", AT24_DEVICE_MAGIC(128 / 8, AT24_FLAG_TAKE8ADDR) },
|
||||
@ -234,7 +218,14 @@ static ssize_t at24_eeprom_read_smbus(struct at24_data *at24, char *buf,
|
||||
if (count > I2C_SMBUS_BLOCK_MAX)
|
||||
count = I2C_SMBUS_BLOCK_MAX;
|
||||
|
||||
loop_until_timeout(timeout, read_time) {
|
||||
timeout = jiffies + msecs_to_jiffies(write_timeout);
|
||||
do {
|
||||
/*
|
||||
* The timestamp shall be taken before the actual operation
|
||||
* to avoid a premature timeout in case of high CPU load.
|
||||
*/
|
||||
read_time = jiffies;
|
||||
|
||||
status = i2c_smbus_read_i2c_block_data_or_emulated(client,
|
||||
offset,
|
||||
count, buf);
|
||||
@ -244,7 +235,9 @@ static ssize_t at24_eeprom_read_smbus(struct at24_data *at24, char *buf,
|
||||
|
||||
if (status == count)
|
||||
return count;
|
||||
}
|
||||
|
||||
usleep_range(1000, 1500);
|
||||
} while (time_before(read_time, timeout));
|
||||
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
@ -284,7 +277,14 @@ static ssize_t at24_eeprom_read_i2c(struct at24_data *at24, char *buf,
|
||||
msg[1].buf = buf;
|
||||
msg[1].len = count;
|
||||
|
||||
loop_until_timeout(timeout, read_time) {
|
||||
timeout = jiffies + msecs_to_jiffies(write_timeout);
|
||||
do {
|
||||
/*
|
||||
* The timestamp shall be taken before the actual operation
|
||||
* to avoid a premature timeout in case of high CPU load.
|
||||
*/
|
||||
read_time = jiffies;
|
||||
|
||||
status = i2c_transfer(client->adapter, msg, 2);
|
||||
if (status == 2)
|
||||
status = count;
|
||||
@ -294,7 +294,9 @@ static ssize_t at24_eeprom_read_i2c(struct at24_data *at24, char *buf,
|
||||
|
||||
if (status == count)
|
||||
return count;
|
||||
}
|
||||
|
||||
usleep_range(1000, 1500);
|
||||
} while (time_before(read_time, timeout));
|
||||
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
@ -343,11 +345,20 @@ static ssize_t at24_eeprom_read_serial(struct at24_data *at24, char *buf,
|
||||
msg[1].buf = buf;
|
||||
msg[1].len = count;
|
||||
|
||||
loop_until_timeout(timeout, read_time) {
|
||||
timeout = jiffies + msecs_to_jiffies(write_timeout);
|
||||
do {
|
||||
/*
|
||||
* The timestamp shall be taken before the actual operation
|
||||
* to avoid a premature timeout in case of high CPU load.
|
||||
*/
|
||||
read_time = jiffies;
|
||||
|
||||
status = i2c_transfer(client->adapter, msg, 2);
|
||||
if (status == 2)
|
||||
return count;
|
||||
}
|
||||
|
||||
usleep_range(1000, 1500);
|
||||
} while (time_before(read_time, timeout));
|
||||
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
@ -374,11 +385,20 @@ static ssize_t at24_eeprom_read_mac(struct at24_data *at24, char *buf,
|
||||
msg[1].buf = buf;
|
||||
msg[1].len = count;
|
||||
|
||||
loop_until_timeout(timeout, read_time) {
|
||||
timeout = jiffies + msecs_to_jiffies(write_timeout);
|
||||
do {
|
||||
/*
|
||||
* The timestamp shall be taken before the actual operation
|
||||
* to avoid a premature timeout in case of high CPU load.
|
||||
*/
|
||||
read_time = jiffies;
|
||||
|
||||
status = i2c_transfer(client->adapter, msg, 2);
|
||||
if (status == 2)
|
||||
return count;
|
||||
}
|
||||
|
||||
usleep_range(1000, 1500);
|
||||
} while (time_before(read_time, timeout));
|
||||
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
@ -420,7 +440,14 @@ static ssize_t at24_eeprom_write_smbus_block(struct at24_data *at24,
|
||||
client = at24_translate_offset(at24, &offset);
|
||||
count = at24_adjust_write_count(at24, offset, count);
|
||||
|
||||
loop_until_timeout(timeout, write_time) {
|
||||
timeout = jiffies + msecs_to_jiffies(write_timeout);
|
||||
do {
|
||||
/*
|
||||
* The timestamp shall be taken before the actual operation
|
||||
* to avoid a premature timeout in case of high CPU load.
|
||||
*/
|
||||
write_time = jiffies;
|
||||
|
||||
status = i2c_smbus_write_i2c_block_data(client,
|
||||
offset, count, buf);
|
||||
if (status == 0)
|
||||
@ -431,7 +458,9 @@ static ssize_t at24_eeprom_write_smbus_block(struct at24_data *at24,
|
||||
|
||||
if (status == count)
|
||||
return count;
|
||||
}
|
||||
|
||||
usleep_range(1000, 1500);
|
||||
} while (time_before(write_time, timeout));
|
||||
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
@ -446,7 +475,14 @@ static ssize_t at24_eeprom_write_smbus_byte(struct at24_data *at24,
|
||||
|
||||
client = at24_translate_offset(at24, &offset);
|
||||
|
||||
loop_until_timeout(timeout, write_time) {
|
||||
timeout = jiffies + msecs_to_jiffies(write_timeout);
|
||||
do {
|
||||
/*
|
||||
* The timestamp shall be taken before the actual operation
|
||||
* to avoid a premature timeout in case of high CPU load.
|
||||
*/
|
||||
write_time = jiffies;
|
||||
|
||||
status = i2c_smbus_write_byte_data(client, offset, buf[0]);
|
||||
if (status == 0)
|
||||
status = count;
|
||||
@ -456,7 +492,9 @@ static ssize_t at24_eeprom_write_smbus_byte(struct at24_data *at24,
|
||||
|
||||
if (status == count)
|
||||
return count;
|
||||
}
|
||||
|
||||
usleep_range(1000, 1500);
|
||||
} while (time_before(write_time, timeout));
|
||||
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
@ -485,7 +523,14 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
|
||||
memcpy(&msg.buf[i], buf, count);
|
||||
msg.len = i + count;
|
||||
|
||||
loop_until_timeout(timeout, write_time) {
|
||||
timeout = jiffies + msecs_to_jiffies(write_timeout);
|
||||
do {
|
||||
/*
|
||||
* The timestamp shall be taken before the actual operation
|
||||
* to avoid a premature timeout in case of high CPU load.
|
||||
*/
|
||||
write_time = jiffies;
|
||||
|
||||
status = i2c_transfer(client->adapter, &msg, 1);
|
||||
if (status == 1)
|
||||
status = count;
|
||||
@ -495,7 +540,9 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf,
|
||||
|
||||
if (status == count)
|
||||
return count;
|
||||
}
|
||||
|
||||
usleep_range(1000, 1500);
|
||||
} while (time_before(write_time, timeout));
|
||||
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
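The removed macro sampled its timestamp only after each transfer, so a heavily loaded system could conclude "timed out" without ever retrying. The open-coded replacement takes the timestamp before the operation and always runs at least one iteration. The same pattern in plain userspace C, with op() as a stand-in for the I2C transfer:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long now_ms(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

static bool op(void) { return false; }  /* stand-in: always "device busy" */

static int retry_with_timeout(long timeout_ms)
{
    long long timeout = now_ms() + timeout_ms;
    long long op_time;

    do {
        /* take the timestamp *before* the operation, so a slow,
         * preempted attempt does not count against the budget */
        op_time = now_ms();
        if (op())
            return 0;
        usleep(1000);
    } while (op_time < timeout);

    return -ETIMEDOUT;
}

int main(void)
{
    printf("result: %d\n", retry_with_timeout(25));
    return 0;
}
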
drivers/net/bonding/bond_main.c

@@ -4263,12 +4263,12 @@ void bond_setup(struct net_device *bond_dev)
    bond_dev->features |= NETIF_F_NETNS_LOCAL;

    bond_dev->hw_features = BOND_VLAN_FEATURES |
-               NETIF_F_HW_VLAN_CTAG_TX |
                NETIF_F_HW_VLAN_CTAG_RX |
                NETIF_F_HW_VLAN_CTAG_FILTER;

    bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
    bond_dev->features |= bond_dev->hw_features;
+   bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
 }

 /* Destroy a bonding device.

drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c

@@ -121,7 +121,7 @@ static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
     * programmed with (2^32 – <new_sec_value>)
     */
    if (gmac4)
-       sec = (100000000ULL - sec);
+       sec = -sec;

    value = readl(ioaddr + PTP_TCR);
    if (value & PTP_TCR_TSCTRLSSR)

drivers/net/team/team.c

@@ -2131,12 +2131,12 @@ static void team_setup(struct net_device *dev)
    dev->features |= NETIF_F_NETNS_LOCAL;

    dev->hw_features = TEAM_VLAN_FEATURES |
-              NETIF_F_HW_VLAN_CTAG_TX |
               NETIF_F_HW_VLAN_CTAG_RX |
               NETIF_F_HW_VLAN_CTAG_FILTER;

    dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
    dev->features |= dev->hw_features;
+   dev->features |= NETIF_F_HW_VLAN_CTAG_TX;
 }

 static int team_newlink(struct net *src_net, struct net_device *dev,

drivers/net/tun.c

@@ -831,18 +831,8 @@ static void tun_net_uninit(struct net_device *dev)
 /* Net device open. */
 static int tun_net_open(struct net_device *dev)
 {
-   struct tun_struct *tun = netdev_priv(dev);
-   int i;
-
    netif_tx_start_all_queues(dev);

-   for (i = 0; i < tun->numqueues; i++) {
-       struct tun_file *tfile;
-
-       tfile = rtnl_dereference(tun->tfiles[i]);
-       tfile->socket.sk->sk_write_space(tfile->socket.sk);
-   }
-
    return 0;
 }

@@ -2830,6 +2820,7 @@ static int tun_device_event(struct notifier_block *unused,
 {
    struct net_device *dev = netdev_notifier_info_to_dev(ptr);
    struct tun_struct *tun = netdev_priv(dev);
+   int i;

    if (dev->rtnl_link_ops != &tun_link_ops)
        return NOTIFY_DONE;

@@ -2839,6 +2830,14 @@ static int tun_device_event(struct notifier_block *unused,
        if (tun_queue_resize(tun))
            return NOTIFY_BAD;
        break;
+   case NETDEV_UP:
+       for (i = 0; i < tun->numqueues; i++) {
+           struct tun_file *tfile;
+
+           tfile = rtnl_dereference(tun->tfiles[i]);
+           tfile->socket.sk->sk_write_space(tfile->socket.sk);
+       }
+       break;
    default:
        break;
    }

drivers/net/usb/qmi_wwan.c

@@ -1395,14 +1395,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
        return -ENODEV;
    }

-   info = (void *)&id->driver_info;
-
    /* Several Quectel modems support dynamic interface configuration, so
     * we need to match on class/subclass/protocol. These values are
     * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
     * different. Ignore the current interface if the number of endpoints
     * equals the number for the diag interface (two).
     */
+   info = (void *)id->driver_info;
+
    if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
        if (desc->bNumEndpoints == 2)
            return -ENODEV;

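driver_info is an unsigned long that stores a pointer value, so the correct cast uses the value, while &id->driver_info yields a pointer into the usb_device_id itself, and field accesses through it read out of bounds. A tiny demo of the difference, on hypothetical structures:

#include <stdio.h>

struct quirk { unsigned long data; };
struct device_id { unsigned long driver_info; };  /* holds a pointer value */

static const struct quirk quectel_quirk = { .data = 0x1 };

int main(void)
{
    struct device_id id = { .driver_info = (unsigned long)&quectel_quirk };

    const struct quirk *ok  = (void *)id.driver_info;  /* the stored pointer */
    const struct quirk *bad = (void *)&id.driver_info; /* points at 'id' itself */

    printf("ok->data = %lx\n", ok->data);
    /* dereferencing 'bad' would reinterpret (and read past) struct device_id */
    printf("bad points %s the id struct\n",
           (void *)bad == (void *)&id ? "into" : "outside");
    return 0;
}
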
drivers/scsi/vmw_pvscsi.c

@@ -763,6 +763,7 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
    struct pvscsi_adapter *adapter = shost_priv(host);
    struct pvscsi_ctx *ctx;
    unsigned long flags;
+   unsigned char op;

    spin_lock_irqsave(&adapter->hw_lock, flags);

@@ -775,13 +776,14 @@ static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd
    }

    cmd->scsi_done = done;
+   op = cmd->cmnd[0];

    dev_dbg(&cmd->device->sdev_gendev,
-       "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);
+       "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);

    spin_unlock_irqrestore(&adapter->hw_lock, flags);

-   pvscsi_kick_io(adapter, cmd->cmnd[0]);
+   pvscsi_kick_io(adapter, op);

    return 0;
 }

fs/9p/acl.c

@@ -276,7 +276,7 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
    switch (handler->flags) {
    case ACL_TYPE_ACCESS:
        if (acl) {
-           struct iattr iattr;
+           struct iattr iattr = { 0 };
            struct posix_acl *old_acl = acl;

            retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);

fs/binfmt_flat.c

@@ -856,9 +856,14 @@ err:

 static int load_flat_shared_library(int id, struct lib_info *libs)
 {
+   /*
+    * This is a fake bprm struct; only the members "buf", "file" and
+    * "filename" are actually used.
+    */
    struct linux_binprm bprm;
    int res;
    char buf[16];
+   loff_t pos = 0;

    memset(&bprm, 0, sizeof(bprm));

@@ -872,25 +877,11 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
    if (IS_ERR(bprm.file))
        return res;

-   bprm.cred = prepare_exec_creds();
-   res = -ENOMEM;
-   if (!bprm.cred)
-       goto out;
-
-   /* We don't really care about recalculating credentials at this point
-    * as we're past the point of no return and are dealing with shared
-    * libraries.
-    */
-   bprm.called_set_creds = 1;
+   res = kernel_read(bprm.file, bprm.buf, BINPRM_BUF_SIZE, &pos);

-   res = prepare_binprm(&bprm);
-
-   if (!res)
+   if (res >= 0)
        res = load_flat_file(&bprm, libs, id, NULL);

-   abort_creds(bprm.cred);
-
-out:
    allow_write_access(bprm.file);
    fput(bprm.file);

fs/nfs/flexfilelayout/flexfilelayoutdev.c

@@ -18,7 +18,7 @@

 #define NFSDBG_FACILITY NFSDBG_PNFS_LD

-static unsigned int dataserver_timeo = NFS_DEF_TCP_RETRANS;
+static unsigned int dataserver_timeo = NFS_DEF_TCP_TIMEO;
 static unsigned int dataserver_retrans;

 static bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);

fs/proc/array.c

@@ -448,7 +448,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
     * a program is not able to use ptrace(2) in that case. It is
     * safe because the task has stopped executing permanently.
     */
-   if (permitted && (task->flags & PF_DUMPCORE)) {
+   if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) {
        if (try_get_task_stack(task)) {
            eip = KSTK_EIP(task);
            esp = KSTK_ESP(task);

include/asm-generic/futex.h

@@ -23,7 +23,9 @@
  *
  * Return:
  *  0 - On success
- * <0 - On error
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Operation not supported
  */
 static inline int
 arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)

@@ -85,7 +87,9 @@ out_pagefault_enable:
  *
  * Return:
  *  0 - On success
- * <0 - On error
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
  */
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,

include/linux/bio.h

@@ -126,6 +126,11 @@ static inline void *bio_data(struct bio *bio)
    return NULL;
 }

+static inline bool bio_full(struct bio *bio)
+{
+   return bio->bi_vcnt >= bio->bi_max_vecs;
+}
+
 /*
  * will die
  */

@@ -467,6 +472,10 @@ void bio_chain(struct bio *, struct bio *);
 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
               unsigned int, unsigned int);
+bool __bio_try_merge_page(struct bio *bio, struct page *page,
+             unsigned int len, unsigned int off);
+void __bio_add_page(struct bio *bio, struct page *page,
+           unsigned int len, unsigned int off);
 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
 struct rq_map_data;
 extern struct bio *bio_map_user_iov(struct request_queue *,

include/linux/compiler.h

@@ -119,10 +119,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 # define ASM_UNREACHABLE
 #endif
 #ifndef unreachable
-# define unreachable() do {        \
-   annotate_unreachable();     \
-   __builtin_unreachable();    \
-} while (0)
+# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
 #endif

 /*

kernel/cpu.c

@@ -2426,6 +2426,9 @@ static int __init mitigations_parse_cmdline(char *arg)
        cpu_mitigations = CPU_MITIGATIONS_AUTO;
    else if (!strcmp(arg, "auto,nosmt"))
        cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
+   else
+       pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
+           arg);

    return 0;
 }

kernel/trace/trace_branch.c

@@ -205,8 +205,6 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
 void ftrace_likely_update(struct ftrace_likely_data *f, int val,
              int expect, int is_constant)
 {
-   unsigned long flags = user_access_save();
-
    /* A constant is always correct */
    if (is_constant) {
        f->constant++;

@@ -225,8 +223,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
        f->data.correct++;
    else
        f->data.incorrect++;
-
-   user_access_restore(flags);
 }
 EXPORT_SYMBOL(ftrace_likely_update);

mm/mempolicy.c

@@ -305,7 +305,7 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
    else {
        nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
                                *nodes);
-       pol->w.cpuset_mems_allowed = tmp;
+       pol->w.cpuset_mems_allowed = *nodes;
    }

    if (nodes_empty(tmp))

mm/page_idle.c

@@ -136,7 +136,7 @@ static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,

    end_pfn = pfn + count * BITS_PER_BYTE;
    if (end_pfn > max_pfn)
-       end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
+       end_pfn = max_pfn;

    for (; pfn < end_pfn; pfn++) {
        bit = pfn % BITMAP_CHUNK_BITS;

@@ -181,7 +181,7 @@ static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,

    end_pfn = pfn + count * BITS_PER_BYTE;
    if (end_pfn > max_pfn)
-       end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
+       end_pfn = max_pfn;

    for (; pfn < end_pfn; pfn++) {
        bit = pfn % BITMAP_CHUNK_BITS;

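Rounding max_pfn up to the bitmap chunk size lets the loop walk PFNs that have no struct page behind them; clamping to max_pfn cannot overshoot. Worked numbers, as a small sketch:

#include <stdio.h>

#define BITMAP_CHUNK_BITS 64
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
    unsigned long max_pfn = 100, pfn = 64, count = 8; /* count in bytes */
    unsigned long end_old, end_new;

    end_old = pfn + count * 8;              /* 128 */
    if (end_old > max_pfn)
        end_old = ALIGN_UP(max_pfn, BITMAP_CHUNK_BITS); /* still 128 */
    end_new = pfn + count * 8;
    if (end_new > max_pfn)
        end_new = max_pfn;                  /* 100 */

    printf("old bound: %lu (walks %lu pfns past end of memory)\n",
           end_old, end_old - max_pfn);
    printf("new bound: %lu\n", end_new);
    return 0;
}
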
net/9p/protocol.c

@@ -622,13 +622,19 @@ int p9dirent_read(struct p9_client *clnt, char *buf, int len,
    if (ret) {
        p9_debug(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
        trace_9p_protocol_dump(clnt, &fake_pdu);
-       goto out;
+       return ret;
    }

-   strcpy(dirent->d_name, nameptr);
+   ret = strscpy(dirent->d_name, nameptr, sizeof(dirent->d_name));
+   if (ret < 0) {
+       p9_debug(P9_DEBUG_ERROR,
+            "On the wire dirent name too long: %s\n",
+            nameptr);
+       kfree(nameptr);
+       return ret;
+   }
    kfree(nameptr);

-out:
    return fake_pdu.offset;
 }
 EXPORT_SYMBOL(p9dirent_read);

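strcpy trusted a name that came off the wire; strscpy bounds the copy to the destination and returns an error on truncation, which the caller now rejects. A simplified userspace reimplementation showing that contract (not the kernel's optimised version):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static long my_strscpy(char *dst, const char *src, size_t size)
{
    size_t len = strnlen(src, size);

    if (size == 0)
        return -E2BIG;
    if (len == size) {          /* would not fit together with the NUL */
        memcpy(dst, src, size - 1);
        dst[size - 1] = '\0';
        return -E2BIG;          /* caller can reject wire input */
    }
    memcpy(dst, src, len + 1);
    return (long)len;
}

int main(void)
{
    char d_name[8];

    printf("short: %ld\n", my_strscpy(d_name, "file", sizeof(d_name)));
    printf("long:  %ld\n", my_strscpy(d_name, "averylongname", sizeof(d_name)));
    return 0;
}
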
net/9p/trans_common.c

@@ -14,6 +14,7 @@

 #include <linux/mm.h>
 #include <linux/module.h>
+#include "trans_common.h"

 /**
  * p9_release_req_pages - Release pages after the transaction.

net/9p/trans_rdma.c

@@ -276,8 +276,7 @@ p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
    case RDMA_CM_EVENT_DISCONNECTED:
        if (rdma)
            rdma->state = P9_RDMA_CLOSED;
-       if (c)
-           c->status = Disconnected;
+       c->status = Disconnected;
        break;

    case RDMA_CM_EVENT_TIMEWAIT_EXIT:

@@ -476,7 +475,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)

    err = post_recv(client, rpl_context);
    if (err) {
-       p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
+       p9_debug(P9_DEBUG_ERROR, "POST RECV failed: %d\n", err);
        goto recv_error;
    }
    /* remove posted receive buffer from request structure */

@@ -545,7 +544,7 @@ dont_need_post_recv:
 recv_error:
    kfree(rpl_context);
    spin_lock_irqsave(&rdma->req_lock, flags);
-   if (rdma->state < P9_RDMA_CLOSING) {
+   if (err != -EINTR && rdma->state < P9_RDMA_CLOSING) {
        rdma->state = P9_RDMA_CLOSING;
        spin_unlock_irqrestore(&rdma->req_lock, flags);
        rdma_disconnect(rdma->cm_id);

net/9p/trans_xen.c

@@ -392,8 +392,8 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
    unsigned int max_rings, max_ring_order, len = 0;

    versions = xenbus_read(XBT_NIL, dev->otherend, "versions", &len);
-   if (!len)
-       return -EINVAL;
+   if (IS_ERR(versions))
+       return PTR_ERR(versions);
    if (strcmp(versions, "1")) {
        kfree(versions);
        return -EINVAL;

net/core/sock.c

@@ -1358,9 +1358,6 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
    {
        u32 meminfo[SK_MEMINFO_VARS];

-       if (get_user(len, optlen))
-           return -EFAULT;
-
        sk_get_meminfo(sk, meminfo);

        len = min_t(unsigned int, len, sizeof(meminfo));

net/ipv4/raw.c

@@ -202,7 +202,7 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
        }
        sk = __raw_v4_lookup(net, sk_next(sk), iph->protocol,
                     iph->saddr, iph->daddr,
-                    skb->dev->ifindex, sdif);
+                    dif, sdif);
    }
 out:
    read_unlock(&raw_v4_hashinfo.lock);

net/ipv4/udp.c

@@ -569,7 +569,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
 struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
                 __be16 sport, __be16 dport)
 {
-   return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
+   const struct iphdr *iph = ip_hdr(skb);
+
+   return __udp4_lib_lookup(dev_net(skb->dev), iph->saddr, sport,
+                iph->daddr, dport, inet_iif(skb),
+                inet_sdif(skb), &udp_table, NULL);
 }
 EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);

net/ipv6/udp.c

@@ -308,7 +308,7 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,

    return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport,
                 &iph->daddr, dport, inet6_iif(skb),
-                inet6_sdif(skb), &udp_table, skb);
+                inet6_sdif(skb), &udp_table, NULL);
 }
 EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);

@@ -509,7 +509,7 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
    struct net *net = dev_net(skb->dev);

    sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
                   inet6_iif(skb), 0, udptable, skb);
-                  inet6_iif(skb), 0, udptable, skb);
+                  inet6_iif(skb), 0, udptable, NULL);
    if (!sk) {
        __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
                  ICMP6_MIB_INERRORS);

net/packet/af_packet.c

@@ -2438,6 +2438,9 @@ static void tpacket_destruct_skb(struct sk_buff *skb)

        ts = __packet_set_timestamp(po, ph, skb);
        __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
+
+       if (!packet_read_pending(&po->tx_ring))
+           complete(&po->skb_completion);
    }

    sock_wfree(skb);

@@ -2632,7 +2635,7 @@ static int tpacket_parse_header(struct packet_sock *po, void *frame,

 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 {
-   struct sk_buff *skb;
+   struct sk_buff *skb = NULL;
    struct net_device *dev;
    struct virtio_net_hdr *vnet_hdr = NULL;
    struct sockcm_cookie sockc;

@@ -2647,6 +2650,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
    int len_sum = 0;
    int status = TP_STATUS_AVAILABLE;
    int hlen, tlen, copylen = 0;
+   long timeo = 0;

    mutex_lock(&po->pg_vec_lock);

@@ -2693,12 +2697,21 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
    if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
        size_max = dev->mtu + reserve + VLAN_HLEN;

+   reinit_completion(&po->skb_completion);
+
    do {
        ph = packet_current_frame(po, &po->tx_ring,
                      TP_STATUS_SEND_REQUEST);
        if (unlikely(ph == NULL)) {
-           if (need_wait && need_resched())
-               schedule();
+           if (need_wait && skb) {
+               timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
+               timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
+               if (timeo <= 0) {
+                   err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
+                   goto out_put;
+               }
+           }
            /* check for additional frames */
            continue;
        }

@@ -3252,6 +3265,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
    sock_init_data(sock, sk);

    po = pkt_sk(sk);
+   init_completion(&po->skb_completion);
    sk->sk_family = PF_PACKET;
    po->num = proto;
    po->xmit = dev_queue_xmit;

@@ -4340,7 +4354,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
                req3->tp_sizeof_priv ||
                req3->tp_feature_req_word) {
                err = -EINVAL;
-               goto out;
+               goto out_free_pg_vec;
            }
        }
        break;

@@ -4404,6 +4418,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
            prb_shutdown_retire_blk_timer(po, rb_queue);
    }

+out_free_pg_vec:
    if (pg_vec)
        free_pg_vec(pg_vec, order, req->tp_block_nr);
 out:

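Instead of spinning in schedule(), the sender now sleeps on a completion armed with the socket's send timeout, mapping a zero return to -ETIMEDOUT and a signal to an interrupted-call error. A hypothetical userspace analog of that wait using sem_timedwait:

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static int wait_for_tx_slot(sem_t *done, long timeout_ms)
{
    struct timespec ts;

    clock_gettime(CLOCK_REALTIME, &ts);  /* sem_timedwait uses realtime */
    ts.tv_sec += timeout_ms / 1000;
    ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
    if (ts.tv_nsec >= 1000000000L) {
        ts.tv_sec++;
        ts.tv_nsec -= 1000000000L;
    }

    if (sem_timedwait(done, &ts) == 0)
        return 0;
    return errno == ETIMEDOUT ? -ETIMEDOUT : -EINTR;
}

int main(void)
{
    sem_t done;

    sem_init(&done, 0, 0);                  /* nothing completed yet */
    printf("wait: %d\n", wait_for_tx_slot(&done, 50)); /* times out */
    sem_post(&done);                        /* a "skb" completed */
    printf("wait: %d\n", wait_for_tx_slot(&done, 50)); /* succeeds */
    return 0;
}
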
net/packet/internal.h

@@ -128,6 +128,7 @@ struct packet_sock {
    unsigned int tp_hdrlen;
    unsigned int tp_reserve;
    unsigned int tp_tstamp;
+   struct completion skb_completion;
    struct net_device __rcu *cached_dev;
    int (*xmit)(struct sk_buff *skb);
    struct packet_type prot_hook ____cacheline_aligned_in_smp;

net/sctp/endpointola.c

@@ -126,10 +126,6 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
    /* Initialize the bind addr area */
    sctp_bind_addr_init(&ep->base.bind_addr, 0);

-   /* Remember who we are attached to. */
-   ep->base.sk = sk;
-   sock_hold(ep->base.sk);
-
    /* Create the lists of associations. */
    INIT_LIST_HEAD(&ep->asocs);

@@ -167,6 +163,10 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
    ep->prsctp_enable = net->sctp.prsctp_enable;
    ep->reconf_enable = net->sctp.reconf_enable;

+   /* Remember who we are attached to. */
+   ep->base.sk = sk;
+   sock_hold(ep->base.sk);
+
    return ep;

 nomem_hmacs:

net/tipc/core.c

@@ -128,7 +128,7 @@ static int __init tipc_init(void)
    if (err)
        goto out_sysctl;

-   err = register_pernet_subsys(&tipc_net_ops);
+   err = register_pernet_device(&tipc_net_ops);
    if (err)
        goto out_pernet;

@@ -136,7 +136,7 @@ static int __init tipc_init(void)
    if (err)
        goto out_socket;

-   err = register_pernet_subsys(&tipc_topsrv_net_ops);
+   err = register_pernet_device(&tipc_topsrv_net_ops);
    if (err)
        goto out_pernet_topsrv;

@@ -147,11 +147,11 @@ static int __init tipc_init(void)
    pr_info("Started in single node mode\n");
    return 0;
 out_bearer:
-   unregister_pernet_subsys(&tipc_topsrv_net_ops);
+   unregister_pernet_device(&tipc_topsrv_net_ops);
 out_pernet_topsrv:
    tipc_socket_stop();
 out_socket:
-   unregister_pernet_subsys(&tipc_net_ops);
+   unregister_pernet_device(&tipc_net_ops);
 out_pernet:
    tipc_unregister_sysctl();
 out_sysctl:

@@ -166,9 +166,9 @@ out_netlink:
 static void __exit tipc_exit(void)
 {
    tipc_bearer_cleanup();
-   unregister_pernet_subsys(&tipc_topsrv_net_ops);
+   unregister_pernet_device(&tipc_topsrv_net_ops);
    tipc_socket_stop();
-   unregister_pernet_subsys(&tipc_net_ops);
+   unregister_pernet_device(&tipc_net_ops);
    tipc_netlink_stop();
    tipc_netlink_compat_stop();
    tipc_unregister_sysctl();

net/tipc/netlink_compat.c

@@ -436,7 +436,11 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
    if (!bearer)
        return -EMSGSIZE;

-   len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+   len = TLV_GET_DATA_LEN(msg->req);
+   if (len <= 0)
+       return -EINVAL;
+
+   len = min_t(int, len, TIPC_MAX_BEARER_NAME);
    if (!string_is_valid(name, len))
        return -EINVAL;

@@ -528,7 +532,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,

    name = (char *)TLV_DATA(msg->req);

-   len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+   len = TLV_GET_DATA_LEN(msg->req);
+   if (len <= 0)
+       return -EINVAL;
+
+   len = min_t(int, len, TIPC_MAX_BEARER_NAME);
    if (!string_is_valid(name, len))
        return -EINVAL;

@@ -806,7 +814,11 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
    if (!link)
        return -EMSGSIZE;

-   len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+   len = TLV_GET_DATA_LEN(msg->req);
+   if (len <= 0)
+       return -EINVAL;
+
+   len = min_t(int, len, TIPC_MAX_BEARER_NAME);
    if (!string_is_valid(name, len))
        return -EINVAL;

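TLV_GET_DATA_LEN can come out zero or negative for a truncated message, and min_t alone preserves a negative value, so the sign check has to happen before the clamp. A compact sketch of the failure mode and the fix — tlv_data_len() and MAX_NAME are stand-ins, not the TIPC macros:

#include <stdio.h>

#define MAX_NAME 32
#define min_int(a, b) ((a) < (b) ? (a) : (b))

/* stand-in: data length is wire length minus a fixed header */
static int tlv_data_len(int wire_len) { return wire_len - 4; }

static int check_len(int wire_len)
{
    int len = tlv_data_len(wire_len);

    if (len <= 0)           /* the added check; without it a negative
                             * len would flow into the validator */
        return -1;          /* -EINVAL */
    len = min_int(len, MAX_NAME);
    return len;
}

int main(void)
{
    printf("truncated msg -> %d\n", check_len(2));   /* rejected */
    printf("normal msg    -> %d\n", check_len(20));  /* 16 */
    return 0;
}
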
net/tipc/udp_media.c

@@ -174,7 +174,6 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
        goto tx_error;
    }

-   skb->dev = rt->dst.dev;
    ttl = ip4_dst_hoplimit(&rt->dst);
    udp_tunnel_xmit_skb(rt, ub->ubsock->sk, skb, src->ipv4.s_addr,
                dst->ipv4.s_addr, 0, ttl, 0, src->port,

@@ -193,10 +192,9 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb,
        if (err)
            goto tx_error;
        ttl = ip6_dst_hoplimit(ndst);
-       err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb,
-                      ndst->dev, &src->ipv6,
-                      &dst->ipv6, 0, ttl, 0, src->port,
-                      dst->port, false);
+       err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, NULL,
+                      &src->ipv6, &dst->ipv6, 0, ttl, 0,
+                      src->port, dst->port, false);
 #endif
    }
    return err;

tools/perf/builtin-help.c

@@ -189,7 +189,7 @@ static void add_man_viewer(const char *name)
    while (*p)
        p = &((*p)->next);
    *p = zalloc(sizeof(**p) + len + 1);
-   strncpy((*p)->name, name, len);
+   strcpy((*p)->name, name);
 }

 static int supported_man_viewer(const char *name, size_t len)

tools/perf/ui/tui/helpline.c

@@ -24,7 +24,7 @@ static void tui_helpline__push(const char *msg)
    SLsmg_set_color(0);
    SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols);
    SLsmg_refresh();
-   strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
+   strlcpy(ui_helpline__current, msg, sz);
 }

 static int tui_helpline__show(const char *format, va_list ap)

tools/perf/util/header.c

@@ -3171,7 +3171,7 @@ perf_event__synthesize_event_update_name(struct perf_tool *tool,
    if (ev == NULL)
        return -ENOMEM;

-   strncpy(ev->data, evsel->name, len);
+   strlcpy(ev->data, evsel->name, len + 1);
    err = process(tool, (union perf_event*) ev, NULL, NULL);
    free(ev);
    return err;

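All three perf fixes retire strncpy, which neither NUL-terminates on truncation nor reports that truncation happened. A short demo of the pitfall next to a strlcpy-style replacement (local reimplementation, since glibc lacks strlcpy):

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
    size_t ret = strlen(src);

    if (size) {
        size_t len = ret >= size ? size - 1 : ret;

        memcpy(dst, src, len);
        dst[len] = '\0';        /* always terminated */
    }
    return ret;                 /* ret >= size means truncation */
}

int main(void)
{
    char a[4], b[4];

    strncpy(a, "ABCD", sizeof(a));      /* fills buffer, no NUL written */
    my_strlcpy(b, "ABCD", sizeof(b));   /* "ABC\0" */

    printf("strlcpy: %s\n", b);
    printf("strncpy terminated? %s\n",
           memchr(a, '\0', sizeof(a)) ? "yes" : "no");
    return 0;
}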