mirror of
https://github.com/rd-stuffs/msm-4.14.git
synced 2025-02-20 11:45:48 +08:00
This is the 4.14.56 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAltNuVYACgkQONu9yGCS
aT7kTA/+MRHC5oFvdnhSsF6jAHsY9rgJNQXPtZCFhZnHhhYHtubQ2OJOmSZ7IfM0
9yhz/7vijC9+tLufXQxQnu2UUL3ojNu1+l+q9s0U1GUzNiONlJ9q/CyB4xjXFRCS
1RdiDZaQbIqUCYs38UCTsEJF65uKjzQ6dpF21XdIXp5FPxgiZawo4HpjQRJswbAl
Du97ybMEPN3XnAn207GjZwy58ubRLF5HDG1sqNGfjVWJ7oMTi+QJOCvY3PJtU3j2
unS0qjxLU432rOyDfaJK7Yj9s61zu0PurbJrHo+dw3O3hd/Og7soqoqohUEjZWXd
z7jjrntXZOZ/0st2yHmygfAPUJm/8jsh7Pd39Jgyfeu/3Clo51gO494rwATQsyE5
mwIdllyzyMNBEJI2F2fxE60WlFsbTjeBOX3BaOwnF8pGRJWsCAfbFknRbuKh1fO5
czFbUSOi00POw4WHT1rxV9u0yDBXmP47fy9zHquOim+PfK8pFvWuf6GSFjvqRTv8
20w1w7eixMi09ZXOkgTJ3S00MKHSpxoaenI3n2NcEVVRgDEVfh3C/zelvvfCDMHD
i36DN39Sj41PNA/R4n0TIA4W+ab9qBVzQl16yaj9JURR2rA92GyMVC1+Xjqo1Py3
GRFOf2Gprlm0/vfkiRsMu9coAJuKV6+8fHXQU4mzHulKUaDWuJ0=
=/wBU
-----END PGP SIGNATURE-----

Merge 4.14.56 into android-4.14

Changes in 4.14.56
    media: rc: mce_kbd decoder: fix stuck keys
    ASoC: mediatek: preallocate pages use platform device
    MIPS: Call dump_stack() from show_regs()
    MIPS: Use async IPIs for arch_trigger_cpumask_backtrace()
    MIPS: Fix ioremap() RAM check
    mmc: sdhci-esdhc-imx: allow 1.8V modes without 100/200MHz pinctrl states
    mmc: dw_mmc: fix card threshold control configuration
    ibmasm: don't write out of bounds in read handler
    staging: rtl8723bs: Prevent an underflow in rtw_check_beacon_data().
    staging: r8822be: Fix RTL8822be can't find any wireless AP
    ata: Fix ZBC_OUT command block check
    ata: Fix ZBC_OUT all bit handling
    vmw_balloon: fix inflation with batching
    ahci: Disable LPM on Lenovo 50 series laptops with a too old BIOS
    USB: serial: ch341: fix type promotion bug in ch341_control_in()
    USB: serial: cp210x: add another USB ID for Qivicon ZigBee stick
    USB: serial: keyspan_pda: fix modem-status error handling
    USB: yurex: fix out-of-bounds uaccess in read handler
    USB: serial: mos7840: fix status-register error handling
    usb: quirks: add delay quirks for Corsair Strafe
    xhci: xhci-mem: off by one in xhci_stream_id_to_ring()
    devpts: hoist out check for DEVPTS_SUPER_MAGIC
    devpts: resolve devpts bind-mounts
    Fix up non-directory creation in SGID directories
    genirq/affinity: assign vectors to all possible CPUs
    scsi: megaraid_sas: use adapter_type for all gen controllers
    scsi: megaraid_sas: replace instance->ctrl_context checks with instance->adapter_type
    scsi: megaraid_sas: replace is_ventura with adapter_type checks
    scsi: megaraid_sas: Create separate functions to allocate ctrl memory
    scsi: megaraid_sas: fix selection of reply queue
    ALSA: hda/realtek - two more lenovo models need fixup of MIC_LOCATION
    ALSA: hda - Handle pm failure during hotplug
    mm: do not drop unused pages when userfaultd is running
    fs/proc/task_mmu.c: fix Locked field in /proc/pid/smaps*
    fs, elf: make sure to page align bss in load_elf_library
    mm: do not bug_on on incorrect length in __mm_populate()
    tracing: Reorder display of TGID to be after PID
    kbuild: delete INSTALL_FW_PATH from kbuild documentation
    arm64: neon: Fix function may_use_simd() return error status
    tools build: fix # escaping in .cmd files for future Make
    IB/hfi1: Fix incorrect mixing of ERR_PTR and NULL return values
    i2c: tegra: Fix NACK error handling
    iw_cxgb4: correctly enforce the max reg_mr depth
    xen: setup pv irq ops vector earlier
    nvme-pci: Remap CMB SQ entries on every controller reset
    crypto: x86/salsa20 - remove x86 salsa20 implementations
    uprobes/x86: Remove incorrect WARN_ON() in uprobe_init_insn()
    netfilter: nf_queue: augment nfqa_cfg_policy
    netfilter: x_tables: initialise match/target check parameter struct
    loop: add recursion validation to LOOP_CHANGE_FD
    PM / hibernate: Fix oops at snapshot_write()
    RDMA/ucm: Mark UCM interface as BROKEN
    loop: remember whether sysfs_create_group() was done
    f2fs: give message and set need_fsck given broken node id
    Linux 4.14.56

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 818299f6bd
@@ -152,15 +152,6 @@ stripped after they are installed. If INSTALL_MOD_STRIP is '1', then
the default option --strip-debug will be used. Otherwise,
INSTALL_MOD_STRIP value will be used as the options to the strip command.

INSTALL_FW_PATH
--------------------------------------------------
INSTALL_FW_PATH specifies where to install the firmware blobs.
The default value is:

    $(INSTALL_MOD_PATH)/lib/firmware

The value can be overridden in which case the default value is ignored.

INSTALL_HDR_PATH
--------------------------------------------------
INSTALL_HDR_PATH specifies where to install user space headers when
Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 55
SUBLEVEL = 56
EXTRAVERSION =
NAME = Petit Gorille
@@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
static __must_check inline bool may_use_simd(void)
{
/*
* The raw_cpu_read() is racy if called with preemption enabled.
* This is not a bug: kernel_neon_busy is only set when
* preemption is disabled, so we cannot migrate to another CPU
* while it is set, nor can we migrate to a CPU where it is set.
* So, if we find it clear on some CPU then we're guaranteed to
* find it clear on any CPU we could migrate to.
*
* If we are in between kernel_neon_begin()...kernel_neon_end(),
* the flag will be set, but preemption is also disabled, so we
* can't migrate to another CPU and spuriously see it become
* false.
* kernel_neon_busy is only set while preemption is disabled,
* and is clear whenever preemption is enabled. Since
* this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
* cannot change under our feet -- if it's set we cannot be
* migrated, and if it's clear we cannot be migrated to a CPU
* where it is set.
*/
return !in_irq() && !irqs_disabled() && !in_nmi() &&
!raw_cpu_read(kernel_neon_busy);
!this_cpu_read(kernel_neon_busy);
}

#else /* ! CONFIG_KERNEL_MODE_NEON */
@@ -29,6 +29,7 @@
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/nmi.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
@@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp)
return sp & ALMASK;
}

static void arch_dump_stack(void *info)
static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
struct pt_regs *regs;
nmi_cpu_backtrace(get_irq_regs());
cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

regs = get_irq_regs();
static void raise_backtrace(cpumask_t *mask)
{
call_single_data_t *csd;
int cpu;

if (regs)
show_regs(regs);
for_each_cpu(cpu, mask) {
/*
* If we previously sent an IPI to the target CPU & it hasn't
* cleared its bit in the busy cpumask then it didn't handle
* our previous IPI & it's not safe for us to reuse the
* call_single_data_t.
*/
if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
cpu);
continue;
}

dump_stack();
csd = &per_cpu(backtrace_csd, cpu);
csd->func = handle_backtrace;
smp_call_function_single_async(cpu, csd);
}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
long this_cpu = get_cpu();

if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
dump_stack();

smp_call_function_many(mask, arch_dump_stack, NULL, 1);

put_cpu();
nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

int mips_get_process_fp_mode(struct task_struct *task)
@@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
void show_regs(struct pt_regs *regs)
{
__show_regs((struct pt_regs *)regs);
dump_stack();
}

void show_registers(struct pt_regs *regs)
@@ -9,6 +9,7 @@
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
return error;
}

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
void *arg)
{
unsigned long i;

for (i = 0; i < nr_pages; i++) {
if (pfn_valid(start_pfn + i) &&
!PageReserved(pfn_to_page(start_pfn + i)))
return 1;
}

return 0;
}

/*
* Generic mapping function (not visible outside):
*/
@@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,

void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
{
unsigned long offset, pfn, last_pfn;
struct vm_struct * area;
unsigned long offset;
phys_addr_t last_addr;
void * addr;

@@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
return (void __iomem *) CKSEG1ADDR(phys_addr);

/*
* Don't allow anybody to remap normal RAM that we're using..
* Don't allow anybody to remap RAM that may be allocated by the page
* allocator, since that could lead to races & data clobbering.
*/
if (phys_addr < virt_to_phys(high_memory)) {
char *t_addr, *t_end;
struct page *page;

t_addr = __va(phys_addr);
t_end = t_addr + (size - 1);

for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
if(!PageReserved(page))
return NULL;
pfn = PFN_DOWN(phys_addr);
last_pfn = PFN_DOWN(last_addr);
if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
__ioremap_check_ram) == 1) {
WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
&phys_addr, &last_addr);
return NULL;
}

/*
@@ -15,7 +15,6 @@ obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o

obj-$(CONFIG_CRYPTO_AES_586) += aes-i586.o
obj-$(CONFIG_CRYPTO_TWOFISH_586) += twofish-i586.o
obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o

obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
@@ -24,7 +23,6 @@ obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
obj-$(CONFIG_CRYPTO_TWOFISH_X86_64_3WAY) += twofish-x86_64-3way.o
obj-$(CONFIG_CRYPTO_SALSA20_X86_64) += salsa20-x86_64.o
obj-$(CONFIG_CRYPTO_CHACHA20_X86_64) += chacha20-x86_64.o
obj-$(CONFIG_CRYPTO_SERPENT_SSE2_X86_64) += serpent-sse2-x86_64.o
obj-$(CONFIG_CRYPTO_AES_NI_INTEL) += aesni-intel.o
@@ -59,7 +57,6 @@ endif

aes-i586-y := aes-i586-asm_32.o aes_glue.o
twofish-i586-y := twofish-i586-asm_32.o twofish_glue.o
salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o

aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
@@ -68,7 +65,6 @@ camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
twofish-x86_64-3way-y := twofish-x86_64-asm_64-3way.o twofish_glue_3way.o
salsa20-x86_64-y := salsa20-x86_64-asm_64.o salsa20_glue.o
chacha20-x86_64-y := chacha20-ssse3-x86_64.o chacha20_glue.o
serpent-sse2-x86_64-y := serpent-sse2-x86_64-asm_64.o serpent_sse2_glue.o
File diff suppressed because it is too large
Load Diff
@ -1,919 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#include <linux/linkage.h>
|
||||
|
||||
# enter salsa20_encrypt_bytes
|
||||
ENTRY(salsa20_encrypt_bytes)
|
||||
mov %rsp,%r11
|
||||
and $31,%r11
|
||||
add $256,%r11
|
||||
sub %r11,%rsp
|
||||
# x = arg1
|
||||
mov %rdi,%r8
|
||||
# m = arg2
|
||||
mov %rsi,%rsi
|
||||
# out = arg3
|
||||
mov %rdx,%rdi
|
||||
# bytes = arg4
|
||||
mov %rcx,%rdx
|
||||
# unsigned>? bytes - 0
|
||||
cmp $0,%rdx
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto done if !unsigned>
|
||||
jbe ._done
|
||||
# comment:fp stack unchanged by fallthrough
|
||||
# start:
|
||||
._start:
|
||||
# r11_stack = r11
|
||||
movq %r11,0(%rsp)
|
||||
# r12_stack = r12
|
||||
movq %r12,8(%rsp)
|
||||
# r13_stack = r13
|
||||
movq %r13,16(%rsp)
|
||||
# r14_stack = r14
|
||||
movq %r14,24(%rsp)
|
||||
# r15_stack = r15
|
||||
movq %r15,32(%rsp)
|
||||
# rbx_stack = rbx
|
||||
movq %rbx,40(%rsp)
|
||||
# rbp_stack = rbp
|
||||
movq %rbp,48(%rsp)
|
||||
# in0 = *(uint64 *) (x + 0)
|
||||
movq 0(%r8),%rcx
|
||||
# in2 = *(uint64 *) (x + 8)
|
||||
movq 8(%r8),%r9
|
||||
# in4 = *(uint64 *) (x + 16)
|
||||
movq 16(%r8),%rax
|
||||
# in6 = *(uint64 *) (x + 24)
|
||||
movq 24(%r8),%r10
|
||||
# in8 = *(uint64 *) (x + 32)
|
||||
movq 32(%r8),%r11
|
||||
# in10 = *(uint64 *) (x + 40)
|
||||
movq 40(%r8),%r12
|
||||
# in12 = *(uint64 *) (x + 48)
|
||||
movq 48(%r8),%r13
|
||||
# in14 = *(uint64 *) (x + 56)
|
||||
movq 56(%r8),%r14
|
||||
# j0 = in0
|
||||
movq %rcx,56(%rsp)
|
||||
# j2 = in2
|
||||
movq %r9,64(%rsp)
|
||||
# j4 = in4
|
||||
movq %rax,72(%rsp)
|
||||
# j6 = in6
|
||||
movq %r10,80(%rsp)
|
||||
# j8 = in8
|
||||
movq %r11,88(%rsp)
|
||||
# j10 = in10
|
||||
movq %r12,96(%rsp)
|
||||
# j12 = in12
|
||||
movq %r13,104(%rsp)
|
||||
# j14 = in14
|
||||
movq %r14,112(%rsp)
|
||||
# x_backup = x
|
||||
movq %r8,120(%rsp)
|
||||
# bytesatleast1:
|
||||
._bytesatleast1:
|
||||
# unsigned<? bytes - 64
|
||||
cmp $64,%rdx
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto nocopy if !unsigned<
|
||||
jae ._nocopy
|
||||
# ctarget = out
|
||||
movq %rdi,128(%rsp)
|
||||
# out = &tmp
|
||||
leaq 192(%rsp),%rdi
|
||||
# i = bytes
|
||||
mov %rdx,%rcx
|
||||
# while (i) { *out++ = *m++; --i }
|
||||
rep movsb
|
||||
# out = &tmp
|
||||
leaq 192(%rsp),%rdi
|
||||
# m = &tmp
|
||||
leaq 192(%rsp),%rsi
|
||||
# comment:fp stack unchanged by fallthrough
|
||||
# nocopy:
|
||||
._nocopy:
|
||||
# out_backup = out
|
||||
movq %rdi,136(%rsp)
|
||||
# m_backup = m
|
||||
movq %rsi,144(%rsp)
|
||||
# bytes_backup = bytes
|
||||
movq %rdx,152(%rsp)
|
||||
# x1 = j0
|
||||
movq 56(%rsp),%rdi
|
||||
# x0 = x1
|
||||
mov %rdi,%rdx
|
||||
# (uint64) x1 >>= 32
|
||||
shr $32,%rdi
|
||||
# x3 = j2
|
||||
movq 64(%rsp),%rsi
|
||||
# x2 = x3
|
||||
mov %rsi,%rcx
|
||||
# (uint64) x3 >>= 32
|
||||
shr $32,%rsi
|
||||
# x5 = j4
|
||||
movq 72(%rsp),%r8
|
||||
# x4 = x5
|
||||
mov %r8,%r9
|
||||
# (uint64) x5 >>= 32
|
||||
shr $32,%r8
|
||||
# x5_stack = x5
|
||||
movq %r8,160(%rsp)
|
||||
# x7 = j6
|
||||
movq 80(%rsp),%r8
|
||||
# x6 = x7
|
||||
mov %r8,%rax
|
||||
# (uint64) x7 >>= 32
|
||||
shr $32,%r8
|
||||
# x9 = j8
|
||||
movq 88(%rsp),%r10
|
||||
# x8 = x9
|
||||
mov %r10,%r11
|
||||
# (uint64) x9 >>= 32
|
||||
shr $32,%r10
|
||||
# x11 = j10
|
||||
movq 96(%rsp),%r12
|
||||
# x10 = x11
|
||||
mov %r12,%r13
|
||||
# x10_stack = x10
|
||||
movq %r13,168(%rsp)
|
||||
# (uint64) x11 >>= 32
|
||||
shr $32,%r12
|
||||
# x13 = j12
|
||||
movq 104(%rsp),%r13
|
||||
# x12 = x13
|
||||
mov %r13,%r14
|
||||
# (uint64) x13 >>= 32
|
||||
shr $32,%r13
|
||||
# x15 = j14
|
||||
movq 112(%rsp),%r15
|
||||
# x14 = x15
|
||||
mov %r15,%rbx
|
||||
# (uint64) x15 >>= 32
|
||||
shr $32,%r15
|
||||
# x15_stack = x15
|
||||
movq %r15,176(%rsp)
|
||||
# i = 20
|
||||
mov $20,%r15
|
||||
# mainloop:
|
||||
._mainloop:
|
||||
# i_backup = i
|
||||
movq %r15,184(%rsp)
|
||||
# x5 = x5_stack
|
||||
movq 160(%rsp),%r15
|
||||
# a = x12 + x0
|
||||
lea (%r14,%rdx),%rbp
|
||||
# (uint32) a <<<= 7
|
||||
rol $7,%ebp
|
||||
# x4 ^= a
|
||||
xor %rbp,%r9
|
||||
# b = x1 + x5
|
||||
lea (%rdi,%r15),%rbp
|
||||
# (uint32) b <<<= 7
|
||||
rol $7,%ebp
|
||||
# x9 ^= b
|
||||
xor %rbp,%r10
|
||||
# a = x0 + x4
|
||||
lea (%rdx,%r9),%rbp
|
||||
# (uint32) a <<<= 9
|
||||
rol $9,%ebp
|
||||
# x8 ^= a
|
||||
xor %rbp,%r11
|
||||
# b = x5 + x9
|
||||
lea (%r15,%r10),%rbp
|
||||
# (uint32) b <<<= 9
|
||||
rol $9,%ebp
|
||||
# x13 ^= b
|
||||
xor %rbp,%r13
|
||||
# a = x4 + x8
|
||||
lea (%r9,%r11),%rbp
|
||||
# (uint32) a <<<= 13
|
||||
rol $13,%ebp
|
||||
# x12 ^= a
|
||||
xor %rbp,%r14
|
||||
# b = x9 + x13
|
||||
lea (%r10,%r13),%rbp
|
||||
# (uint32) b <<<= 13
|
||||
rol $13,%ebp
|
||||
# x1 ^= b
|
||||
xor %rbp,%rdi
|
||||
# a = x8 + x12
|
||||
lea (%r11,%r14),%rbp
|
||||
# (uint32) a <<<= 18
|
||||
rol $18,%ebp
|
||||
# x0 ^= a
|
||||
xor %rbp,%rdx
|
||||
# b = x13 + x1
|
||||
lea (%r13,%rdi),%rbp
|
||||
# (uint32) b <<<= 18
|
||||
rol $18,%ebp
|
||||
# x5 ^= b
|
||||
xor %rbp,%r15
|
||||
# x10 = x10_stack
|
||||
movq 168(%rsp),%rbp
|
||||
# x5_stack = x5
|
||||
movq %r15,160(%rsp)
|
||||
# c = x6 + x10
|
||||
lea (%rax,%rbp),%r15
|
||||
# (uint32) c <<<= 7
|
||||
rol $7,%r15d
|
||||
# x14 ^= c
|
||||
xor %r15,%rbx
|
||||
# c = x10 + x14
|
||||
lea (%rbp,%rbx),%r15
|
||||
# (uint32) c <<<= 9
|
||||
rol $9,%r15d
|
||||
# x2 ^= c
|
||||
xor %r15,%rcx
|
||||
# c = x14 + x2
|
||||
lea (%rbx,%rcx),%r15
|
||||
# (uint32) c <<<= 13
|
||||
rol $13,%r15d
|
||||
# x6 ^= c
|
||||
xor %r15,%rax
|
||||
# c = x2 + x6
|
||||
lea (%rcx,%rax),%r15
|
||||
# (uint32) c <<<= 18
|
||||
rol $18,%r15d
|
||||
# x10 ^= c
|
||||
xor %r15,%rbp
|
||||
# x15 = x15_stack
|
||||
movq 176(%rsp),%r15
|
||||
# x10_stack = x10
|
||||
movq %rbp,168(%rsp)
|
||||
# d = x11 + x15
|
||||
lea (%r12,%r15),%rbp
|
||||
# (uint32) d <<<= 7
|
||||
rol $7,%ebp
|
||||
# x3 ^= d
|
||||
xor %rbp,%rsi
|
||||
# d = x15 + x3
|
||||
lea (%r15,%rsi),%rbp
|
||||
# (uint32) d <<<= 9
|
||||
rol $9,%ebp
|
||||
# x7 ^= d
|
||||
xor %rbp,%r8
|
||||
# d = x3 + x7
|
||||
lea (%rsi,%r8),%rbp
|
||||
# (uint32) d <<<= 13
|
||||
rol $13,%ebp
|
||||
# x11 ^= d
|
||||
xor %rbp,%r12
|
||||
# d = x7 + x11
|
||||
lea (%r8,%r12),%rbp
|
||||
# (uint32) d <<<= 18
|
||||
rol $18,%ebp
|
||||
# x15 ^= d
|
||||
xor %rbp,%r15
|
||||
# x15_stack = x15
|
||||
movq %r15,176(%rsp)
|
||||
# x5 = x5_stack
|
||||
movq 160(%rsp),%r15
|
||||
# a = x3 + x0
|
||||
lea (%rsi,%rdx),%rbp
|
||||
# (uint32) a <<<= 7
|
||||
rol $7,%ebp
|
||||
# x1 ^= a
|
||||
xor %rbp,%rdi
|
||||
# b = x4 + x5
|
||||
lea (%r9,%r15),%rbp
|
||||
# (uint32) b <<<= 7
|
||||
rol $7,%ebp
|
||||
# x6 ^= b
|
||||
xor %rbp,%rax
|
||||
# a = x0 + x1
|
||||
lea (%rdx,%rdi),%rbp
|
||||
# (uint32) a <<<= 9
|
||||
rol $9,%ebp
|
||||
# x2 ^= a
|
||||
xor %rbp,%rcx
|
||||
# b = x5 + x6
|
||||
lea (%r15,%rax),%rbp
|
||||
# (uint32) b <<<= 9
|
||||
rol $9,%ebp
|
||||
# x7 ^= b
|
||||
xor %rbp,%r8
|
||||
# a = x1 + x2
|
||||
lea (%rdi,%rcx),%rbp
|
||||
# (uint32) a <<<= 13
|
||||
rol $13,%ebp
|
||||
# x3 ^= a
|
||||
xor %rbp,%rsi
|
||||
# b = x6 + x7
|
||||
lea (%rax,%r8),%rbp
|
||||
# (uint32) b <<<= 13
|
||||
rol $13,%ebp
|
||||
# x4 ^= b
|
||||
xor %rbp,%r9
|
||||
# a = x2 + x3
|
||||
lea (%rcx,%rsi),%rbp
|
||||
# (uint32) a <<<= 18
|
||||
rol $18,%ebp
|
||||
# x0 ^= a
|
||||
xor %rbp,%rdx
|
||||
# b = x7 + x4
|
||||
lea (%r8,%r9),%rbp
|
||||
# (uint32) b <<<= 18
|
||||
rol $18,%ebp
|
||||
# x5 ^= b
|
||||
xor %rbp,%r15
|
||||
# x10 = x10_stack
|
||||
movq 168(%rsp),%rbp
|
||||
# x5_stack = x5
|
||||
movq %r15,160(%rsp)
|
||||
# c = x9 + x10
|
||||
lea (%r10,%rbp),%r15
|
||||
# (uint32) c <<<= 7
|
||||
rol $7,%r15d
|
||||
# x11 ^= c
|
||||
xor %r15,%r12
|
||||
# c = x10 + x11
|
||||
lea (%rbp,%r12),%r15
|
||||
# (uint32) c <<<= 9
|
||||
rol $9,%r15d
|
||||
# x8 ^= c
|
||||
xor %r15,%r11
|
||||
# c = x11 + x8
|
||||
lea (%r12,%r11),%r15
|
||||
# (uint32) c <<<= 13
|
||||
rol $13,%r15d
|
||||
# x9 ^= c
|
||||
xor %r15,%r10
|
||||
# c = x8 + x9
|
||||
lea (%r11,%r10),%r15
|
||||
# (uint32) c <<<= 18
|
||||
rol $18,%r15d
|
||||
# x10 ^= c
|
||||
xor %r15,%rbp
|
||||
# x15 = x15_stack
|
||||
movq 176(%rsp),%r15
|
||||
# x10_stack = x10
|
||||
movq %rbp,168(%rsp)
|
||||
# d = x14 + x15
|
||||
lea (%rbx,%r15),%rbp
|
||||
# (uint32) d <<<= 7
|
||||
rol $7,%ebp
|
||||
# x12 ^= d
|
||||
xor %rbp,%r14
|
||||
# d = x15 + x12
|
||||
lea (%r15,%r14),%rbp
|
||||
# (uint32) d <<<= 9
|
||||
rol $9,%ebp
|
||||
# x13 ^= d
|
||||
xor %rbp,%r13
|
||||
# d = x12 + x13
|
||||
lea (%r14,%r13),%rbp
|
||||
# (uint32) d <<<= 13
|
||||
rol $13,%ebp
|
||||
# x14 ^= d
|
||||
xor %rbp,%rbx
|
||||
# d = x13 + x14
|
||||
lea (%r13,%rbx),%rbp
|
||||
# (uint32) d <<<= 18
|
||||
rol $18,%ebp
|
||||
# x15 ^= d
|
||||
xor %rbp,%r15
|
||||
# x15_stack = x15
|
||||
movq %r15,176(%rsp)
|
||||
# x5 = x5_stack
|
||||
movq 160(%rsp),%r15
|
||||
# a = x12 + x0
|
||||
lea (%r14,%rdx),%rbp
|
||||
# (uint32) a <<<= 7
|
||||
rol $7,%ebp
|
||||
# x4 ^= a
|
||||
xor %rbp,%r9
|
||||
# b = x1 + x5
|
||||
lea (%rdi,%r15),%rbp
|
||||
# (uint32) b <<<= 7
|
||||
rol $7,%ebp
|
||||
# x9 ^= b
|
||||
xor %rbp,%r10
|
||||
# a = x0 + x4
|
||||
lea (%rdx,%r9),%rbp
|
||||
# (uint32) a <<<= 9
|
||||
rol $9,%ebp
|
||||
# x8 ^= a
|
||||
xor %rbp,%r11
|
||||
# b = x5 + x9
|
||||
lea (%r15,%r10),%rbp
|
||||
# (uint32) b <<<= 9
|
||||
rol $9,%ebp
|
||||
# x13 ^= b
|
||||
xor %rbp,%r13
|
||||
# a = x4 + x8
|
||||
lea (%r9,%r11),%rbp
|
||||
# (uint32) a <<<= 13
|
||||
rol $13,%ebp
|
||||
# x12 ^= a
|
||||
xor %rbp,%r14
|
||||
# b = x9 + x13
|
||||
lea (%r10,%r13),%rbp
|
||||
# (uint32) b <<<= 13
|
||||
rol $13,%ebp
|
||||
# x1 ^= b
|
||||
xor %rbp,%rdi
|
||||
# a = x8 + x12
|
||||
lea (%r11,%r14),%rbp
|
||||
# (uint32) a <<<= 18
|
||||
rol $18,%ebp
|
||||
# x0 ^= a
|
||||
xor %rbp,%rdx
|
||||
# b = x13 + x1
|
||||
lea (%r13,%rdi),%rbp
|
||||
# (uint32) b <<<= 18
|
||||
rol $18,%ebp
|
||||
# x5 ^= b
|
||||
xor %rbp,%r15
|
||||
# x10 = x10_stack
|
||||
movq 168(%rsp),%rbp
|
||||
# x5_stack = x5
|
||||
movq %r15,160(%rsp)
|
||||
# c = x6 + x10
|
||||
lea (%rax,%rbp),%r15
|
||||
# (uint32) c <<<= 7
|
||||
rol $7,%r15d
|
||||
# x14 ^= c
|
||||
xor %r15,%rbx
|
||||
# c = x10 + x14
|
||||
lea (%rbp,%rbx),%r15
|
||||
# (uint32) c <<<= 9
|
||||
rol $9,%r15d
|
||||
# x2 ^= c
|
||||
xor %r15,%rcx
|
||||
# c = x14 + x2
|
||||
lea (%rbx,%rcx),%r15
|
||||
# (uint32) c <<<= 13
|
||||
rol $13,%r15d
|
||||
# x6 ^= c
|
||||
xor %r15,%rax
|
||||
# c = x2 + x6
|
||||
lea (%rcx,%rax),%r15
|
||||
# (uint32) c <<<= 18
|
||||
rol $18,%r15d
|
||||
# x10 ^= c
|
||||
xor %r15,%rbp
|
||||
# x15 = x15_stack
|
||||
movq 176(%rsp),%r15
|
||||
# x10_stack = x10
|
||||
movq %rbp,168(%rsp)
|
||||
# d = x11 + x15
|
||||
lea (%r12,%r15),%rbp
|
||||
# (uint32) d <<<= 7
|
||||
rol $7,%ebp
|
||||
# x3 ^= d
|
||||
xor %rbp,%rsi
|
||||
# d = x15 + x3
|
||||
lea (%r15,%rsi),%rbp
|
||||
# (uint32) d <<<= 9
|
||||
rol $9,%ebp
|
||||
# x7 ^= d
|
||||
xor %rbp,%r8
|
||||
# d = x3 + x7
|
||||
lea (%rsi,%r8),%rbp
|
||||
# (uint32) d <<<= 13
|
||||
rol $13,%ebp
|
||||
# x11 ^= d
|
||||
xor %rbp,%r12
|
||||
# d = x7 + x11
|
||||
lea (%r8,%r12),%rbp
|
||||
# (uint32) d <<<= 18
|
||||
rol $18,%ebp
|
||||
# x15 ^= d
|
||||
xor %rbp,%r15
|
||||
# x15_stack = x15
|
||||
movq %r15,176(%rsp)
|
||||
# x5 = x5_stack
|
||||
movq 160(%rsp),%r15
|
||||
# a = x3 + x0
|
||||
lea (%rsi,%rdx),%rbp
|
||||
# (uint32) a <<<= 7
|
||||
rol $7,%ebp
|
||||
# x1 ^= a
|
||||
xor %rbp,%rdi
|
||||
# b = x4 + x5
|
||||
lea (%r9,%r15),%rbp
|
||||
# (uint32) b <<<= 7
|
||||
rol $7,%ebp
|
||||
# x6 ^= b
|
||||
xor %rbp,%rax
|
||||
# a = x0 + x1
|
||||
lea (%rdx,%rdi),%rbp
|
||||
# (uint32) a <<<= 9
|
||||
rol $9,%ebp
|
||||
# x2 ^= a
|
||||
xor %rbp,%rcx
|
||||
# b = x5 + x6
|
||||
lea (%r15,%rax),%rbp
|
||||
# (uint32) b <<<= 9
|
||||
rol $9,%ebp
|
||||
# x7 ^= b
|
||||
xor %rbp,%r8
|
||||
# a = x1 + x2
|
||||
lea (%rdi,%rcx),%rbp
|
||||
# (uint32) a <<<= 13
|
||||
rol $13,%ebp
|
||||
# x3 ^= a
|
||||
xor %rbp,%rsi
|
||||
# b = x6 + x7
|
||||
lea (%rax,%r8),%rbp
|
||||
# (uint32) b <<<= 13
|
||||
rol $13,%ebp
|
||||
# x4 ^= b
|
||||
xor %rbp,%r9
|
||||
# a = x2 + x3
|
||||
lea (%rcx,%rsi),%rbp
|
||||
# (uint32) a <<<= 18
|
||||
rol $18,%ebp
|
||||
# x0 ^= a
|
||||
xor %rbp,%rdx
|
||||
# b = x7 + x4
|
||||
lea (%r8,%r9),%rbp
|
||||
# (uint32) b <<<= 18
|
||||
rol $18,%ebp
|
||||
# x5 ^= b
|
||||
xor %rbp,%r15
|
||||
# x10 = x10_stack
|
||||
movq 168(%rsp),%rbp
|
||||
# x5_stack = x5
|
||||
movq %r15,160(%rsp)
|
||||
# c = x9 + x10
|
||||
lea (%r10,%rbp),%r15
|
||||
# (uint32) c <<<= 7
|
||||
rol $7,%r15d
|
||||
# x11 ^= c
|
||||
xor %r15,%r12
|
||||
# c = x10 + x11
|
||||
lea (%rbp,%r12),%r15
|
||||
# (uint32) c <<<= 9
|
||||
rol $9,%r15d
|
||||
# x8 ^= c
|
||||
xor %r15,%r11
|
||||
# c = x11 + x8
|
||||
lea (%r12,%r11),%r15
|
||||
# (uint32) c <<<= 13
|
||||
rol $13,%r15d
|
||||
# x9 ^= c
|
||||
xor %r15,%r10
|
||||
# c = x8 + x9
|
||||
lea (%r11,%r10),%r15
|
||||
# (uint32) c <<<= 18
|
||||
rol $18,%r15d
|
||||
# x10 ^= c
|
||||
xor %r15,%rbp
|
||||
# x15 = x15_stack
|
||||
movq 176(%rsp),%r15
|
||||
# x10_stack = x10
|
||||
movq %rbp,168(%rsp)
|
||||
# d = x14 + x15
|
||||
lea (%rbx,%r15),%rbp
|
||||
# (uint32) d <<<= 7
|
||||
rol $7,%ebp
|
||||
# x12 ^= d
|
||||
xor %rbp,%r14
|
||||
# d = x15 + x12
|
||||
lea (%r15,%r14),%rbp
|
||||
# (uint32) d <<<= 9
|
||||
rol $9,%ebp
|
||||
# x13 ^= d
|
||||
xor %rbp,%r13
|
||||
# d = x12 + x13
|
||||
lea (%r14,%r13),%rbp
|
||||
# (uint32) d <<<= 13
|
||||
rol $13,%ebp
|
||||
# x14 ^= d
|
||||
xor %rbp,%rbx
|
||||
# d = x13 + x14
|
||||
lea (%r13,%rbx),%rbp
|
||||
# (uint32) d <<<= 18
|
||||
rol $18,%ebp
|
||||
# x15 ^= d
|
||||
xor %rbp,%r15
|
||||
# x15_stack = x15
|
||||
movq %r15,176(%rsp)
|
||||
# i = i_backup
|
||||
movq 184(%rsp),%r15
|
||||
# unsigned>? i -= 4
|
||||
sub $4,%r15
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto mainloop if unsigned>
|
||||
ja ._mainloop
|
||||
# (uint32) x2 += j2
|
||||
addl 64(%rsp),%ecx
|
||||
# x3 <<= 32
|
||||
shl $32,%rsi
|
||||
# x3 += j2
|
||||
addq 64(%rsp),%rsi
|
||||
# (uint64) x3 >>= 32
|
||||
shr $32,%rsi
|
||||
# x3 <<= 32
|
||||
shl $32,%rsi
|
||||
# x2 += x3
|
||||
add %rsi,%rcx
|
||||
# (uint32) x6 += j6
|
||||
addl 80(%rsp),%eax
|
||||
# x7 <<= 32
|
||||
shl $32,%r8
|
||||
# x7 += j6
|
||||
addq 80(%rsp),%r8
|
||||
# (uint64) x7 >>= 32
|
||||
shr $32,%r8
|
||||
# x7 <<= 32
|
||||
shl $32,%r8
|
||||
# x6 += x7
|
||||
add %r8,%rax
|
||||
# (uint32) x8 += j8
|
||||
addl 88(%rsp),%r11d
|
||||
# x9 <<= 32
|
||||
shl $32,%r10
|
||||
# x9 += j8
|
||||
addq 88(%rsp),%r10
|
||||
# (uint64) x9 >>= 32
|
||||
shr $32,%r10
|
||||
# x9 <<= 32
|
||||
shl $32,%r10
|
||||
# x8 += x9
|
||||
add %r10,%r11
|
||||
# (uint32) x12 += j12
|
||||
addl 104(%rsp),%r14d
|
||||
# x13 <<= 32
|
||||
shl $32,%r13
|
||||
# x13 += j12
|
||||
addq 104(%rsp),%r13
|
||||
# (uint64) x13 >>= 32
|
||||
shr $32,%r13
|
||||
# x13 <<= 32
|
||||
shl $32,%r13
|
||||
# x12 += x13
|
||||
add %r13,%r14
|
||||
# (uint32) x0 += j0
|
||||
addl 56(%rsp),%edx
|
||||
# x1 <<= 32
|
||||
shl $32,%rdi
|
||||
# x1 += j0
|
||||
addq 56(%rsp),%rdi
|
||||
# (uint64) x1 >>= 32
|
||||
shr $32,%rdi
|
||||
# x1 <<= 32
|
||||
shl $32,%rdi
|
||||
# x0 += x1
|
||||
add %rdi,%rdx
|
||||
# x5 = x5_stack
|
||||
movq 160(%rsp),%rdi
|
||||
# (uint32) x4 += j4
|
||||
addl 72(%rsp),%r9d
|
||||
# x5 <<= 32
|
||||
shl $32,%rdi
|
||||
# x5 += j4
|
||||
addq 72(%rsp),%rdi
|
||||
# (uint64) x5 >>= 32
|
||||
shr $32,%rdi
|
||||
# x5 <<= 32
|
||||
shl $32,%rdi
|
||||
# x4 += x5
|
||||
add %rdi,%r9
|
||||
# x10 = x10_stack
|
||||
movq 168(%rsp),%r8
|
||||
# (uint32) x10 += j10
|
||||
addl 96(%rsp),%r8d
|
||||
# x11 <<= 32
|
||||
shl $32,%r12
|
||||
# x11 += j10
|
||||
addq 96(%rsp),%r12
|
||||
# (uint64) x11 >>= 32
|
||||
shr $32,%r12
|
||||
# x11 <<= 32
|
||||
shl $32,%r12
|
||||
# x10 += x11
|
||||
add %r12,%r8
|
||||
# x15 = x15_stack
|
||||
movq 176(%rsp),%rdi
|
||||
# (uint32) x14 += j14
|
||||
addl 112(%rsp),%ebx
|
||||
# x15 <<= 32
|
||||
shl $32,%rdi
|
||||
# x15 += j14
|
||||
addq 112(%rsp),%rdi
|
||||
# (uint64) x15 >>= 32
|
||||
shr $32,%rdi
|
||||
# x15 <<= 32
|
||||
shl $32,%rdi
|
||||
# x14 += x15
|
||||
add %rdi,%rbx
|
||||
# out = out_backup
|
||||
movq 136(%rsp),%rdi
|
||||
# m = m_backup
|
||||
movq 144(%rsp),%rsi
|
||||
# x0 ^= *(uint64 *) (m + 0)
|
||||
xorq 0(%rsi),%rdx
|
||||
# *(uint64 *) (out + 0) = x0
|
||||
movq %rdx,0(%rdi)
|
||||
# x2 ^= *(uint64 *) (m + 8)
|
||||
xorq 8(%rsi),%rcx
|
||||
# *(uint64 *) (out + 8) = x2
|
||||
movq %rcx,8(%rdi)
|
||||
# x4 ^= *(uint64 *) (m + 16)
|
||||
xorq 16(%rsi),%r9
|
||||
# *(uint64 *) (out + 16) = x4
|
||||
movq %r9,16(%rdi)
|
||||
# x6 ^= *(uint64 *) (m + 24)
|
||||
xorq 24(%rsi),%rax
|
||||
# *(uint64 *) (out + 24) = x6
|
||||
movq %rax,24(%rdi)
|
||||
# x8 ^= *(uint64 *) (m + 32)
|
||||
xorq 32(%rsi),%r11
|
||||
# *(uint64 *) (out + 32) = x8
|
||||
movq %r11,32(%rdi)
|
||||
# x10 ^= *(uint64 *) (m + 40)
|
||||
xorq 40(%rsi),%r8
|
||||
# *(uint64 *) (out + 40) = x10
|
||||
movq %r8,40(%rdi)
|
||||
# x12 ^= *(uint64 *) (m + 48)
|
||||
xorq 48(%rsi),%r14
|
||||
# *(uint64 *) (out + 48) = x12
|
||||
movq %r14,48(%rdi)
|
||||
# x14 ^= *(uint64 *) (m + 56)
|
||||
xorq 56(%rsi),%rbx
|
||||
# *(uint64 *) (out + 56) = x14
|
||||
movq %rbx,56(%rdi)
|
||||
# bytes = bytes_backup
|
||||
movq 152(%rsp),%rdx
|
||||
# in8 = j8
|
||||
movq 88(%rsp),%rcx
|
||||
# in8 += 1
|
||||
add $1,%rcx
|
||||
# j8 = in8
|
||||
movq %rcx,88(%rsp)
|
||||
# unsigned>? unsigned<? bytes - 64
|
||||
cmp $64,%rdx
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto bytesatleast65 if unsigned>
|
||||
ja ._bytesatleast65
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto bytesatleast64 if !unsigned<
|
||||
jae ._bytesatleast64
|
||||
# m = out
|
||||
mov %rdi,%rsi
|
||||
# out = ctarget
|
||||
movq 128(%rsp),%rdi
|
||||
# i = bytes
|
||||
mov %rdx,%rcx
|
||||
# while (i) { *out++ = *m++; --i }
|
||||
rep movsb
|
||||
# comment:fp stack unchanged by fallthrough
|
||||
# bytesatleast64:
|
||||
._bytesatleast64:
|
||||
# x = x_backup
|
||||
movq 120(%rsp),%rdi
|
||||
# in8 = j8
|
||||
movq 88(%rsp),%rsi
|
||||
# *(uint64 *) (x + 32) = in8
|
||||
movq %rsi,32(%rdi)
|
||||
# r11 = r11_stack
|
||||
movq 0(%rsp),%r11
|
||||
# r12 = r12_stack
|
||||
movq 8(%rsp),%r12
|
||||
# r13 = r13_stack
|
||||
movq 16(%rsp),%r13
|
||||
# r14 = r14_stack
|
||||
movq 24(%rsp),%r14
|
||||
# r15 = r15_stack
|
||||
movq 32(%rsp),%r15
|
||||
# rbx = rbx_stack
|
||||
movq 40(%rsp),%rbx
|
||||
# rbp = rbp_stack
|
||||
movq 48(%rsp),%rbp
|
||||
# comment:fp stack unchanged by fallthrough
|
||||
# done:
|
||||
._done:
|
||||
# leave
|
||||
add %r11,%rsp
|
||||
mov %rdi,%rax
|
||||
mov %rsi,%rdx
|
||||
ret
|
||||
# bytesatleast65:
|
||||
._bytesatleast65:
|
||||
# bytes -= 64
|
||||
sub $64,%rdx
|
||||
# out += 64
|
||||
add $64,%rdi
|
||||
# m += 64
|
||||
add $64,%rsi
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto bytesatleast1
|
||||
jmp ._bytesatleast1
|
||||
ENDPROC(salsa20_encrypt_bytes)
|
||||
|
||||
# enter salsa20_keysetup
|
||||
ENTRY(salsa20_keysetup)
|
||||
mov %rsp,%r11
|
||||
and $31,%r11
|
||||
add $256,%r11
|
||||
sub %r11,%rsp
|
||||
# k = arg2
|
||||
mov %rsi,%rsi
|
||||
# kbits = arg3
|
||||
mov %rdx,%rdx
|
||||
# x = arg1
|
||||
mov %rdi,%rdi
|
||||
# in0 = *(uint64 *) (k + 0)
|
||||
movq 0(%rsi),%r8
|
||||
# in2 = *(uint64 *) (k + 8)
|
||||
movq 8(%rsi),%r9
|
||||
# *(uint64 *) (x + 4) = in0
|
||||
movq %r8,4(%rdi)
|
||||
# *(uint64 *) (x + 12) = in2
|
||||
movq %r9,12(%rdi)
|
||||
# unsigned<? kbits - 256
|
||||
cmp $256,%rdx
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto kbits128 if unsigned<
|
||||
jb ._kbits128
|
||||
# kbits256:
|
||||
._kbits256:
|
||||
# in10 = *(uint64 *) (k + 16)
|
||||
movq 16(%rsi),%rdx
|
||||
# in12 = *(uint64 *) (k + 24)
|
||||
movq 24(%rsi),%rsi
|
||||
# *(uint64 *) (x + 44) = in10
|
||||
movq %rdx,44(%rdi)
|
||||
# *(uint64 *) (x + 52) = in12
|
||||
movq %rsi,52(%rdi)
|
||||
# in0 = 1634760805
|
||||
mov $1634760805,%rsi
|
||||
# in4 = 857760878
|
||||
mov $857760878,%rdx
|
||||
# in10 = 2036477234
|
||||
mov $2036477234,%rcx
|
||||
# in14 = 1797285236
|
||||
mov $1797285236,%r8
|
||||
# *(uint32 *) (x + 0) = in0
|
||||
movl %esi,0(%rdi)
|
||||
# *(uint32 *) (x + 20) = in4
|
||||
movl %edx,20(%rdi)
|
||||
# *(uint32 *) (x + 40) = in10
|
||||
movl %ecx,40(%rdi)
|
||||
# *(uint32 *) (x + 60) = in14
|
||||
movl %r8d,60(%rdi)
|
||||
# comment:fp stack unchanged by jump
|
||||
# goto keysetupdone
|
||||
jmp ._keysetupdone
|
||||
# kbits128:
|
||||
._kbits128:
|
||||
# in10 = *(uint64 *) (k + 0)
|
||||
movq 0(%rsi),%rdx
|
||||
# in12 = *(uint64 *) (k + 8)
|
||||
movq 8(%rsi),%rsi
|
||||
# *(uint64 *) (x + 44) = in10
|
||||
movq %rdx,44(%rdi)
|
||||
# *(uint64 *) (x + 52) = in12
|
||||
movq %rsi,52(%rdi)
|
||||
# in0 = 1634760805
|
||||
mov $1634760805,%rsi
|
||||
# in4 = 824206446
|
||||
mov $824206446,%rdx
|
||||
# in10 = 2036477238
|
||||
mov $2036477238,%rcx
|
||||
# in14 = 1797285236
|
||||
mov $1797285236,%r8
|
||||
# *(uint32 *) (x + 0) = in0
|
||||
movl %esi,0(%rdi)
|
||||
# *(uint32 *) (x + 20) = in4
|
||||
movl %edx,20(%rdi)
|
||||
# *(uint32 *) (x + 40) = in10
|
||||
movl %ecx,40(%rdi)
|
||||
# *(uint32 *) (x + 60) = in14
|
||||
movl %r8d,60(%rdi)
|
||||
# keysetupdone:
|
||||
._keysetupdone:
|
||||
# leave
|
||||
add %r11,%rsp
|
||||
mov %rdi,%rax
|
||||
mov %rsi,%rdx
|
||||
ret
|
||||
ENDPROC(salsa20_keysetup)
|
||||
|
||||
# enter salsa20_ivsetup
|
||||
ENTRY(salsa20_ivsetup)
|
||||
mov %rsp,%r11
|
||||
and $31,%r11
|
||||
add $256,%r11
|
||||
sub %r11,%rsp
|
||||
# iv = arg2
|
||||
mov %rsi,%rsi
|
||||
# x = arg1
|
||||
mov %rdi,%rdi
|
||||
# in6 = *(uint64 *) (iv + 0)
|
||||
movq 0(%rsi),%rsi
|
||||
# in8 = 0
|
||||
mov $0,%r8
|
||||
# *(uint64 *) (x + 24) = in6
|
||||
movq %rsi,24(%rdi)
|
||||
# *(uint64 *) (x + 32) = in8
|
||||
movq %r8,32(%rdi)
|
||||
# leave
|
||||
add %r11,%rsp
|
||||
mov %rdi,%rax
|
||||
mov %rsi,%rdx
|
||||
ret
|
||||
ENDPROC(salsa20_ivsetup)
|
@ -1,116 +0,0 @@
|
||||
/*
|
||||
* Glue code for optimized assembly version of Salsa20.
|
||||
*
|
||||
* Copyright (c) 2007 Tan Swee Heng <thesweeheng@gmail.com>
|
||||
*
|
||||
* The assembly codes are public domain assembly codes written by Daniel. J.
|
||||
* Bernstein <djb@cr.yp.to>. The codes are modified to include indentation
|
||||
* and to remove extraneous comments and functions that are not needed.
|
||||
* - i586 version, renamed as salsa20-i586-asm_32.S
|
||||
* available from <http://cr.yp.to/snuffle/salsa20/x86-pm/salsa20.s>
|
||||
* - x86-64 version, renamed as salsa20-x86_64-asm_64.S
|
||||
* available from <http://cr.yp.to/snuffle/salsa20/amd64-3/salsa20.s>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/crypto.h>
|
||||
|
||||
#define SALSA20_IV_SIZE 8U
|
||||
#define SALSA20_MIN_KEY_SIZE 16U
|
||||
#define SALSA20_MAX_KEY_SIZE 32U
|
||||
|
||||
struct salsa20_ctx
|
||||
{
|
||||
u32 input[16];
|
||||
};
|
||||
|
||||
asmlinkage void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k,
|
||||
u32 keysize, u32 ivsize);
|
||||
asmlinkage void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv);
|
||||
asmlinkage void salsa20_encrypt_bytes(struct salsa20_ctx *ctx,
|
||||
const u8 *src, u8 *dst, u32 bytes);
|
||||
|
||||
static int setkey(struct crypto_tfm *tfm, const u8 *key,
|
||||
unsigned int keysize)
|
||||
{
|
||||
struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm);
|
||||
salsa20_keysetup(ctx, key, keysize*8, SALSA20_IV_SIZE*8);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int encrypt(struct blkcipher_desc *desc,
|
||||
struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
struct blkcipher_walk walk;
|
||||
struct crypto_blkcipher *tfm = desc->tfm;
|
||||
struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm);
|
||||
int err;
|
||||
|
||||
blkcipher_walk_init(&walk, dst, src, nbytes);
|
||||
err = blkcipher_walk_virt_block(desc, &walk, 64);
|
||||
|
||||
salsa20_ivsetup(ctx, walk.iv);
|
||||
|
||||
while (walk.nbytes >= 64) {
|
||||
salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
|
||||
walk.dst.virt.addr,
|
||||
walk.nbytes - (walk.nbytes % 64));
|
||||
err = blkcipher_walk_done(desc, &walk, walk.nbytes % 64);
|
||||
}
|
||||
|
||||
if (walk.nbytes) {
|
||||
salsa20_encrypt_bytes(ctx, walk.src.virt.addr,
|
||||
walk.dst.virt.addr, walk.nbytes);
|
||||
err = blkcipher_walk_done(desc, &walk, 0);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct crypto_alg alg = {
|
||||
.cra_name = "salsa20",
|
||||
.cra_driver_name = "salsa20-asm",
|
||||
.cra_priority = 200,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
|
||||
.cra_type = &crypto_blkcipher_type,
|
||||
.cra_blocksize = 1,
|
||||
.cra_ctxsize = sizeof(struct salsa20_ctx),
|
||||
.cra_alignmask = 3,
|
||||
.cra_module = THIS_MODULE,
|
||||
.cra_u = {
|
||||
.blkcipher = {
|
||||
.setkey = setkey,
|
||||
.encrypt = encrypt,
|
||||
.decrypt = encrypt,
|
||||
.min_keysize = SALSA20_MIN_KEY_SIZE,
|
||||
.max_keysize = SALSA20_MAX_KEY_SIZE,
|
||||
.ivsize = SALSA20_IV_SIZE,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static int __init init(void)
|
||||
{
|
||||
return crypto_register_alg(&alg);
|
||||
}
|
||||
|
||||
static void __exit fini(void)
|
||||
{
|
||||
crypto_unregister_alg(&alg);
|
||||
}
|
||||
|
||||
module_init(init);
|
||||
module_exit(fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
|
||||
MODULE_ALIAS_CRYPTO("salsa20");
|
||||
MODULE_ALIAS_CRYPTO("salsa20-asm");
|
@@ -290,7 +290,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
/* has the side-effect of processing the entire instruction */
insn_get_length(insn);
if (WARN_ON_ONCE(!insn_complete(insn)))
if (!insn_complete(insn))
return -ENOEXEC;

if (is_prefix_bad(insn))
@ -1230,12 +1230,20 @@ asmlinkage __visible void __init xen_start_kernel(void)
|
||||
|
||||
xen_setup_features();
|
||||
|
||||
xen_setup_machphys_mapping();
|
||||
|
||||
/* Install Xen paravirt ops */
|
||||
pv_info = xen_info;
|
||||
pv_init_ops.patch = paravirt_patch_default;
|
||||
pv_cpu_ops = xen_cpu_ops;
|
||||
xen_init_irq_ops();
|
||||
|
||||
/*
|
||||
* Setup xen_vcpu early because it is needed for
|
||||
* local_irq_disable(), irqs_disabled(), e.g. in printk().
|
||||
*
|
||||
* Don't do the full vcpu_info placement stuff until we have
|
||||
* the cpu_possible_mask and a non-dummy shared_info.
|
||||
*/
|
||||
xen_vcpu_info_reset(0);
|
||||
|
||||
x86_platform.get_nmi_reason = xen_get_nmi_reason;
|
||||
|
||||
@ -1247,6 +1255,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
|
||||
* Set up some pagetable state before starting to set any ptes.
|
||||
*/
|
||||
|
||||
xen_setup_machphys_mapping();
|
||||
xen_init_mmu_ops();
|
||||
|
||||
/* Prevent unwanted bits from being set in PTEs. */
|
||||
@ -1271,20 +1280,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
|
||||
get_cpu_cap(&boot_cpu_data);
|
||||
x86_configure_nx();
|
||||
|
||||
xen_init_irq_ops();
|
||||
|
||||
/* Let's presume PV guests always boot on vCPU with id 0. */
|
||||
per_cpu(xen_vcpu_id, 0) = 0;
|
||||
|
||||
/*
|
||||
* Setup xen_vcpu early because idt_setup_early_handler needs it for
|
||||
* local_irq_disable(), irqs_disabled().
|
||||
*
|
||||
* Don't do the full vcpu_info placement stuff until we have
|
||||
* the cpu_possible_mask and a non-dummy shared_info.
|
||||
*/
|
||||
xen_vcpu_info_reset(0);
|
||||
|
||||
idt_setup_early_handler();
|
||||
|
||||
xen_init_capabilities();
|
||||
|
@@ -128,8 +128,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {

void __init xen_init_irq_ops(void)
{
/* For PVH we use default pv_irq_ops settings. */
if (!xen_feature(XENFEAT_hvm_callback_vector))
pv_irq_ops = xen_irq_ops;
pv_irq_ops = xen_irq_ops;
x86_init.irqs.intr_init = xen_init_IRQ;
}
@@ -1324,32 +1324,6 @@ config CRYPTO_SALSA20
The Salsa20 stream cipher algorithm is designed by Daniel J.
Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>

config CRYPTO_SALSA20_586
tristate "Salsa20 stream cipher algorithm (i586)"
depends on (X86 || UML_X86) && !64BIT
select CRYPTO_BLKCIPHER
help
Salsa20 stream cipher algorithm.

Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>

The Salsa20 stream cipher algorithm is designed by Daniel J.
Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>

config CRYPTO_SALSA20_X86_64
tristate "Salsa20 stream cipher algorithm (x86_64)"
depends on (X86 || UML_X86) && 64BIT
select CRYPTO_BLKCIPHER
help
Salsa20 stream cipher algorithm.

Salsa20 is a stream cipher submitted to eSTREAM, the ECRYPT
Stream Cipher Project. See <http://www.ecrypt.eu.org/stream/>

The Salsa20 stream cipher algorithm is designed by Daniel J.
Bernstein <djb@cr.yp.to>. See <http://cr.yp.to/snuffle.html>

config CRYPTO_CHACHA20
tristate "ChaCha20 cipher algorithm"
select CRYPTO_BLKCIPHER
@ -1267,6 +1267,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
|
||||
return strcmp(buf, dmi->driver_data) < 0;
|
||||
}
|
||||
|
||||
static bool ahci_broken_lpm(struct pci_dev *pdev)
|
||||
{
|
||||
static const struct dmi_system_id sysids[] = {
|
||||
/* Various Lenovo 50 series have LPM issues with older BIOSen */
|
||||
{
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
|
||||
},
|
||||
.driver_data = "20180406", /* 1.31 */
|
||||
},
|
||||
{
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
|
||||
},
|
||||
.driver_data = "20180420", /* 1.28 */
|
||||
},
|
||||
{
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
|
||||
},
|
||||
.driver_data = "20180315", /* 1.33 */
|
||||
},
|
||||
{
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
|
||||
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
|
||||
},
|
||||
/*
|
||||
* Note date based on release notes, 2.35 has been
|
||||
* reported to be good, but I've been unable to get
|
||||
* a hold of the reporter to get the DMI BIOS date.
|
||||
* TODO: fix this.
|
||||
*/
|
||||
.driver_data = "20180310", /* 2.35 */
|
||||
},
|
||||
{ } /* terminate list */
|
||||
};
|
||||
const struct dmi_system_id *dmi = dmi_first_match(sysids);
|
||||
int year, month, date;
|
||||
char buf[9];
|
||||
|
||||
if (!dmi)
|
||||
return false;
|
||||
|
||||
dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
|
||||
snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
|
||||
|
||||
return strcmp(buf, dmi->driver_data) < 0;
|
||||
}
|
||||
|
||||
static bool ahci_broken_online(struct pci_dev *pdev)
|
||||
{
|
||||
#define ENCODE_BUSDEVFN(bus, slot, func) \
|
||||
@ -1677,6 +1730,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
||||
"quirky BIOS, skipping spindown on poweroff\n");
|
||||
}
|
||||
|
||||
if (ahci_broken_lpm(pdev)) {
|
||||
pi.flags |= ATA_FLAG_NO_LPM;
|
||||
dev_warn(&pdev->dev,
|
||||
"BIOS update required for Link Power Management support\n");
|
||||
}
|
||||
|
||||
if (ahci_broken_suspend(pdev)) {
|
||||
hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
|
||||
dev_warn(&pdev->dev,
|
||||
|
@@ -2501,6 +2501,9 @@ int ata_dev_configure(struct ata_device *dev)
(id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
dev->horkage |= ATA_HORKAGE_NOLPM;

if (ap->flags & ATA_FLAG_NO_LPM)
dev->horkage |= ATA_HORKAGE_NOLPM;

if (dev->horkage & ATA_HORKAGE_NOLPM) {
ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
@@ -3801,10 +3801,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
*/
goto invalid_param_len;
}
if (block > dev->n_sectors)
goto out_of_range;

all = cdb[14] & 0x1;
if (all) {
/*
* Ignore the block address (zone ID) as defined by ZBC.
*/
block = 0;
} else if (block >= dev->n_sectors) {
/*
* Block must be a valid zone ID (a zone start LBA).
*/
fp = 2;
goto invalid_fld;
}

if (ata_ncq_enabled(qc->dev) &&
ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
@@ -3833,10 +3843,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
invalid_fld:
ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
return 1;
out_of_range:
/* "Logical Block Address out of range" */
ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00);
return 1;
invalid_param_len:
/* "Parameter list length error" */
ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
@ -617,6 +617,36 @@ static void loop_reread_partitions(struct loop_device *lo,
|
||||
__func__, lo->lo_number, lo->lo_file_name, rc);
|
||||
}
|
||||
|
||||
static inline int is_loop_device(struct file *file)
|
||||
{
|
||||
struct inode *i = file->f_mapping->host;
|
||||
|
||||
return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
|
||||
}
|
||||
|
||||
static int loop_validate_file(struct file *file, struct block_device *bdev)
|
||||
{
|
||||
struct inode *inode = file->f_mapping->host;
|
||||
struct file *f = file;
|
||||
|
||||
/* Avoid recursion */
|
||||
while (is_loop_device(f)) {
|
||||
struct loop_device *l;
|
||||
|
||||
if (f->f_mapping->host->i_bdev == bdev)
|
||||
return -EBADF;
|
||||
|
||||
l = f->f_mapping->host->i_bdev->bd_disk->private_data;
|
||||
if (l->lo_state == Lo_unbound) {
|
||||
return -EINVAL;
|
||||
}
|
||||
f = l->lo_backing_file;
|
||||
}
|
||||
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* loop_change_fd switched the backing store of a loopback device to
|
||||
* a new file. This is useful for operating system installers to free up
|
||||
@ -646,14 +676,15 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
|
||||
if (!file)
|
||||
goto out;
|
||||
|
||||
error = loop_validate_file(file, bdev);
|
||||
if (error)
|
||||
goto out_putf;
|
||||
|
||||
inode = file->f_mapping->host;
|
||||
old_file = lo->lo_backing_file;
|
||||
|
||||
error = -EINVAL;
|
||||
|
||||
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
|
||||
goto out_putf;
|
||||
|
||||
/* size of the new backing store needs to be the same */
|
||||
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
|
||||
goto out_putf;
|
||||
@ -679,13 +710,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
|
||||
return error;
|
||||
}
|
||||
|
||||
static inline int is_loop_device(struct file *file)
|
||||
{
|
||||
struct inode *i = file->f_mapping->host;
|
||||
|
||||
return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
|
||||
}
|
||||
|
||||
/* loop sysfs attributes */
|
||||
|
||||
static ssize_t loop_attr_show(struct device *dev, char *page,
|
||||
@ -782,16 +806,17 @@ static struct attribute_group loop_attribute_group = {
|
||||
.attrs= loop_attrs,
|
||||
};
|
||||
|
||||
static int loop_sysfs_init(struct loop_device *lo)
|
||||
static void loop_sysfs_init(struct loop_device *lo)
|
||||
{
|
||||
return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
|
||||
&loop_attribute_group);
|
||||
lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
|
||||
&loop_attribute_group);
|
||||
}
|
||||
|
||||
static void loop_sysfs_exit(struct loop_device *lo)
|
||||
{
|
||||
sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
|
||||
&loop_attribute_group);
|
||||
if (lo->sysfs_inited)
|
||||
sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
|
||||
&loop_attribute_group);
|
||||
}
|
||||
|
||||
static void loop_config_discard(struct loop_device *lo)
|
||||
@ -850,7 +875,7 @@ static int loop_prepare_queue(struct loop_device *lo)
|
||||
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
|
||||
struct block_device *bdev, unsigned int arg)
|
||||
{
|
||||
struct file *file, *f;
|
||||
struct file *file;
|
||||
struct inode *inode;
|
||||
struct address_space *mapping;
|
||||
int lo_flags = 0;
|
||||
@ -869,29 +894,13 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
|
||||
if (lo->lo_state != Lo_unbound)
|
||||
goto out_putf;
|
||||
|
||||
/* Avoid recursion */
|
||||
f = file;
|
||||
while (is_loop_device(f)) {
|
||||
struct loop_device *l;
|
||||
|
||||
if (f->f_mapping->host->i_bdev == bdev)
|
||||
goto out_putf;
|
||||
|
||||
l = f->f_mapping->host->i_bdev->bd_disk->private_data;
|
||||
if (l->lo_state == Lo_unbound) {
|
||||
error = -EINVAL;
|
||||
goto out_putf;
|
||||
}
|
||||
f = l->lo_backing_file;
|
||||
}
|
||||
error = loop_validate_file(file, bdev);
|
||||
if (error)
|
||||
goto out_putf;
|
||||
|
||||
mapping = file->f_mapping;
|
||||
inode = mapping->host;
|
||||
|
||||
error = -EINVAL;
|
||||
if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
|
||||
goto out_putf;
|
||||
|
||||
if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
|
||||
!file->f_op->write_iter)
|
||||
lo_flags |= LO_FLAGS_READ_ONLY;
|
||||
|
@ -58,6 +58,7 @@ struct loop_device {
|
||||
struct kthread_worker worker;
|
||||
struct task_struct *worker_task;
|
||||
bool use_dio;
|
||||
bool sysfs_inited;
|
||||
|
||||
struct request_queue *lo_queue;
|
||||
struct blk_mq_tag_set tag_set;
|
||||
|
@@ -547,6 +547,14 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
{
u32 cnfg;

/*
* NACK interrupt is generated before the I2C controller generates
* the STOP condition on the bus. So wait for 2 clock periods
* before disabling the controller so that the STOP condition has
* been delivered properly.
*/
udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));

cnfg = i2c_readl(i2c_dev, I2C_CNFG);
if (cnfg & I2C_CNFG_PACKET_MODE_EN)
i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG);
@@ -708,15 +716,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
return 0;

/*
* NACK interrupt is generated before the I2C controller generates
* the STOP condition on the bus. So wait for 2 clock periods
* before resetting the controller so that the STOP condition has
* been delivered properly.
*/
if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));

tegra_i2c_init(i2c_dev);
if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
if (msg->flags & I2C_M_IGNORE_NAK)
@@ -34,6 +34,18 @@ config INFINIBAND_USER_ACCESS
libibverbs, libibcm and a hardware driver library from
<http://www.openfabrics.org/git/>.

config INFINIBAND_USER_ACCESS_UCM
bool "Userspace CM (UCM, DEPRECATED)"
depends on BROKEN
depends on INFINIBAND_USER_ACCESS
help
The UCM module has known security flaws, which no one is
interested to fix. The user-space part of this code was
dropped from the upstream a long time ago.

This option is DEPRECATED and planned to be removed.

config INFINIBAND_EXP_USER_ACCESS
bool "Allow experimental support for Infiniband ABI"
depends on INFINIBAND_USER_ACCESS
@@ -5,8 +5,8 @@ user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
obj-$(CONFIG_INFINIBAND) += ib_core.o ib_cm.o iw_cm.o \
$(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
$(user_access-y)
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y)
obj-$(CONFIG_INFINIBAND_USER_ACCESS_UCM) += ib_ucm.o $(user_access-y)

ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
@ -720,7 +720,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
|
||||
{
|
||||
struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
|
||||
|
||||
if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
|
||||
if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
|
||||
return -ENOMEM;
|
||||
|
||||
mhp->mpl[mhp->mpl_len++] = addr;
|
||||
|
@ -273,7 +273,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
|
||||
|
||||
lockdep_assert_held(&qp->s_lock);
|
||||
ps->s_txreq = get_txreq(ps->dev, qp);
|
||||
if (IS_ERR(ps->s_txreq))
|
||||
if (!ps->s_txreq)
|
||||
goto bail_no_tx;
|
||||
|
||||
ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright(c) 2015, 2016 Intel Corporation.
|
||||
* Copyright(c) 2015 - 2018 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
|
||||
int middle = 0;
|
||||
|
||||
ps->s_txreq = get_txreq(ps->dev, qp);
|
||||
if (IS_ERR(ps->s_txreq))
|
||||
if (!ps->s_txreq)
|
||||
goto bail_no_tx;
|
||||
|
||||
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright(c) 2015, 2016 Intel Corporation.
|
||||
* Copyright(c) 2015 - 2018 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
@ -479,7 +479,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
|
||||
u32 lid;
|
||||
|
||||
ps->s_txreq = get_txreq(ps->dev, qp);
|
||||
if (IS_ERR(ps->s_txreq))
|
||||
if (!ps->s_txreq)
|
||||
goto bail_no_tx;
|
||||
|
||||
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright(c) 2016 - 2017 Intel Corporation.
|
||||
* Copyright(c) 2016 - 2018 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
|
||||
struct rvt_qp *qp)
|
||||
__must_hold(&qp->s_lock)
|
||||
{
|
||||
struct verbs_txreq *tx = ERR_PTR(-EBUSY);
|
||||
struct verbs_txreq *tx = NULL;
|
||||
|
||||
write_seqlock(&dev->txwait_lock);
|
||||
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright(c) 2016 Intel Corporation.
|
||||
* Copyright(c) 2016 - 2018 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
|
||||
if (unlikely(!tx)) {
|
||||
/* call slow path to get the lock */
|
||||
tx = __get_txreq(dev, qp);
|
||||
if (IS_ERR(tx))
|
||||
if (!tx)
|
||||
return tx;
|
||||
}
|
||||
tx->qp = qp;
|
||||
|
@ -130,6 +130,8 @@ static void mce_kbd_rx_timeout(unsigned long data)
|
||||
|
||||
for (i = 0; i < MCIR2_MASK_KEYS_START; i++)
|
||||
input_report_key(mce_kbd->idev, kbd_keycodes[i], 0);
|
||||
|
||||
input_sync(mce_kbd->idev);
|
||||
}
|
||||
|
||||
static enum mce_kbd_mode mce_kbd_mode(struct mce_kbd_dec *data)
|
||||
|
@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file)
|
||||
static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
|
||||
{
|
||||
void __iomem *address = (void __iomem *)file->private_data;
|
||||
unsigned char *page;
|
||||
int retval;
|
||||
int len = 0;
|
||||
unsigned int value;
|
||||
|
||||
if (*offset < 0)
|
||||
return -EINVAL;
|
||||
if (count == 0 || count > 1024)
|
||||
return 0;
|
||||
if (*offset != 0)
|
||||
return 0;
|
||||
|
||||
page = (unsigned char *)__get_free_page(GFP_KERNEL);
|
||||
if (!page)
|
||||
return -ENOMEM;
|
||||
char lbuf[20];
|
||||
|
||||
value = readl(address);
|
||||
len = sprintf(page, "%d\n", value);
|
||||
len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
|
||||
|
||||
if (copy_to_user(buf, page, len)) {
|
||||
retval = -EFAULT;
|
||||
goto exit;
|
||||
}
|
||||
*offset += len;
|
||||
retval = len;
|
||||
|
||||
exit:
|
||||
free_page((unsigned long)page);
|
||||
return retval;
|
||||
return simple_read_from_buffer(buf, count, offset, lbuf, len);
|
||||
}
|
||||
|
||||
static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
|
||||
|
@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b,
|
||||
unsigned int num_pages, bool is_2m_pages, unsigned int *target)
|
||||
{
|
||||
unsigned long status;
|
||||
unsigned long pfn = page_to_pfn(b->page);
|
||||
unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
|
||||
|
||||
STATS_INC(b->stats.lock[is_2m_pages]);
|
||||
|
||||
@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b,
|
||||
unsigned int num_pages, bool is_2m_pages, unsigned int *target)
|
||||
{
|
||||
unsigned long status;
|
||||
unsigned long pfn = page_to_pfn(b->page);
|
||||
unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
|
||||
|
||||
STATS_INC(b->stats.unlock[is_2m_pages]);
|
||||
|
||||
|
@ -1089,8 +1089,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
|
||||
* It's used when HS400 mode is enabled.
|
||||
*/
|
||||
if (data->flags & MMC_DATA_WRITE &&
|
||||
!(host->timing != MMC_TIMING_MMC_HS400))
|
||||
return;
|
||||
host->timing != MMC_TIMING_MMC_HS400)
|
||||
goto disable;
|
||||
|
||||
if (data->flags & MMC_DATA_WRITE)
|
||||
enable = SDMMC_CARD_WR_THR_EN;
|
||||
@ -1098,7 +1098,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
|
||||
enable = SDMMC_CARD_RD_THR_EN;
|
||||
|
||||
if (host->timing != MMC_TIMING_MMC_HS200 &&
|
||||
host->timing != MMC_TIMING_UHS_SDR104)
|
||||
host->timing != MMC_TIMING_UHS_SDR104 &&
|
||||
host->timing != MMC_TIMING_MMC_HS400)
|
||||
goto disable;
|
||||
|
||||
blksz_depth = blksz / (1 << host->data_shift);
|
||||
|
@ -305,6 +305,15 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
|
||||
|
||||
if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
|
||||
val |= SDHCI_SUPPORT_HS400;
|
||||
|
||||
/*
|
||||
* Do not advertise faster UHS modes if there are no
|
||||
* pinctrl states for 100MHz/200MHz.
|
||||
*/
|
||||
if (IS_ERR_OR_NULL(imx_data->pins_100mhz) ||
|
||||
IS_ERR_OR_NULL(imx_data->pins_200mhz))
|
||||
val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50
|
||||
| SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1135,18 +1144,6 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
|
||||
ESDHC_PINCTRL_STATE_100MHZ);
|
||||
imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
|
||||
ESDHC_PINCTRL_STATE_200MHZ);
|
||||
if (IS_ERR(imx_data->pins_100mhz) ||
|
||||
IS_ERR(imx_data->pins_200mhz)) {
|
||||
dev_warn(mmc_dev(host->mmc),
|
||||
"could not get ultra high speed state, work on normal mode\n");
|
||||
/*
|
||||
* fall back to not supporting uhs by specifying no
|
||||
* 1.8v quirk
|
||||
*/
|
||||
host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
|
||||
}
|
||||
} else {
|
||||
host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
|
||||
}
|
||||
|
||||
/* call to generic mmc_of_parse to support additional capabilities */
|
||||
|
@ -1233,17 +1233,15 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
|
||||
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
|
||||
int qid, int depth)
|
||||
{
|
||||
if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
|
||||
unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
|
||||
dev->ctrl.page_size);
|
||||
nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
|
||||
nvmeq->sq_cmds_io = dev->cmb + offset;
|
||||
} else {
|
||||
nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
|
||||
&nvmeq->sq_dma_addr, GFP_KERNEL);
|
||||
if (!nvmeq->sq_cmds)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* CMB SQEs will be mapped before creation */
|
||||
if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz))
|
||||
return 0;
|
||||
|
||||
nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
|
||||
&nvmeq->sq_dma_addr, GFP_KERNEL);
|
||||
if (!nvmeq->sq_cmds)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1320,6 +1318,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
|
||||
struct nvme_dev *dev = nvmeq->dev;
|
||||
int result;
|
||||
|
||||
if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
|
||||
unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
|
||||
dev->ctrl.page_size);
|
||||
nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
|
||||
nvmeq->sq_cmds_io = dev->cmb + offset;
|
||||
}
|
||||
|
||||
nvmeq->cq_vector = qid - 1;
|
||||
result = adapter_alloc_cq(dev, qid, nvmeq);
|
||||
if (result < 0)
|
||||
|
@ -1504,6 +1504,13 @@ enum FW_BOOT_CONTEXT {
|
||||
|
||||
#define MR_CAN_HANDLE_SYNC_CACHE_OFFSET 0X01000000
|
||||
|
||||
enum MR_ADAPTER_TYPE {
|
||||
MFI_SERIES = 1,
|
||||
THUNDERBOLT_SERIES = 2,
|
||||
INVADER_SERIES = 3,
|
||||
VENTURA_SERIES = 4,
|
||||
};
|
||||
|
||||
/*
|
||||
* register set for both 1068 and 1078 controllers
|
||||
* structure extended for 1078 registers
|
||||
@ -2092,6 +2099,7 @@ enum MR_PD_TYPE {
|
||||
|
||||
struct megasas_instance {
|
||||
|
||||
unsigned int *reply_map;
|
||||
__le32 *producer;
|
||||
dma_addr_t producer_h;
|
||||
__le32 *consumer;
|
||||
@ -2236,12 +2244,12 @@ struct megasas_instance {
|
||||
bool dev_handle;
|
||||
bool fw_sync_cache_support;
|
||||
u32 mfi_frame_size;
|
||||
bool is_ventura;
|
||||
bool msix_combined;
|
||||
u16 max_raid_mapsize;
|
||||
/* preffered count to send as LDIO irrspective of FP capable.*/
|
||||
u8 r1_ldio_hint_default;
|
||||
u32 nvme_page_size;
|
||||
u8 adapter_type;
|
||||
};
|
||||
struct MR_LD_VF_MAP {
|
||||
u32 size;
|
||||
|
@ -2023,7 +2023,7 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance)
|
||||
msleep(1000);
|
||||
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
|
||||
(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
|
||||
(instance->ctrl_context)) {
|
||||
(instance->adapter_type != MFI_SERIES)) {
|
||||
writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
|
||||
/* Flush */
|
||||
readl(&instance->reg_set->doorbell);
|
||||
@ -2494,7 +2494,8 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
|
||||
dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
|
||||
instance->host->host_no);
|
||||
|
||||
if (instance->ctrl_context && !instance->mask_interrupts)
|
||||
if ((instance->adapter_type != MFI_SERIES) &&
|
||||
!instance->mask_interrupts)
|
||||
retval = megasas_issue_blocked_cmd(instance, cmd,
|
||||
MEGASAS_ROUTINE_WAIT_TIME_VF);
|
||||
else
|
||||
@ -2790,7 +2791,9 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
|
||||
/*
|
||||
* First wait for all commands to complete
|
||||
*/
|
||||
if (instance->ctrl_context) {
|
||||
if (instance->adapter_type == MFI_SERIES) {
|
||||
ret = megasas_generic_reset(scmd);
|
||||
} else {
|
||||
struct megasas_cmd_fusion *cmd;
|
||||
cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
|
||||
if (cmd)
|
||||
@ -2798,8 +2801,7 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
|
||||
MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
|
||||
ret = megasas_reset_fusion(scmd->device->host,
|
||||
SCSIIO_TIMEOUT_OCR);
|
||||
} else
|
||||
ret = megasas_generic_reset(scmd);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -2816,7 +2818,7 @@ static int megasas_task_abort(struct scsi_cmnd *scmd)
|
||||
|
||||
instance = (struct megasas_instance *)scmd->device->host->hostdata;
|
||||
|
||||
if (instance->ctrl_context)
|
||||
if (instance->adapter_type != MFI_SERIES)
|
||||
ret = megasas_task_abort_fusion(scmd);
|
||||
else {
|
||||
sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
|
||||
@ -2838,7 +2840,7 @@ static int megasas_reset_target(struct scsi_cmnd *scmd)
|
||||
|
||||
instance = (struct megasas_instance *)scmd->device->host->hostdata;
|
||||
|
||||
if (instance->ctrl_context)
|
||||
if (instance->adapter_type != MFI_SERIES)
|
||||
ret = megasas_reset_target_fusion(scmd);
|
||||
else {
|
||||
sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
|
||||
@ -3715,7 +3717,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
|
||||
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
|
||||
(instance->pdev->device ==
|
||||
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
|
||||
(instance->ctrl_context))
|
||||
(instance->adapter_type != MFI_SERIES))
|
||||
writel(
|
||||
MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
|
||||
&instance->reg_set->doorbell);
|
||||
@ -3733,7 +3735,7 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
|
||||
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
|
||||
(instance->pdev->device ==
|
||||
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
|
||||
(instance->ctrl_context))
|
||||
(instance->adapter_type != MFI_SERIES))
|
||||
writel(MFI_INIT_HOTPLUG,
|
||||
&instance->reg_set->doorbell);
|
||||
else
|
||||
@ -3753,11 +3755,11 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
|
||||
PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
|
||||
(instance->pdev->device ==
|
||||
PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
|
||||
(instance->ctrl_context)) {
|
||||
(instance->adapter_type != MFI_SERIES)) {
|
||||
writel(MFI_RESET_FLAGS,
|
||||
&instance->reg_set->doorbell);
|
||||
|
||||
if (instance->ctrl_context) {
|
||||
if (instance->adapter_type != MFI_SERIES) {
|
||||
for (i = 0; i < (10 * 1000); i += 20) {
|
||||
if (readl(
|
||||
&instance->
|
||||
@ -3924,7 +3926,8 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
|
||||
* max_sge_sz = 12 byte (sizeof megasas_sge64)
|
||||
* Total 192 byte (3 MFI frame of 64 byte)
|
||||
*/
|
||||
frame_count = instance->ctrl_context ? (3 + 1) : (15 + 1);
|
||||
frame_count = (instance->adapter_type == MFI_SERIES) ?
|
||||
(15 + 1) : (3 + 1);
|
||||
instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
|
||||
/*
|
||||
* Use DMA pool facility provided by PCI layer
|
||||
@ -3979,7 +3982,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance)
|
||||
memset(cmd->frame, 0, instance->mfi_frame_size);
|
||||
cmd->frame->io.context = cpu_to_le32(cmd->index);
|
||||
cmd->frame->io.pad_0 = 0;
|
||||
if (!instance->ctrl_context && reset_devices)
|
||||
if ((instance->adapter_type == MFI_SERIES) && reset_devices)
|
||||
cmd->frame->hdr.cmd = MFI_CMD_INVALID;
|
||||
}
|
||||
|
||||
@ -4099,7 +4102,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
|
||||
inline int
|
||||
dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
|
||||
|
||||
if (!instance->ctrl_context)
|
||||
if (instance->adapter_type == MFI_SERIES)
|
||||
return KILL_ADAPTER;
|
||||
else if (instance->unload ||
|
||||
test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags))
|
||||
@ -4143,7 +4146,8 @@ megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
|
||||
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->pd_info_h);
|
||||
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_PD_INFO));
|
||||
|
||||
if (instance->ctrl_context && !instance->mask_interrupts)
|
||||
if ((instance->adapter_type != MFI_SERIES) &&
|
||||
!instance->mask_interrupts)
|
||||
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
|
||||
else
|
||||
ret = megasas_issue_polled(instance, cmd);
|
||||
@ -4240,7 +4244,8 @@ megasas_get_pd_list(struct megasas_instance *instance)
|
||||
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
|
||||
dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
|
||||
|
||||
if (instance->ctrl_context && !instance->mask_interrupts)
|
||||
if ((instance->adapter_type != MFI_SERIES) &&
|
||||
!instance->mask_interrupts)
|
||||
ret = megasas_issue_blocked_cmd(instance, cmd,
|
||||
MFI_IO_TIMEOUT_SECS);
|
||||
else
|
||||
@ -4251,7 +4256,7 @@ megasas_get_pd_list(struct megasas_instance *instance)
|
||||
dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
|
||||
"failed/not supported by firmware\n");
|
||||
|
||||
if (instance->ctrl_context)
|
||||
if (instance->adapter_type != MFI_SERIES)
|
||||
megaraid_sas_kill_hba(instance);
|
||||
else
|
||||
instance->pd_list_not_supported = 1;
|
||||
@ -4372,7 +4377,8 @@ megasas_get_ld_list(struct megasas_instance *instance)
|
||||
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
|
||||
dcmd->pad_0 = 0;
|
||||
|
||||
if (instance->ctrl_context && !instance->mask_interrupts)
|
||||
if ((instance->adapter_type != MFI_SERIES) &&
|
||||
!instance->mask_interrupts)
|
||||
ret = megasas_issue_blocked_cmd(instance, cmd,
|
||||
MFI_IO_TIMEOUT_SECS);
|
||||
else
|
||||
@ -4491,7 +4497,8 @@ megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
|
||||
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
|
||||
dcmd->pad_0 = 0;
|
||||
|
||||
if (instance->ctrl_context && !instance->mask_interrupts)
|
||||
if ((instance->adapter_type != MFI_SERIES) &&
|
||||
!instance->mask_interrupts)
|
||||
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
|
||||
else
|
||||
ret = megasas_issue_polled(instance, cmd);
|
||||
@ -4664,7 +4671,8 @@ megasas_get_ctrl_info(struct megasas_instance *instance)
|
||||
dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
|
||||
dcmd->mbox.b[0] = 1;
|
||||
|
||||
if (instance->ctrl_context && !instance->mask_interrupts)
|
||||
if ((instance->adapter_type != MFI_SERIES) &&
|
||||
!instance->mask_interrupts)
|
||||
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
|
||||
else
|
||||
ret = megasas_issue_polled(instance, cmd);
|
||||
@ -4783,7 +4791,8 @@ int megasas_set_crash_dump_params(struct megasas_instance *instance,
|
||||
dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
|
||||
dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
|
||||
|
||||
if (instance->ctrl_context && !instance->mask_interrupts)
|
||||
if ((instance->adapter_type != MFI_SERIES) &&
|
||||
!instance->mask_interrupts)
|
||||
ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
|
||||
else
|
||||
ret = megasas_issue_polled(instance, cmd);
|
||||
@ -5129,6 +5138,26 @@ skip_alloc:
|
||||
instance->use_seqnum_jbod_fp = false;
|
||||
}
|
||||
|
||||
static void megasas_setup_reply_map(struct megasas_instance *instance)
|
||||
{
|
||||
const struct cpumask *mask;
|
||||
unsigned int queue, cpu;
|
||||
|
||||
for (queue = 0; queue < instance->msix_vectors; queue++) {
|
||||
mask = pci_irq_get_affinity(instance->pdev, queue);
|
||||
if (!mask)
|
||||
goto fallback;
|
||||
|
||||
for_each_cpu(cpu, mask)
|
||||
instance->reply_map[cpu] = queue;
|
||||
}
|
||||
return;
|
||||
|
||||
fallback:
|
||||
for_each_possible_cpu(cpu)
|
||||
instance->reply_map[cpu] = cpu % instance->msix_vectors;
|
||||
}
|
||||
|
||||
/**
|
||||
* megasas_init_fw - Initializes the FW
|
||||
* @instance: Adapter soft state
|
||||
@ -5170,7 +5199,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
|
||||
|
||||
reg_set = instance->reg_set;
|
||||
|
||||
if (fusion)
|
||||
if (instance->adapter_type != MFI_SERIES)
|
||||
instance->instancet = &megasas_instance_template_fusion;
|
||||
else {
|
||||
switch (instance->pdev->device) {
|
||||
@ -5211,7 +5240,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
|
||||
goto fail_ready_state;
|
||||
}
|
||||
|
||||
if (instance->is_ventura) {
|
||||
if (instance->adapter_type == VENTURA_SERIES) {
|
||||
scratch_pad_3 =
|
||||
readl(&instance->reg_set->outbound_scratch_pad_3);
|
||||
instance->max_raid_mapsize = ((scratch_pad_3 >>
|
||||
@ -5229,7 +5258,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
|
||||
(&instance->reg_set->outbound_scratch_pad_2);
|
||||
/* Check max MSI-X vectors */
|
||||
if (fusion) {
|
||||
if (fusion->adapter_type == THUNDERBOLT_SERIES) { /* Thunderbolt Series*/
|
||||
if (instance->adapter_type == THUNDERBOLT_SERIES) {
|
||||
/* Thunderbolt Series*/
|
||||
instance->msix_vectors = (scratch_pad_2
|
||||
& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
|
||||
fw_msix_count = instance->msix_vectors;
|
||||
@ -5293,6 +5323,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
|
||||
goto fail_setup_irqs;
|
||||
}
|
||||
|
||||
megasas_setup_reply_map(instance);
|
||||
|
||||
dev_info(&instance->pdev->dev,
|
||||
"firmware supports msix\t: (%d)", fw_msix_count);
|
||||
dev_info(&instance->pdev->dev,
|
||||
@ -5319,7 +5351,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
|
||||
if (instance->instancet->init_adapter(instance))
|
||||
goto fail_init_adapter;
|
||||
|
||||
if (instance->is_ventura) {
|
||||
if (instance->adapter_type == VENTURA_SERIES) {
|
||||
scratch_pad_4 =
|
||||
readl(&instance->reg_set->outbound_scratch_pad_4);
|
||||
if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >=
|
||||
@ -5355,7 +5387,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
|
||||
memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
|
||||
|
||||
/* stream detection initialization */
|
||||
if (instance->is_ventura && fusion) {
|
||||
if (instance->adapter_type == VENTURA_SERIES) {
|
||||
fusion->stream_detect_by_ld =
|
||||
kzalloc(sizeof(struct LD_STREAM_DETECT *)
|
||||
* MAX_LOGICAL_DRIVES_EXT,
|
||||
@ -5804,7 +5836,8 @@ megasas_get_target_prop(struct megasas_instance *instance,
|
||||
dcmd->sgl.sge32[0].length =
|
||||
cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
|
||||
|
||||
if (instance->ctrl_context && !instance->mask_interrupts)
|
||||
if ((instance->adapter_type != MFI_SERIES) &&
|
||||
!instance->mask_interrupts)
|
||||
ret = megasas_issue_blocked_cmd(instance,
|
||||
cmd, MFI_IO_TIMEOUT_SECS);
|
||||
else
|
||||
@ -5965,6 +5998,125 @@ fail_set_dma_mask:
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* megasas_set_adapter_type - Set adapter type.
|
||||
* Supported controllers can be divided in
|
||||
* 4 categories- enum MR_ADAPTER_TYPE {
|
||||
* MFI_SERIES = 1,
|
||||
* THUNDERBOLT_SERIES = 2,
|
||||
* INVADER_SERIES = 3,
|
||||
* VENTURA_SERIES = 4,
|
||||
* };
|
||||
* @instance: Adapter soft state
|
||||
* return: void
|
||||
*/
|
||||
static inline void megasas_set_adapter_type(struct megasas_instance *instance)
|
||||
{
|
||||
switch (instance->pdev->device) {
|
||||
case PCI_DEVICE_ID_LSI_VENTURA:
|
||||
case PCI_DEVICE_ID_LSI_HARPOON:
|
||||
case PCI_DEVICE_ID_LSI_TOMCAT:
|
||||
case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
|
||||
case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
|
||||
instance->adapter_type = VENTURA_SERIES;
|
||||
break;
|
||||
case PCI_DEVICE_ID_LSI_FUSION:
|
||||
case PCI_DEVICE_ID_LSI_PLASMA:
|
||||
instance->adapter_type = THUNDERBOLT_SERIES;
|
||||
break;
|
||||
case PCI_DEVICE_ID_LSI_INVADER:
|
||||
case PCI_DEVICE_ID_LSI_INTRUDER:
|
||||
case PCI_DEVICE_ID_LSI_INTRUDER_24:
|
||||
case PCI_DEVICE_ID_LSI_CUTLASS_52:
|
||||
case PCI_DEVICE_ID_LSI_CUTLASS_53:
|
||||
case PCI_DEVICE_ID_LSI_FURY:
|
||||
instance->adapter_type = INVADER_SERIES;
|
||||
break;
|
||||
default: /* For all other supported controllers */
|
||||
instance->adapter_type = MFI_SERIES;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
|
||||
{
|
||||
instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
|
||||
&instance->producer_h);
|
||||
instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
|
||||
&instance->consumer_h);
|
||||
|
||||
if (!instance->producer || !instance->consumer) {
|
||||
dev_err(&instance->pdev->dev,
|
||||
"Failed to allocate memory for producer, consumer\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
*instance->producer = 0;
|
||||
*instance->consumer = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* megasas_alloc_ctrl_mem - Allocate per controller memory for core data
|
||||
* structures which are not common across MFI
|
||||
* adapters and fusion adapters.
|
||||
* For MFI based adapters, allocate producer and
|
||||
* consumer buffers. For fusion adapters, allocate
|
||||
* memory for fusion context.
|
||||
* @instance: Adapter soft state
|
||||
* return: 0 for SUCCESS
|
||||
*/
|
||||
static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
|
||||
{
|
||||
instance->reply_map = kzalloc(sizeof(unsigned int) * nr_cpu_ids,
|
||||
GFP_KERNEL);
|
||||
if (!instance->reply_map)
|
||||
return -ENOMEM;
|
||||
|
||||
switch (instance->adapter_type) {
|
||||
case MFI_SERIES:
|
||||
if (megasas_alloc_mfi_ctrl_mem(instance))
|
||||
goto fail;
|
||||
break;
|
||||
case VENTURA_SERIES:
|
||||
case THUNDERBOLT_SERIES:
|
||||
case INVADER_SERIES:
|
||||
if (megasas_alloc_fusion_context(instance))
|
||||
goto fail;
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
fail:
|
||||
kfree(instance->reply_map);
|
||||
instance->reply_map = NULL;
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/*
|
||||
* megasas_free_ctrl_mem - Free fusion context for fusion adapters and
|
||||
* producer, consumer buffers for MFI adapters
|
||||
*
|
||||
* @instance - Adapter soft instance
|
||||
*
|
||||
*/
|
||||
static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
|
||||
{
|
||||
kfree(instance->reply_map);
|
||||
if (instance->adapter_type == MFI_SERIES) {
|
||||
if (instance->producer)
|
||||
pci_free_consistent(instance->pdev, sizeof(u32),
|
||||
instance->producer,
|
||||
instance->producer_h);
|
||||
if (instance->consumer)
|
||||
pci_free_consistent(instance->pdev, sizeof(u32),
|
||||
instance->consumer,
|
||||
instance->consumer_h);
|
||||
} else {
|
||||
megasas_free_fusion_context(instance);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* megasas_probe_one - PCI hotplug entry point
|
||||
* @pdev: PCI device structure
|
||||
@ -5977,7 +6129,6 @@ static int megasas_probe_one(struct pci_dev *pdev,
|
||||
struct Scsi_Host *host;
|
||||
struct megasas_instance *instance;
|
||||
u16 control = 0;
|
||||
struct fusion_context *fusion = NULL;
|
||||
|
||||
/* Reset MSI-X in the kdump kernel */
|
||||
if (reset_devices) {
|
||||
@ -6022,56 +6173,10 @@ static int megasas_probe_one(struct pci_dev *pdev,
|
||||
atomic_set(&instance->fw_reset_no_pci_access, 0);
|
||||
instance->pdev = pdev;
|
||||
|
||||
switch (instance->pdev->device) {
|
||||
case PCI_DEVICE_ID_LSI_VENTURA:
|
||||
case PCI_DEVICE_ID_LSI_HARPOON:
|
||||
case PCI_DEVICE_ID_LSI_TOMCAT:
|
||||
case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
|
||||
case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
|
||||
instance->is_ventura = true;
|
||||
case PCI_DEVICE_ID_LSI_FUSION:
|
||||
case PCI_DEVICE_ID_LSI_PLASMA:
|
||||
case PCI_DEVICE_ID_LSI_INVADER:
|
||||
case PCI_DEVICE_ID_LSI_FURY:
|
||||
case PCI_DEVICE_ID_LSI_INTRUDER:
|
||||
case PCI_DEVICE_ID_LSI_INTRUDER_24:
|
||||
case PCI_DEVICE_ID_LSI_CUTLASS_52:
|
||||
case PCI_DEVICE_ID_LSI_CUTLASS_53:
|
||||
{
|
||||
if (megasas_alloc_fusion_context(instance)) {
|
||||
megasas_free_fusion_context(instance);
|
||||
goto fail_alloc_dma_buf;
|
||||
}
|
||||
fusion = instance->ctrl_context;
|
||||
megasas_set_adapter_type(instance);
|
||||
|
||||
if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
|
||||
(instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA))
|
||||
fusion->adapter_type = THUNDERBOLT_SERIES;
|
||||
else if (instance->is_ventura)
|
||||
fusion->adapter_type = VENTURA_SERIES;
|
||||
else
|
||||
fusion->adapter_type = INVADER_SERIES;
|
||||
}
|
||||
break;
|
||||
default: /* For all other supported controllers */
|
||||
|
||||
instance->producer =
|
||||
pci_alloc_consistent(pdev, sizeof(u32),
|
||||
&instance->producer_h);
|
||||
instance->consumer =
|
||||
pci_alloc_consistent(pdev, sizeof(u32),
|
||||
&instance->consumer_h);
|
||||
|
||||
if (!instance->producer || !instance->consumer) {
|
||||
dev_printk(KERN_DEBUG, &pdev->dev, "Failed to allocate "
|
||||
"memory for producer, consumer\n");
|
||||
goto fail_alloc_dma_buf;
|
||||
}
|
||||
|
||||
*instance->producer = 0;
|
||||
*instance->consumer = 0;
|
||||
break;
|
||||
}
|
||||
if (megasas_alloc_ctrl_mem(instance))
|
||||
goto fail_alloc_dma_buf;
|
||||
|
||||
/* Crash dump feature related initialisation*/
|
||||
instance->drv_buf_index = 0;
|
||||
@ -6166,7 +6271,7 @@ static int megasas_probe_one(struct pci_dev *pdev,
|
||||
instance->disableOnlineCtrlReset = 1;
|
||||
instance->UnevenSpanSupport = 0;
|
||||
|
||||
if (instance->ctrl_context) {
|
||||
if (instance->adapter_type != MFI_SERIES) {
|
||||
INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
|
||||
INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
|
||||
} else
|
||||
@ -6246,7 +6351,7 @@ fail_io_attach:
|
||||
instance->instancet->disable_intr(instance);
|
||||
megasas_destroy_irqs(instance);
|
||||
|
||||
if (instance->ctrl_context)
|
||||
if (instance->adapter_type != MFI_SERIES)
|
||||
megasas_release_fusion(instance);
|
||||
else
|
||||
megasas_release_mfi(instance);
|
||||
@ -6267,14 +6372,8 @@ fail_alloc_dma_buf:
|
||||
pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
|
||||
instance->tgt_prop,
|
||||
instance->tgt_prop_h);
|
||||
if (instance->producer)
|
||||
pci_free_consistent(pdev, sizeof(u32), instance->producer,
|
||||
instance->producer_h);
|
||||
if (instance->consumer)
|
||||
pci_free_consistent(pdev, sizeof(u32), instance->consumer,
|
||||
instance->consumer_h);
|
||||
megasas_free_ctrl_mem(instance);
|
||||
scsi_host_put(host);
|
||||
|
||||
fail_alloc_instance:
|
||||
fail_set_dma_mask:
|
||||
pci_disable_device(pdev);
|
||||
@ -6480,7 +6579,9 @@ megasas_resume(struct pci_dev *pdev)
|
||||
if (rval < 0)
|
||||
goto fail_reenable_msix;
|
||||
|
||||
if (instance->ctrl_context) {
|
||||
megasas_setup_reply_map(instance);
|
||||
|
||||
if (instance->adapter_type != MFI_SERIES) {
|
||||
megasas_reset_reply_desc(instance);
|
||||
if (megasas_ioc_init_fusion(instance)) {
|
||||
megasas_free_cmds(instance);
|
||||
@ -6543,12 +6644,8 @@ fail_init_mfi:
|
||||
pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
|
||||
instance->tgt_prop,
|
||||
instance->tgt_prop_h);
|
||||
if (instance->producer)
|
||||
pci_free_consistent(pdev, sizeof(u32), instance->producer,
|
||||
instance->producer_h);
|
||||
if (instance->consumer)
|
||||
pci_free_consistent(pdev, sizeof(u32), instance->consumer,
|
||||
instance->consumer_h);
|
||||
|
||||
megasas_free_ctrl_mem(instance);
|
||||
scsi_host_put(host);
|
||||
|
||||
fail_set_dma_mask:
|
||||
@ -6656,7 +6753,7 @@ skip_firing_dcmds:
|
||||
if (instance->msix_vectors)
|
||||
pci_free_irq_vectors(instance->pdev);
|
||||
|
||||
if (instance->is_ventura) {
|
||||
if (instance->adapter_type == VENTURA_SERIES) {
|
||||
for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
|
||||
kfree(fusion->stream_detect_by_ld[i]);
|
||||
kfree(fusion->stream_detect_by_ld);
|
||||
@ -6664,7 +6761,7 @@ skip_firing_dcmds:
|
||||
}
|
||||
|
||||
|
||||
if (instance->ctrl_context) {
|
||||
if (instance->adapter_type != MFI_SERIES) {
|
||||
megasas_release_fusion(instance);
|
||||
pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
|
||||
(sizeof(struct MR_PD_CFG_SEQ) *
|
||||
@ -6689,15 +6786,8 @@ skip_firing_dcmds:
|
||||
fusion->pd_seq_sync[i],
|
||||
fusion->pd_seq_phys[i]);
|
||||
}
|
||||
megasas_free_fusion_context(instance);
|
||||
} else {
|
||||
megasas_release_mfi(instance);
|
||||
pci_free_consistent(pdev, sizeof(u32),
|
||||
instance->producer,
|
||||
instance->producer_h);
|
||||
pci_free_consistent(pdev, sizeof(u32),
|
||||
instance->consumer,
|
||||
instance->consumer_h);
|
||||
}
|
||||
|
||||
kfree(instance->ctrl_info);
|
||||
@ -6738,6 +6828,8 @@ skip_firing_dcmds:
|
||||
pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
|
||||
instance->system_info_buf, instance->system_info_h);
|
||||
|
||||
megasas_free_ctrl_mem(instance);
|
||||
|
||||
scsi_host_put(host);
|
||||
|
||||
pci_disable_device(pdev);
|
||||
|
@ -745,7 +745,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
|
||||
*pDevHandle = MR_PdDevHandleGet(pd, map);
|
||||
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
|
||||
/* get second pd also for raid 1/10 fast path writes*/
|
||||
if (instance->is_ventura &&
|
||||
if ((instance->adapter_type == VENTURA_SERIES) &&
|
||||
(raid->level == 1) &&
|
||||
!io_info->isRead) {
|
||||
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
|
||||
@ -755,8 +755,8 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
|
||||
}
|
||||
} else {
|
||||
if ((raid->level >= 5) &&
|
||||
((fusion->adapter_type == THUNDERBOLT_SERIES) ||
|
||||
((fusion->adapter_type == INVADER_SERIES) &&
|
||||
((instance->adapter_type == THUNDERBOLT_SERIES) ||
|
||||
((instance->adapter_type == INVADER_SERIES) &&
|
||||
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
|
||||
pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
|
||||
else if (raid->level == 1) {
|
||||
@ -770,7 +770,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
|
||||
}
|
||||
|
||||
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
|
||||
if (instance->is_ventura) {
|
||||
if (instance->adapter_type == VENTURA_SERIES) {
|
||||
((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
|
||||
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
|
||||
io_info->span_arm =
|
||||
@ -861,7 +861,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
|
||||
*pDevHandle = MR_PdDevHandleGet(pd, map);
|
||||
*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
|
||||
/* get second pd also for raid 1/10 fast path writes*/
|
||||
if (instance->is_ventura &&
|
||||
if ((instance->adapter_type == VENTURA_SERIES) &&
|
||||
(raid->level == 1) &&
|
||||
!io_info->isRead) {
|
||||
r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
|
||||
@ -871,8 +871,8 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
|
||||
}
|
||||
} else {
|
||||
if ((raid->level >= 5) &&
|
||||
((fusion->adapter_type == THUNDERBOLT_SERIES) ||
|
||||
((fusion->adapter_type == INVADER_SERIES) &&
|
||||
((instance->adapter_type == THUNDERBOLT_SERIES) ||
|
||||
((instance->adapter_type == INVADER_SERIES) &&
|
||||
(raid->regTypeReqOnRead != REGION_TYPE_UNUSED))))
|
||||
pRAID_Context->reg_lock_flags = REGION_TYPE_EXCLUSIVE;
|
||||
else if (raid->level == 1) {
|
||||
@ -888,7 +888,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
|
||||
}
|
||||
|
||||
*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
|
||||
if (instance->is_ventura) {
|
||||
if (instance->adapter_type == VENTURA_SERIES) {
|
||||
((struct RAID_CONTEXT_G35 *)pRAID_Context)->span_arm =
|
||||
(span << RAID_CTX_SPANARM_SPAN_SHIFT) | physArm;
|
||||
io_info->span_arm =
|
||||
@ -1096,10 +1096,10 @@ MR_BuildRaidContext(struct megasas_instance *instance,
|
||||
cpu_to_le16(raid->fpIoTimeoutForLd ?
|
||||
raid->fpIoTimeoutForLd :
|
||||
map->raidMap.fpPdIoTimeoutSec);
|
||||
if (fusion->adapter_type == INVADER_SERIES)
|
||||
if (instance->adapter_type == INVADER_SERIES)
|
||||
pRAID_Context->reg_lock_flags = (isRead) ?
|
||||
raid->regTypeReqOnRead : raid->regTypeReqOnWrite;
|
||||
else if (!instance->is_ventura)
|
||||
else if (instance->adapter_type == THUNDERBOLT_SERIES)
|
||||
pRAID_Context->reg_lock_flags = (isRead) ?
|
||||
REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite;
|
||||
pRAID_Context->virtual_disk_tgt_id = raid->targetId;
|
||||
|
@ -237,7 +237,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
|
||||
reg_set = instance->reg_set;
|
||||
|
||||
/* ventura FW does not fill outbound_scratch_pad_3 with queue depth */
|
||||
if (!instance->is_ventura)
|
||||
if (instance->adapter_type < VENTURA_SERIES)
|
||||
cur_max_fw_cmds =
|
||||
readl(&instance->reg_set->outbound_scratch_pad_3) & 0x00FFFF;
|
||||
|
||||
@ -285,7 +285,7 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c
|
||||
instance->host->can_queue = instance->cur_can_queue;
|
||||
}
|
||||
|
||||
if (instance->is_ventura)
|
||||
if (instance->adapter_type == VENTURA_SERIES)
|
||||
instance->max_mpt_cmds =
|
||||
instance->max_fw_cmds * RAID_1_PEER_CMDS;
|
||||
else
|
||||
@ -838,7 +838,7 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
|
||||
drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
|
||||
|
||||
/* driver support Extended MSIX */
|
||||
if (fusion->adapter_type >= INVADER_SERIES)
|
||||
if (instance->adapter_type >= INVADER_SERIES)
|
||||
drv_ops->mfi_capabilities.support_additional_msix = 1;
|
||||
/* driver supports HA / Remote LUN over Fast Path interface */
|
||||
drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
|
||||
@ -1789,7 +1789,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
|
||||
|
||||
fusion = instance->ctrl_context;
|
||||
|
||||
if (fusion->adapter_type >= INVADER_SERIES) {
|
||||
if (instance->adapter_type >= INVADER_SERIES) {
|
||||
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
|
||||
sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
|
||||
sgl_ptr_end->Flags = 0;
|
||||
@ -1799,7 +1799,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
|
||||
sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
|
||||
sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
|
||||
sgl_ptr->Flags = 0;
|
||||
if (fusion->adapter_type >= INVADER_SERIES)
|
||||
if (instance->adapter_type >= INVADER_SERIES)
|
||||
if (i == sge_count - 1)
|
||||
sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
|
||||
sgl_ptr++;
|
||||
@ -1809,7 +1809,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
|
||||
(sge_count > fusion->max_sge_in_main_msg)) {
|
||||
|
||||
struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
|
||||
if (fusion->adapter_type >= INVADER_SERIES) {
|
||||
if (instance->adapter_type >= INVADER_SERIES) {
|
||||
if ((le16_to_cpu(cmd->io_request->IoFlags) &
|
||||
MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
|
||||
MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
|
||||
@ -1825,7 +1825,7 @@ megasas_make_sgl_fusion(struct megasas_instance *instance,
|
||||
sg_chain = sgl_ptr;
|
||||
/* Prepare chain element */
|
||||
sg_chain->NextChainOffset = 0;
|
||||
if (fusion->adapter_type >= INVADER_SERIES)
|
||||
if (instance->adapter_type >= INVADER_SERIES)
|
||||
sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
|
||||
else
|
||||
sg_chain->Flags =
|
||||
@ -2341,15 +2341,12 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
|
||||
fp_possible = (io_info.fpOkForIo > 0) ? true : false;
|
||||
}
|
||||
|
||||
/* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
|
||||
id by default, not CPU group id, otherwise all MSI-X queues won't
|
||||
be utilized */
|
||||
cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
|
||||
raw_smp_processor_id() % instance->msix_vectors : 0;
|
||||
cmd->request_desc->SCSIIO.MSIxIndex =
|
||||
instance->reply_map[raw_smp_processor_id()];
|
||||
|
||||
praid_context = &io_request->RaidContext;
|
||||
|
||||
if (instance->is_ventura) {
|
||||
if (instance->adapter_type == VENTURA_SERIES) {
|
||||
spin_lock_irqsave(&instance->stream_lock, spinlock_flags);
|
||||
megasas_stream_detect(instance, cmd, &io_info);
|
||||
spin_unlock_irqrestore(&instance->stream_lock, spinlock_flags);
|
||||
@ -2402,7 +2399,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
|
||||
cmd->request_desc->SCSIIO.RequestFlags =
|
||||
(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
|
||||
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
|
||||
if (fusion->adapter_type == INVADER_SERIES) {
|
||||
if (instance->adapter_type == INVADER_SERIES) {
|
||||
if (io_request->RaidContext.raid_context.reg_lock_flags ==
|
||||
REGION_TYPE_UNUSED)
|
||||
cmd->request_desc->SCSIIO.RequestFlags =
|
||||
@ -2415,7 +2412,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
|
||||
io_request->RaidContext.raid_context.reg_lock_flags |=
|
||||
(MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
|
||||
MR_RL_FLAGS_SEQ_NUM_ENABLE);
|
||||
} else if (instance->is_ventura) {
|
||||
} else if (instance->adapter_type == VENTURA_SERIES) {
|
||||
io_request->RaidContext.raid_context_g35.nseg_type |=
|
||||
(1 << RAID_CONTEXT_NSEG_SHIFT);
|
||||
io_request->RaidContext.raid_context_g35.nseg_type |=
|
||||
@ -2434,7 +2431,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
|
||||
&io_info, local_map_ptr);
|
||||
scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
|
||||
cmd->pd_r1_lb = io_info.pd_after_lb;
|
||||
if (instance->is_ventura)
|
||||
if (instance->adapter_type == VENTURA_SERIES)
|
||||
io_request->RaidContext.raid_context_g35.span_arm
|
||||
= io_info.span_arm;
|
||||
else
|
||||
@ -2444,7 +2441,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
|
||||
} else
|
||||
scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
|
||||
|
||||
if (instance->is_ventura)
|
||||
if (instance->adapter_type == VENTURA_SERIES)
|
||||
cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
|
||||
else
|
||||
cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
|
||||
@ -2467,7 +2464,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
|
||||
cmd->request_desc->SCSIIO.RequestFlags =
|
||||
(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
|
||||
<< MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
|
||||
if (fusion->adapter_type == INVADER_SERIES) {
|
||||
if (instance->adapter_type == INVADER_SERIES) {
|
||||
if (io_info.do_fp_rlbypass ||
|
||||
(io_request->RaidContext.raid_context.reg_lock_flags
|
||||
== REGION_TYPE_UNUSED))
|
||||
@ -2480,7 +2477,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
|
||||
(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
|
||||
MR_RL_FLAGS_SEQ_NUM_ENABLE);
|
||||
io_request->RaidContext.raid_context.nseg = 0x1;
|
||||
} else if (instance->is_ventura) {
|
||||
} else if (instance->adapter_type == VENTURA_SERIES) {
|
||||
io_request->RaidContext.raid_context_g35.routing_flags |=
|
||||
(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
|
||||
io_request->RaidContext.raid_context_g35.nseg_type |=
|
||||
@ -2555,7 +2552,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
|
||||
|
||||
/* set RAID context values */
|
||||
pRAID_Context->config_seq_num = raid->seqNum;
|
||||
if (!instance->is_ventura)
|
||||
if (instance->adapter_type != VENTURA_SERIES)
|
||||
pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
|
||||
pRAID_Context->timeout_value =
|
||||
cpu_to_le16(raid->fpIoTimeoutForLd);
|
||||
@ -2640,7 +2637,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
|
||||
cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
|
||||
pRAID_Context->config_seq_num = pd_sync->seq[pd_index].seqNum;
|
||||
io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
|
||||
if (instance->is_ventura) {
|
||||
if (instance->adapter_type == VENTURA_SERIES) {
|
||||
io_request->RaidContext.raid_context_g35.routing_flags |=
|
||||
(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
|
||||
io_request->RaidContext.raid_context_g35.nseg_type |=
|
||||
@ -2667,10 +2664,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
|
||||
}
|
||||
|
||||
cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
|
||||
cmd->request_desc->SCSIIO.MSIxIndex =
|
||||
instance->msix_vectors ?
|
||||
(raw_smp_processor_id() % instance->msix_vectors) : 0;
|
||||
|
||||
cmd->request_desc->SCSIIO.MSIxIndex =
|
||||
instance->reply_map[raw_smp_processor_id()];
|
||||
|
||||
if (!fp_possible) {
|
||||
/* system pd firmware path */
|
||||
@ -2688,7 +2684,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
|
||||
pRAID_Context->timeout_value =
|
||||
cpu_to_le16((os_timeout_value > timeout_limit) ?
|
||||
timeout_limit : os_timeout_value);
|
||||
if (fusion->adapter_type >= INVADER_SERIES)
|
||||
if (instance->adapter_type >= INVADER_SERIES)
|
||||
io_request->IoFlags |=
|
||||
cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
|
||||
|
||||
@ -2771,7 +2767,7 @@ megasas_build_io_fusion(struct megasas_instance *instance,
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (instance->is_ventura) {
|
||||
if (instance->adapter_type == VENTURA_SERIES) {
|
||||
set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
|
||||
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
|
||||
cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
|
||||
@ -3301,7 +3297,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance,
|
||||
|
||||
io_req = cmd->io_request;
|
||||
|
||||
if (fusion->adapter_type >= INVADER_SERIES) {
|
||||
if (instance->adapter_type >= INVADER_SERIES) {
|
||||
struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
|
||||
(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
|
||||
sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
|
||||
@ -4233,7 +4229,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
|
||||
for (i = 0 ; i < instance->max_scsi_cmds; i++) {
|
||||
cmd_fusion = fusion->cmd_list[i];
|
||||
/*check for extra commands issued by driver*/
|
||||
if (instance->is_ventura) {
|
||||
if (instance->adapter_type == VENTURA_SERIES) {
|
||||
r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
|
||||
megasas_return_cmd_fusion(instance, r1_cmd);
|
||||
}
|
||||
@ -4334,7 +4330,7 @@ transition_to_ready:
|
||||
megasas_set_dynamic_target_properties(sdev);
|
||||
|
||||
/* reset stream detection array */
|
||||
if (instance->is_ventura) {
|
||||
if (instance->adapter_type == VENTURA_SERIES) {
|
||||
for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
|
||||
memset(fusion->stream_detect_by_ld[j],
|
||||
0, sizeof(struct LD_STREAM_DETECT));
|
||||
|
@ -104,12 +104,6 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE {
|
||||
#define RAID_1_PEER_CMDS 2
|
||||
#define JBOD_MAPS_COUNT 2
|
||||
|
||||
enum MR_FUSION_ADAPTER_TYPE {
|
||||
THUNDERBOLT_SERIES = 0,
|
||||
INVADER_SERIES = 1,
|
||||
VENTURA_SERIES = 2,
|
||||
};
|
||||
|
||||
/*
|
||||
* Raid Context structure which describes MegaRAID specific IO Parameters
|
||||
* This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
|
||||
@ -1319,7 +1313,6 @@ struct fusion_context {
|
||||
struct LD_LOAD_BALANCE_INFO *load_balance_info;
|
||||
u32 load_balance_info_pages;
|
||||
LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
|
||||
u8 adapter_type;
|
||||
struct LD_STREAM_DETECT **stream_detect_by_ld;
|
||||
};
|
||||
|
||||
|
@ -1059,7 +1059,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
|
||||
return _FAIL;
|
||||
|
||||
|
||||
if (len > MAX_IE_SZ)
|
||||
if (len < 0 || len > MAX_IE_SZ)
|
||||
return _FAIL;
|
||||
|
||||
pbss_network->IELength = len;
|
||||
|
@ -814,7 +814,7 @@ static void _rtl8822be_enable_aspm_back_door(struct ieee80211_hw *hw)
|
||||
return;
|
||||
|
||||
pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp);
|
||||
pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | BIT(7));
|
||||
pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | ASPM_L1_LATENCY << 3);
|
||||
|
||||
pci_read_config_byte(rtlpci->pdev, 0x719, &tmp);
|
||||
pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4));
|
||||
|
@ -99,6 +99,7 @@
|
||||
#define RTL_USB_MAX_RX_COUNT 100
|
||||
#define QBSS_LOAD_SIZE 5
|
||||
#define MAX_WMMELE_LENGTH 64
|
||||
#define ASPM_L1_LATENCY 7
|
||||
|
||||
#define TOTAL_CAM_ENTRY 32
|
||||
|
||||
|
@ -231,6 +231,10 @@ static const struct usb_device_id usb_quirk_list[] = {
|
||||
/* Corsair K70 RGB */
|
||||
{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
|
||||
|
||||
/* Corsair Strafe */
|
||||
{ USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
|
||||
USB_QUIRK_DELAY_CTRL_MSG },
|
||||
|
||||
/* Corsair Strafe RGB */
|
||||
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
|
||||
USB_QUIRK_DELAY_CTRL_MSG },
|
||||
|
@ -604,7 +604,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
|
||||
if (!ep->stream_info)
|
||||
return NULL;
|
||||
|
||||
if (stream_id > ep->stream_info->num_streams)
|
||||
if (stream_id >= ep->stream_info->num_streams)
|
||||
return NULL;
|
||||
return ep->stream_info->stream_rings[stream_id];
|
||||
}
|
||||
|
@ -400,8 +400,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
|
||||
loff_t *ppos)
|
||||
{
|
||||
struct usb_yurex *dev;
|
||||
int retval = 0;
|
||||
int bytes_read = 0;
|
||||
int len = 0;
|
||||
char in_buffer[20];
|
||||
unsigned long flags;
|
||||
|
||||
@ -409,26 +408,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
|
||||
|
||||
mutex_lock(&dev->io_mutex);
|
||||
if (!dev->interface) { /* already disconnected */
|
||||
retval = -ENODEV;
|
||||
goto exit;
|
||||
mutex_unlock(&dev->io_mutex);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&dev->lock, flags);
|
||||
bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
|
||||
len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
|
||||
spin_unlock_irqrestore(&dev->lock, flags);
|
||||
|
||||
if (*ppos < bytes_read) {
|
||||
if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
|
||||
retval = -EFAULT;
|
||||
else {
|
||||
retval = bytes_read - *ppos;
|
||||
*ppos += bytes_read;
|
||||
}
|
||||
}
|
||||
|
||||
exit:
|
||||
mutex_unlock(&dev->io_mutex);
|
||||
return retval;
|
||||
|
||||
return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
|
||||
}
|
||||
|
||||
static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
|
||||
|
@ -131,7 +131,7 @@ static int ch341_control_in(struct usb_device *dev,
|
||||
r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
|
||||
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
|
||||
value, index, buf, bufsize, DEFAULT_TIMEOUT);
|
||||
if (r < bufsize) {
|
||||
if (r < (int)bufsize) {
|
||||
if (r >= 0) {
|
||||
dev_err(&dev->dev,
|
||||
"short control message received (%d < %u)\n",
|
||||
|
@ -152,6 +152,7 @@ static const struct usb_device_id id_table[] = {
|
||||
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
|
||||
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
|
||||
{ USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
|
||||
{ USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
|
||||
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
|
||||
{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
|
||||
{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
|
||||
|
@ -373,8 +373,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
|
||||
3, /* get pins */
|
||||
USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
|
||||
0, 0, data, 1, 2000);
|
||||
if (rc >= 0)
|
||||
if (rc == 1)
|
||||
*value = *data;
|
||||
else if (rc >= 0)
|
||||
rc = -EIO;
|
||||
|
||||
kfree(data);
|
||||
return rc;
|
||||
|
@ -481,6 +481,9 @@ static void mos7840_control_callback(struct urb *urb)
|
||||
}
|
||||
|
||||
dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
|
||||
if (urb->actual_length < 1)
|
||||
goto out;
|
||||
|
||||
dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
|
||||
mos7840_port->MsrLsr, mos7840_port->port_num);
|
||||
data = urb->transfer_buffer;
|
||||
|
@ -1235,9 +1235,8 @@ static int load_elf_library(struct file *file)
|
||||
goto out_free_ph;
|
||||
}
|
||||
|
||||
len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
|
||||
ELF_MIN_ALIGN - 1);
|
||||
bss = eppnt->p_memsz + eppnt->p_vaddr;
|
||||
len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
|
||||
bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
|
||||
if (bss > len) {
|
||||
error = vm_brk(len, bss - len);
|
||||
if (error)
|
||||
|
@ -138,10 +138,6 @@ static int devpts_ptmx_path(struct path *path)
|
||||
struct super_block *sb;
|
||||
int err;
|
||||
|
||||
/* Has the devpts filesystem already been found? */
|
||||
if (path->mnt->mnt_sb->s_magic == DEVPTS_SUPER_MAGIC)
|
||||
return 0;
|
||||
|
||||
/* Is a devpts filesystem at "pts" in the same directory? */
|
||||
err = path_pts(path);
|
||||
if (err)
|
||||
@ -159,22 +155,32 @@ static int devpts_ptmx_path(struct path *path)
|
||||
struct vfsmount *devpts_mntget(struct file *filp, struct pts_fs_info *fsi)
|
||||
{
|
||||
struct path path;
|
||||
int err;
|
||||
int err = 0;
|
||||
|
||||
path = filp->f_path;
|
||||
path_get(&path);
|
||||
|
||||
err = devpts_ptmx_path(&path);
|
||||
/* Walk upward while the start point is a bind mount of
|
||||
* a single file.
|
||||
*/
|
||||
while (path.mnt->mnt_root == path.dentry)
|
||||
if (follow_up(&path) == 0)
|
||||
break;
|
||||
|
||||
/* devpts_ptmx_path() finds a devpts fs or returns an error. */
|
||||
if ((path.mnt->mnt_sb->s_magic != DEVPTS_SUPER_MAGIC) ||
|
||||
(DEVPTS_SB(path.mnt->mnt_sb) != fsi))
|
||||
err = devpts_ptmx_path(&path);
|
||||
dput(path.dentry);
|
||||
if (err) {
|
||||
mntput(path.mnt);
|
||||
return ERR_PTR(err);
|
||||
if (!err) {
|
||||
if (DEVPTS_SB(path.mnt->mnt_sb) == fsi)
|
||||
return path.mnt;
|
||||
|
||||
err = -ENODEV;
|
||||
}
|
||||
if (DEVPTS_SB(path.mnt->mnt_sb) != fsi) {
|
||||
mntput(path.mnt);
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
return path.mnt;
|
||||
|
||||
mntput(path.mnt);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
struct pts_fs_info *devpts_acquire(struct file *filp)
|
||||
@ -182,15 +188,19 @@ struct pts_fs_info *devpts_acquire(struct file *filp)
|
||||
struct pts_fs_info *result;
|
||||
struct path path;
|
||||
struct super_block *sb;
|
||||
int err;
|
||||
|
||||
path = filp->f_path;
|
||||
path_get(&path);
|
||||
|
||||
err = devpts_ptmx_path(&path);
|
||||
if (err) {
|
||||
result = ERR_PTR(err);
|
||||
goto out;
|
||||
/* Has the devpts filesystem already been found? */
|
||||
if (path.mnt->mnt_sb->s_magic != DEVPTS_SUPER_MAGIC) {
|
||||
int err;
|
||||
|
||||
err = devpts_ptmx_path(&path);
|
||||
if (err) {
|
||||
result = ERR_PTR(err);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1585,18 +1585,6 @@ static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
|
||||
is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
|
||||
}
|
||||
|
||||
/*
|
||||
* Check whether the given nid is within node id range.
|
||||
*/
|
||||
static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
|
||||
{
|
||||
if (unlikely(nid < F2FS_ROOT_INO(sbi)))
|
||||
return -EINVAL;
|
||||
if (unlikely(nid >= NM_I(sbi)->max_nid))
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check whether the inode has blocks or not
|
||||
*/
|
||||
@ -2719,6 +2707,7 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
|
||||
struct dnode_of_data;
|
||||
struct node_info;
|
||||
|
||||
int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
|
||||
bool available_free_memory(struct f2fs_sb_info *sbi, int type);
|
||||
int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
|
||||
bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
|
||||
|
@ -194,12 +194,8 @@ static int do_read_inode(struct inode *inode)
|
||||
projid_t i_projid;
|
||||
|
||||
/* Check if ino is within scope */
|
||||
if (check_nid_range(sbi, inode->i_ino)) {
|
||||
f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
|
||||
(unsigned long) inode->i_ino);
|
||||
WARN_ON(1);
|
||||
if (check_nid_range(sbi, inode->i_ino))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
node_page = get_node_page(sbi, inode->i_ino);
|
||||
if (IS_ERR(node_page))
|
||||
@ -588,8 +584,11 @@ no_delete:
|
||||
alloc_nid_failed(sbi, inode->i_ino);
|
||||
clear_inode_flag(inode, FI_FREE_NID);
|
||||
} else {
|
||||
f2fs_bug_on(sbi, err &&
|
||||
!exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
|
||||
/*
|
||||
* If xattr nid is corrupted, we can reach out error condition,
|
||||
* err & !exist_written_data(sbi, inode->i_ino, ORPHAN_INO)).
|
||||
* In that case, check_nid_range() is enough to give a clue.
|
||||
*/
|
||||
}
|
||||
out_clear:
|
||||
fscrypt_put_encryption_info(inode);
|
||||
|
@ -29,6 +29,21 @@ static struct kmem_cache *nat_entry_slab;
|
||||
static struct kmem_cache *free_nid_slab;
|
||||
static struct kmem_cache *nat_entry_set_slab;
|
||||
|
||||
/*
|
||||
* Check whether the given nid is within node id range.
|
||||
*/
|
||||
int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
|
||||
{
|
||||
if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
|
||||
set_sbi_flag(sbi, SBI_NEED_FSCK);
|
||||
f2fs_msg(sbi->sb, KERN_WARNING,
|
||||
"%s: out-of-range nid=%x, run fsck to fix.",
|
||||
__func__, nid);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool available_free_memory(struct f2fs_sb_info *sbi, int type)
|
||||
{
|
||||
struct f2fs_nm_info *nm_i = NM_I(sbi);
|
||||
@ -1158,7 +1173,8 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
|
||||
|
||||
if (!nid)
|
||||
return;
|
||||
f2fs_bug_on(sbi, check_nid_range(sbi, nid));
|
||||
if (check_nid_range(sbi, nid))
|
||||
return;
|
||||
|
||||
rcu_read_lock();
|
||||
apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
|
||||
@ -1182,7 +1198,8 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
|
||||
|
||||
if (!nid)
|
||||
return ERR_PTR(-ENOENT);
|
||||
f2fs_bug_on(sbi, check_nid_range(sbi, nid));
|
||||
if (check_nid_range(sbi, nid))
|
||||
return ERR_PTR(-EINVAL);
|
||||
repeat:
|
||||
page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
|
||||
if (!page)
|
||||
|
@ -2006,8 +2006,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
|
||||
inode->i_uid = current_fsuid();
|
||||
if (dir && dir->i_mode & S_ISGID) {
|
||||
inode->i_gid = dir->i_gid;
|
||||
|
||||
/* Directories are special, and always inherit S_ISGID */
|
||||
if (S_ISDIR(mode))
|
||||
mode |= S_ISGID;
|
||||
else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
|
||||
!in_group_p(inode->i_gid) &&
|
||||
!capable_wrt_inode_uidgid(dir, CAP_FSETID))
|
||||
mode &= ~S_ISGID;
|
||||
} else
|
||||
inode->i_gid = current_fsgid();
|
||||
inode->i_mode = mode;
|
||||
|
@ -912,7 +912,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
|
||||
mss->private_hugetlb >> 10,
|
||||
mss->swap >> 10,
|
||||
(unsigned long)(mss->swap_pss >> (10 + PSS_SHIFT)),
|
||||
(unsigned long)(mss->pss >> (10 + PSS_SHIFT)));
|
||||
(unsigned long)(mss->pss_locked >> (10 + PSS_SHIFT)));
|
||||
|
||||
if (!rollup_mode) {
|
||||
arch_show_smap(m, vma);
|
||||
|
@ -211,6 +211,7 @@ enum {
|
||||
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
|
||||
/* (doesn't imply presence) */
|
||||
ATA_FLAG_SATA = (1 << 1),
|
||||
ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */
|
||||
ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
|
||||
ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
|
||||
ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
|
||||
|
kernel/irq/affinity.c
@@ -39,7 +39,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
         }
 }
 
-static cpumask_var_t *alloc_node_to_present_cpumask(void)
+static cpumask_var_t *alloc_node_to_possible_cpumask(void)
 {
         cpumask_var_t *masks;
         int node;
@@ -62,7 +62,7 @@ out_unwind:
         return NULL;
 }
 
-static void free_node_to_present_cpumask(cpumask_var_t *masks)
+static void free_node_to_possible_cpumask(cpumask_var_t *masks)
 {
         int node;
 
@@ -71,22 +71,22 @@ static void free_node_to_present_cpumask(cpumask_var_t *masks)
         kfree(masks);
 }
 
-static void build_node_to_present_cpumask(cpumask_var_t *masks)
+static void build_node_to_possible_cpumask(cpumask_var_t *masks)
 {
         int cpu;
 
-        for_each_present_cpu(cpu)
+        for_each_possible_cpu(cpu)
                 cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
 }
 
-static int get_nodes_in_cpumask(cpumask_var_t *node_to_present_cpumask,
+static int get_nodes_in_cpumask(cpumask_var_t *node_to_possible_cpumask,
                                 const struct cpumask *mask, nodemask_t *nodemsk)
 {
         int n, nodes = 0;
 
         /* Calculate the number of nodes in the supplied affinity mask */
         for_each_node(n) {
-                if (cpumask_intersects(mask, node_to_present_cpumask[n])) {
+                if (cpumask_intersects(mask, node_to_possible_cpumask[n])) {
                         node_set(n, *nodemsk);
                         nodes++;
                 }
@@ -109,7 +109,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
         int last_affv = affv + affd->pre_vectors;
         nodemask_t nodemsk = NODE_MASK_NONE;
         struct cpumask *masks;
-        cpumask_var_t nmsk, *node_to_present_cpumask;
+        cpumask_var_t nmsk, *node_to_possible_cpumask;
 
         /*
          * If there aren't any vectors left after applying the pre/post
@@ -125,8 +125,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
         if (!masks)
                 goto out;
 
-        node_to_present_cpumask = alloc_node_to_present_cpumask();
-        if (!node_to_present_cpumask)
+        node_to_possible_cpumask = alloc_node_to_possible_cpumask();
+        if (!node_to_possible_cpumask)
                 goto out;
 
         /* Fill out vectors at the beginning that don't need affinity */
@@ -135,8 +135,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 
         /* Stabilize the cpumasks */
         get_online_cpus();
-        build_node_to_present_cpumask(node_to_present_cpumask);
-        nodes = get_nodes_in_cpumask(node_to_present_cpumask, cpu_present_mask,
+        build_node_to_possible_cpumask(node_to_possible_cpumask);
+        nodes = get_nodes_in_cpumask(node_to_possible_cpumask, cpu_possible_mask,
                                      &nodemsk);
 
         /*
@@ -146,7 +146,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
         if (affv <= nodes) {
                 for_each_node_mask(n, nodemsk) {
                         cpumask_copy(masks + curvec,
-                                     node_to_present_cpumask[n]);
+                                     node_to_possible_cpumask[n]);
                         if (++curvec == last_affv)
                                 break;
                 }
@@ -160,7 +160,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
                 vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
 
                 /* Get the cpus on this node which are in the mask */
-                cpumask_and(nmsk, cpu_present_mask, node_to_present_cpumask[n]);
+                cpumask_and(nmsk, cpu_possible_mask, node_to_possible_cpumask[n]);
 
                 /* Calculate the number of cpus per vector */
                 ncpus = cpumask_weight(nmsk);
@@ -192,7 +192,7 @@ done:
         /* Fill out vectors at the end that don't need affinity */
         for (; curvec < nvecs; curvec++)
                 cpumask_copy(masks + curvec, irq_default_affinity);
-        free_node_to_present_cpumask(node_to_present_cpumask);
+        free_node_to_possible_cpumask(node_to_possible_cpumask);
 out:
         free_cpumask_var(nmsk);
         return masks;
@@ -214,7 +214,7 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity
                 return 0;
 
         get_online_cpus();
-        ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv;
+        ret = min_t(int, cpumask_weight(cpu_possible_mask), vecs) + resv;
         put_online_cpus();
         return ret;
 }
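Every hunk above switches the spreading code from the CPUs present when the masks are built to all possible CPUs, so vectors that end up targeting later-onlined CPUs still receive usable masks. The sketch below is a deliberately simplified userspace model of the round-robin spreading idea, using plain arrays instead of cpumasks and ignoring NUMA nodes; it is not the kernel algorithm.

#include <stdio.h>

#define NVECS  4
#define NCPUS  8   /* think "possible CPUs", including ones not online yet */

int main(void)
{
        /* masks[v][c] != 0 means vector v may be serviced by CPU c */
        int masks[NVECS][NCPUS] = { 0 };
        int cpus_per_vec = NCPUS / NVECS;
        int extra = NCPUS % NVECS;
        int cpu = 0;

        for (int v = 0; v < NVECS; v++) {
                /* earlier vectors absorb the remainder, one extra CPU each */
                int take = cpus_per_vec + (v < extra ? 1 : 0);

                for (int i = 0; i < take; i++)
                        masks[v][cpu++] = 1;
        }

        for (int v = 0; v < NVECS; v++) {
                printf("vector %d:", v);
                for (int c = 0; c < NCPUS; c++)
                        if (masks[v][c])
                                printf(" cpu%d", c);
                printf("\n");
        }
        return 0;
}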
kernel/power/user.c
@@ -186,6 +186,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
                 res = PAGE_SIZE - pg_offp;
         }
 
+        if (!data_of(data->handle)) {
+                res = -EINVAL;
+                goto unlock;
+        }
+
         res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
                         buf, count);
         if (res > 0)
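The added block rejects a write while the snapshot handle has no backing buffer yet, rather than handing a NULL destination to simple_write_to_buffer(). A generic userspace sketch of the same guard follows, with invented names (struct handle, handle_write); it only illustrates the defensive pattern.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>

struct handle {
        char *buf;      /* may legitimately be NULL until set up */
        size_t size;
};

/* Copy at most size bytes into the handle, refusing a missing buffer. */
static ssize_t handle_write(struct handle *h, const char *data, size_t count)
{
        if (!h->buf)
                return -EINVAL;        /* nothing to write into yet */
        if (count > h->size)
                count = h->size;
        memcpy(h->buf, data, count);
        return (ssize_t)count;
}

int main(void)
{
        char page[16];
        struct handle empty = { NULL, 0 };
        struct handle ready = { page, sizeof(page) };

        printf("empty: %zd\n", handle_write(&empty, "hello", 5));  /* -22 */
        printf("ready: %zd\n", handle_write(&ready, "hello", 5));  /* 5 */
        return 0;
}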
mm/gup.c
@@ -1235,8 +1235,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
         int locked = 0;
         long ret = 0;
 
-        VM_BUG_ON(start & ~PAGE_MASK);
-        VM_BUG_ON(len != PAGE_ALIGN(len));
         end = start + len;
 
         for (nstart = start; nstart < end; nstart = nend) {
mm/mmap.c
@@ -177,8 +177,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
         return next;
 }
 
-static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf);
-
+static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
+                struct list_head *uf);
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
         unsigned long retval;
@@ -236,7 +236,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
                 goto out;
 
         /* Ok, looks good - let it rip. */
-        if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0)
+        if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
                 goto out;
 
 set_brk:
@@ -2897,21 +2897,14 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
  *  anonymous maps.  eventually we may be able to do some
  *  brk-specific accounting here.
  */
-static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf)
+static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
 {
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma, *prev;
-        unsigned long len;
         struct rb_node **rb_link, *rb_parent;
         pgoff_t pgoff = addr >> PAGE_SHIFT;
         int error;
 
-        len = PAGE_ALIGN(request);
-        if (len < request)
-                return -ENOMEM;
-        if (!len)
-                return 0;
-
         /* Until we need other flags, refuse anything except VM_EXEC. */
         if ((flags & (~VM_EXEC)) != 0)
                 return -EINVAL;
@@ -2983,18 +2976,20 @@ out:
         return 0;
 }
 
-static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf)
-{
-        return do_brk_flags(addr, len, 0, uf);
-}
-
-int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags)
+int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
 {
         struct mm_struct *mm = current->mm;
+        unsigned long len;
         int ret;
         bool populate;
         LIST_HEAD(uf);
 
+        len = PAGE_ALIGN(request);
+        if (len < request)
+                return -ENOMEM;
+        if (!len)
+                return 0;
+
         if (down_write_killable(&mm->mmap_sem))
                 return -EINTR;
 
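Taken together, the mm/gup.c and mm/mmap.c hunks stop asserting on unaligned or overflowing lengths deep inside __mm_populate()/do_brk_flags() and instead validate the caller-supplied size once, in vm_brk_flags(). The standalone sketch below shows that round-up-and-check-for-wraparound idiom, assuming a fixed 4096-byte page; the helper name is invented.

#include <stdio.h>
#include <errno.h>

#define PAGE_SIZE 4096UL

/* Round request up to a page multiple; fail cleanly if that overflows. */
static int page_align_checked(unsigned long request, unsigned long *out)
{
        unsigned long len = (request + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

        if (len < request)
                return -ENOMEM;   /* wrapped around: request was too large */
        *out = len;
        return 0;
}

int main(void)
{
        unsigned long len;

        if (!page_align_checked(5000, &len))
                printf("5000 -> %lu\n", len);            /* 8192 */
        if (page_align_checked(-1UL, &len) == -ENOMEM)
                printf("huge request rejected\n");
        return 0;
}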
mm/rmap.c
@@ -64,6 +64,7 @@
 #include <linux/backing-dev.h>
 #include <linux/page_idle.h>
 #include <linux/memremap.h>
+#include <linux/userfaultfd_k.h>
 
 #include <asm/tlbflush.h>
 
@@ -1476,11 +1477,16 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                 set_pte_at(mm, address, pvmw.pte, pteval);
                         }
 
-                } else if (pte_unused(pteval)) {
+                } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
                         /*
                          * The guest indicated that the page content is of no
                          * interest anymore. Simply discard the pte, vmscan
                          * will take care of the rest.
+                         * A future reference will then fault in a new zero
+                         * page. When userfaultfd is active, we must not drop
+                         * this page though, as its main user (postcopy
+                         * migration) will not expect userfaults on already
+                         * copied pages.
                          */
                         dec_mm_counter(mm, mm_counter(page));
                 } else if (IS_ENABLED(CONFIG_MIGRATION) &&
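try_to_unmap_one() now leaves an unused page in place when the VMA is registered with userfaultfd, because postcopy live migration relies on a userfault being raised for every page it has not copied yet. For context, the sketch below shows roughly how a region gets "armed" from userspace with the real userfaultfd ioctls; the fault-serving thread a real user needs is omitted and error handling is trimmed.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
        size_t len = 4096;
        void *area;
        struct uffdio_api api = { .api = UFFD_API };
        struct uffdio_register reg;
        int uffd = (int)syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);

        if (uffd < 0) {
                perror("userfaultfd");
                return 1;
        }
        if (ioctl(uffd, UFFDIO_API, &api) < 0) {
                perror("UFFDIO_API");
                return 1;
        }

        area = mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (area == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* After this ioctl the VMA is "armed": missing-page faults in
         * [area, area+len) are reported on uffd instead of being filled
         * with zero pages behind the process's back. */
        memset(&reg, 0, sizeof(reg));
        reg.range.start = (unsigned long)area;
        reg.range.len = len;
        reg.mode = UFFDIO_REGISTER_MODE_MISSING;
        if (ioctl(uffd, UFFDIO_REGISTER, &reg) < 0) {
                perror("UFFDIO_REGISTER");
                return 1;
        }
        printf("registered %zu bytes at %p with userfaultfd\n", len, area);

        close(uffd);
        return 0;
}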
net/bridge/netfilter/ebtables.c
@@ -696,6 +696,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
         }
         i = 0;
 
+        memset(&mtpar, 0, sizeof(mtpar));
+        memset(&tgpar, 0, sizeof(tgpar));
         mtpar.net       = tgpar.net       = net;
         mtpar.table     = tgpar.table     = name;
         mtpar.entryinfo = tgpar.entryinfo = e;
net/ipv4/netfilter/ip_tables.c
@@ -541,6 +541,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
                 return -ENOMEM;
 
         j = 0;
+        memset(&mtpar, 0, sizeof(mtpar));
         mtpar.net       = net;
         mtpar.table     = name;
         mtpar.entryinfo = &e->ip;
net/ipv6/netfilter/ip6_tables.c
@@ -561,6 +561,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
                 return -ENOMEM;
 
         j = 0;
+        memset(&mtpar, 0, sizeof(mtpar));
         mtpar.net       = net;
         mtpar.table     = name;
         mtpar.entryinfo = &e->ipv6;
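All three hunks above zero the match/target check parameter structures on the stack before setting a few members, so extensions that consult fields this path never fills in see zeroes rather than stack garbage. A generic illustration of the pattern with an invented struct (not the netfilter types) follows.

#include <stdio.h>
#include <string.h>

struct check_param {
        const char *table;
        unsigned int hook_mask;   /* often left untouched by some callers */
        unsigned char family;
};

static void print_param(const char *tag, const struct check_param *p)
{
        printf("%s: table=%s hook_mask=%#x family=%u\n",
               tag, p->table, p->hook_mask, p->family);
}

int main(void)
{
        struct check_param p;

        /* Without the memset, hook_mask and family would hold whatever
         * happened to be on the stack; a checker that consults them
         * would act on garbage. Zero everything, then set what we know. */
        memset(&p, 0, sizeof(p));
        p.table = "filter";
        print_param("zeroed", &p);
        return 0;
}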
net/netfilter/nfnetlink_queue.c
@@ -1228,6 +1228,9 @@ static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl,
 static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
         [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
         [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
+        [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
+        [NFQA_CFG_MASK]         = { .type = NLA_U32 },
+        [NFQA_CFG_FLAGS]        = { .type = NLA_U32 },
 };
 
 static const struct nf_queue_handler nfqh = {
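The new nfqa_cfg_policy entries attach an expected type and length to every NFQA_CFG_* attribute, so undersized attributes are rejected during policy validation before the handler reads them. Below is a minimal, generic sketch of length-checking TLV-style attributes against a table; the attribute names and sizes are made up and this is not the netlink validator.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

enum { CFG_CMD = 1, CFG_PARAMS, CFG_QUEUE_MAXLEN, CFG_MAX = CFG_QUEUE_MAXLEN };

/* Minimum payload length we accept for each attribute type. */
static const size_t min_len[CFG_MAX + 1] = {
        [CFG_CMD]          = 4,
        [CFG_PARAMS]       = 8,
        [CFG_QUEUE_MAXLEN] = sizeof(uint32_t),
};

static int validate_attr(int type, size_t len)
{
        if (type <= 0 || type > CFG_MAX)
                return -EINVAL;            /* unknown attribute */
        if (len < min_len[type])
                return -EINVAL;            /* shorter than the policy allows */
        return 0;
}

int main(void)
{
        printf("maxlen, 4 bytes: %d\n", validate_attr(CFG_QUEUE_MAXLEN, 4));
        printf("maxlen, 2 bytes: %d\n", validate_attr(CFG_QUEUE_MAXLEN, 2));
        return 0;
}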
sound/pci/hda/patch_hdmi.c
@@ -33,6 +33,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <sound/core.h>
 #include <sound/jack.h>
 #include <sound/asoundef.h>
@@ -764,8 +765,10 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid,
 
         if (pin_idx < 0)
                 return;
+        mutex_lock(&spec->pcm_lock);
         if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
                 snd_hda_jack_report_sync(codec);
+        mutex_unlock(&spec->pcm_lock);
 }
 
 static void jack_callback(struct hda_codec *codec,
@@ -1628,21 +1631,23 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
 static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
 {
         struct hda_codec *codec = per_pin->codec;
-        struct hdmi_spec *spec = codec->spec;
         int ret;
 
         /* no temporary power up/down needed for component notifier */
-        if (!codec_has_acomp(codec))
-                snd_hda_power_up_pm(codec);
+        if (!codec_has_acomp(codec)) {
+                ret = snd_hda_power_up_pm(codec);
+                if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec))) {
+                        snd_hda_power_down_pm(codec);
+                        return false;
+                }
+        }
 
-        mutex_lock(&spec->pcm_lock);
         if (codec_has_acomp(codec)) {
                 sync_eld_via_acomp(codec, per_pin);
                 ret = false; /* don't call snd_hda_jack_report_sync() */
         } else {
                 ret = hdmi_present_sense_via_verbs(per_pin, repoll);
         }
-        mutex_unlock(&spec->pcm_lock);
 
         if (!codec_has_acomp(codec))
                 snd_hda_power_down_pm(codec);
@@ -1654,12 +1659,16 @@ static void hdmi_repoll_eld(struct work_struct *work)
 {
         struct hdmi_spec_per_pin *per_pin =
         container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
+        struct hda_codec *codec = per_pin->codec;
+        struct hdmi_spec *spec = codec->spec;
 
         if (per_pin->repoll_count++ > 6)
                 per_pin->repoll_count = 0;
 
+        mutex_lock(&spec->pcm_lock);
         if (hdmi_present_sense(per_pin, per_pin->repoll_count))
                 snd_hda_jack_report_sync(per_pin->codec);
+        mutex_unlock(&spec->pcm_lock);
 }
 
 static void intel_haswell_fixup_connect_list(struct hda_codec *codec,

sound/pci/hda/patch_realtek.c
@@ -6445,7 +6445,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
         SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
         SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
         SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
-        SND_PCI_QUIRK(0x17aa, 0x3136, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
         SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
         SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
         SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
@@ -6628,6 +6627,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                 {0x1a, 0x02a11040},
                 {0x1b, 0x01014020},
                 {0x21, 0x0221101f}),
+        SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
+                {0x14, 0x90170110},
+                {0x19, 0x02a11020},
+                {0x1a, 0x02a11030},
+                {0x21, 0x0221101f}),
         SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                 {0x12, 0x90a60140},
                 {0x14, 0x90170150},

sound/soc/mediatek/common/mtk-afe-platform-driver.c
@@ -63,13 +63,13 @@ static const struct snd_pcm_ops mtk_afe_pcm_ops = {
 static int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
         size_t size;
-        struct snd_card *card = rtd->card->snd_card;
         struct snd_pcm *pcm = rtd->pcm;
         struct mtk_base_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
 
         size = afe->mtk_afe_hardware->buffer_bytes_max;
         return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
-                                                     card->dev, size, size);
+                                                     rtd->platform->dev,
+                                                     size, size);
 }
 
 static void mtk_afe_pcm_free(struct snd_pcm *pcm)

tools/build/Build.include
@@ -63,8 +63,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
            $(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp;           \
            rm -f $(depfile);                                                    \
            mv -f $(dot-target).tmp $(dot-target).cmd,                           \
-           printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
-           printf '\# using basic dep data\n\n' >> $(dot-target).cmd;           \
+           printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
+           printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd;           \
            cat $(depfile) >> $(dot-target).cmd;                                 \
            printf '\n%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)