mirror of
https://github.com/rd-stuffs/msm-4.14.git
synced 2025-02-20 11:45:48 +08:00
Merge remote-tracking branch 'remotes/origin/tmp-3afae84' into msm-4.14
* remotes/origin/tmp-3afae84:
  Linux 4.14.7
  dvb_frontend: don't use-after-free the frontend struct
  media: dvb-core: always call invoke_release() in fe_free()
  x86/intel_rdt: Fix potential deadlock during resctrl unmount
  RDMA/cxgb4: Annotate r2 and stag as __be32
  md: free unused memory after bitmap resize
  dm raid: fix panic when attempting to force a raid to sync
  audit: ensure that 'audit=1' actually enables audit for PID 1
  audit: Allow auditd to set pid to 0 to end auditing
  nvmet-rdma: update queue list during ib_device removal
  blk-mq: Avoid that request queue removal can trigger list corruption
  ide: ide-atapi: fix compile error with defining macro DEBUG
  ipvlan: fix ipv6 outbound device
  powerpc/powernv/idle: Round up latency and residency values
  kbuild: do not call cc-option before KBUILD_CFLAGS initialization
  KVM: arm/arm64: vgic-its: Preserve the revious read from the pending table
  fix kcm_clone()
  fcntl: don't cap l_start and l_end values for F_GETLK64 in compat syscall
  usb: gadget: ffs: Forbid usb_ep_alloc_request from sleeping
  ipmi: Stop timers before cleaning up the module
  sctp: use right member as the param of list_for_each_entry
  cls_bpf: don't decrement net's refcount when offload fails
  net: openvswitch: datapath: fix data type in queue_gso_packets
  net: accept UFO datagrams from tuntap and packet
  tun: fix rcu_read_lock imbalance in tun_build_skb
  net: ipv6: Fixup device for anycast routes during copy
  tun: free skb in early errors
  tcp: when scheduling TLP, time of RTO should account for current ACK
  tap: free skb if flags error
  net: sched: cbq: create block for q->link.block
  tcp: use current time in tcp_rcv_space_adjust()
  tipc: call tipc_rcv() only if bearer is up in tipc_udp_recv()
  tcp: use IPCB instead of TCP_SKB_CB in inet_exact_dif_match()
  s390/qeth: fix GSO throughput regression
  s390/qeth: fix thinko in IPv4 multicast address tracking
  s390/qeth: build max size GSO skbs on L2 devices
  tcp/dccp: block bh before arming time_wait timer
  stmmac: reset last TSO segment size after device open
  net: remove hlist_nulls_add_tail_rcu()
  usbnet: fix alignment for frames with no ethernet header
  tcp: remove buggy call to tcp_v6_restore_cb()
  net/packet: fix a race in packet_bind() and packet_notifier()
  packet: fix crash in fanout_demux_rollover()
  tcp: add tcp_v4_fill_cb()/tcp_v4_restore_cb()
  sit: update frag_off info
  rds: Fix NULL pointer dereference in __rds_rdma_map
  vhost: fix skb leak in handle_rx()
  tipc: fix memory leak in tipc_accept_from_sock()
  s390/qeth: fix early exit from error path
  net: realtek: r8169: implement set_link_ksettings()
  net: thunderx: Fix TCP/UDP checksum offload for IPv4 pkts
  net: thunderx: Fix TCP/UDP checksum offload for IPv6 pkts
  net: qmi_wwan: add Quectel BG96 2c7c:0296
  Linux 4.14.6
  afs: Connect up the CB.ProbeUuid
  afs: Fix total-length calculation for multiple-page send
  IB/mlx5: Assign send CQ and recv CQ of UMR QP
  IB/mlx4: Increase maximal message size under UD QP
  bnxt_re: changing the ip address shouldn't affect new connections
  f2fs: fix to clear FI_NO_PREALLOC
  xfrm: Copy policy family in clone_policy
  tls: Use kzalloc for aead_request allocation
  jump_label: Invoke jump_label_test() via early_initcall()
  atm: horizon: Fix irq release error
  kbuild: rpm-pkg: fix jobserver unavailable warning
  mailbox: mailbox-test: don't rely on rx_buffer content to signal data ready
  clk: hi3660: fix incorrect uart3 clock freqency
  clk: uniphier: fix DAPLL2 clock rate of Pro5
  clk: qcom: common: fix legacy board-clock registration
  clk: sunxi-ng: a83t: Fix i2c buses bits
  clk: stm32h7: fix test of clock config
  bpf: fix lockdep splat
  geneve: fix fill_info when link down
  fcntl: don't leak fd reference when fixup_compat_flock fails
  sctp: use the right sk after waking up from wait_buf sleep
  sctp: do not free asoc when it is already dead in sctp_sendmsg
  slub: fix sysfs duplicate filename creation when slub_debug=O
  zsmalloc: calling zs_map_object() from irq is a bug
  sparc64/mm: set fields in deferred pages
  block: wake up all tasks blocked in get_request()
  dt-bindings: usb: fix reg-property port-number range
  xfs: fix forgotten rcu read unlock when skipping inode reclaim
  nfp: fix flower offload metadata flag usage
  nfp: inherit the max_mtu from the PF netdev
  sunrpc: Fix rpc_task_begin trace point
  NFS: Fix a typo in nfs_rename()
  dynamic-debug-howto: fix optional/omitted ending line number to be LARGE instead of 0
  lib/genalloc.c: make the avail variable an atomic_long_t
  pipe: match pipe_max_size data type with procfs
  drivers/rapidio/devices/rio_mport_cdev.c: fix resource leak in error handling path in 'rio_dma_transfer()'
  rsi: fix memory leak on buf and usb_reg_buf
  route: update fnhe_expires for redirect when the fnhe exists
  route: also update fnhe_genid when updating a route cache
  gre6: use log_ecn_error module parameter in ip6_tnl_rcv()
  mac80211_hwsim: Fix memory leak in hwsim_new_radio_nl()
  x86/mpx/selftests: Fix up weird arrays
  apparmor: fix leak of null profile name if profile allocation fails
  powerpc/perf: Fix pmu_count to count only nest imc pmus
  coccinelle: fix parallel build with CHECK=scripts/coccicheck
  kbuild: pkg: use --transform option to prefix paths in tar
  net/smc: use sk_rcvbuf as start for rmb creation
  irqchip/qcom: Fix u32 comparison with value less than zero
  ARM: avoid faulting on qemu
  ARM: BUG if jumping to usermode address in kernel mode
  crypto: talitos - fix ctr-aes-talitos
  crypto: talitos - fix use of sg_link_tbl_len
  crypto: talitos - fix AEAD for sha224 on non sha224 capable chips
  crypto: talitos - fix setkey to check key weakness
  crypto: talitos - fix memory corruption on SEC2
  crypto: talitos - fix AEAD test failures
  IB/core: Only enforce security for InfiniBand
  IB/core: Avoid unnecessary return value check
  bus: arm-ccn: fix module unloading Error: Removing state 147 which has instances left.
  bus: arm-ccn: Fix use of smp_processor_id() in preemptible context
  bus: arm-ccn: Check memory allocation failure
  bus: arm-cci: Fix use of smp_processor_id() in preemptible context
  Revert "ARM: dts: imx53: add srtc node"
  arm64: SW PAN: Update saved ttbr0 value on enter_lazy_tlb
  arm64: SW PAN: Point saved ttbr0 at the zero page when switching to init_mm
  arm64: fpsimd: Prevent registers leaking from dead tasks
  KVM: arm/arm64: vgic-its: Check result of allocation before use
  KVM: arm/arm64: vgic: Preserve the revious read from the pending table
  KVM: arm/arm64: vgic-irqfd: Fix MSI entry allocation
  KVM: arm/arm64: Fix broken GICH_ELRSR big endian conversion
  KVM: VMX: remove I/O port 0x80 bypass on Intel hosts
  arm: KVM: Fix VTTBR_BADDR_MASK BUG_ON off-by-one
  arm64: KVM: fix VTTBR_BADDR_MASK BUG_ON off-by-one
  media: rc: partial revert of "media: rc: per-protocol repeat period"
  media: rc: sir_ir: detect presence of port
  media: dvb: i2c transfers over usb cannot be done from stack
  drm/i915: Fix vblank timestamp/frame counter jumps on gen2
  drm/exynos: gem: Drop NONCONTIG flag for buffers allocated without IOMMU
  drm/bridge: analogix dp: Fix runtime PM state in get_modes() callback
  md/r5cache: move mddev_lock() out of r5c_journal_mode_set()
  kdb: Fix handling of kallsyms_symbol_next() return value
  brcmfmac: change driver unbind order of the sdio function devices
  iwlwifi: mvm: enable RX offloading with TKIP and WEP
  iwlwifi: mvm: fix packet injection
  iwlwifi: add new cards for 9260 and 22000 series
  iwlwifi: mvm: flush queue before deleting ROC
  iwlwifi: mvm: don't use transmit queue hang detection when it is not possible
  iwlwifi: mvm: mark MIC stripped MPDUs
  powerpc/64s: Initialize ISAv3 MMU registers before setting partition table
  Revert "powerpc: Do not call ppc_md.panic in fadump panic notifier"
  KVM: s390: Fix skey emulation permission check
  s390: fix compat system call table
  s390/mm: fix off-by-one bug in 5-level page table handling
  s390: always save and restore all registers on context switch
  smp/hotplug: Move step CPUHP_AP_SMPCFD_DYING to the correct place
  iommu/vt-d: Fix scatterlist offset handling
  ALSA: usb-audio: Add check return value for usb_string()
  ALSA: usb-audio: Fix out-of-bound error
  ALSA: seq: Remove spurious WARN_ON() at timer check
  ALSA: pcm: prevent UAF in snd_pcm_info
  ALSA: hda/realtek - New codec support for ALC257
  btrfs: handle errors while updating refcounts in update_ref_for_cow
  btrfs: fix missing error return in btrfs_drop_snapshot
  KVM: x86: fix APIC page invalidation
  x86/PCI: Make broadcom_postcore_init() check acpi_disabled
  x86/idt: Load idt early in start_secondary
  X.509: fix comparisons of ->pkey_algo
  X.509: reject invalid BIT STRING for subjectPublicKey
  KEYS: reject NULL restriction string when type is specified
  KEYS: add missing permission check for request_key() destination
  ASN.1: check for error from ASN1_OP_END__ACT actions
  ASN.1: fix out-of-bounds read when parsing indefinite length item
  efi/esrt: Use memunmap() instead of kfree() to free the remapping
  efi: Move some sysfs files to be read-only by root
  scsi: libsas: align sata_device's rps_resp on a cacheline
  scsi: use dma_get_cache_alignment() as minimum DMA alignment
  scsi: dma-mapping: always provide dma_get_cache_alignment
  isa: Prevent NULL dereference in isa_bus driver callbacks
  firmware: vpd: Fix platform driver and device registration/unregistration
  firmware: vpd: Tie firmware kobject to device lifetime
  firmware: vpd: Destroy vpd sections in remove function
  firmware: cleanup FIRMWARE_IN_KERNEL message
  hv: kvp: Avoid reading past allocated blocks from KVP file
  Drivers: hv: vmbus: Fix a rescind issue
  pinctrl: armada-37xx: Fix direction_output() callback behavior
  iio: adc: meson-saradc: Meson8 and Meson8b do not have REG11 and REG13
  iio: adc: meson-saradc: initialize the bandgap correctly on older SoCs
  iio: adc: meson-saradc: fix the bit_idx of the adc_en clock
  iio: adc: cpcap: fix incorrect validation
  iio: health: max30102: Temperature should be in milli Celsius
  iio: stm32: fix adc/trigger link error
  virtio: release virtio index when fail to device_register
  can: peak/pcie_fd: fix potential bug in restarting tx queue
  can: usb_8dev: cancel urb on -EPIPE and -EPROTO
  can: esd_usb2: cancel urb on -EPIPE and -EPROTO
  can: ems_usb: cancel urb on -EPIPE and -EPROTO
  can: mcba_usb: cancel urb on -EPROTO
  can: kvaser_usb: cancel urb on -EPIPE and -EPROTO
  can: kvaser_usb: ratelimit errors if incomplete messages are received
  can: kvaser_usb: Fix comparison bug in kvaser_usb_read_bulk_callback()
  can: kvaser_usb: free buf in error paths
  can: ti_hecc: Fix napi poll return value for repoll
  can: flexcan: fix VF610 state transition issue
  can: peak/pci: fix potential bug when probe() fails
  can: mcba_usb: fix device disconnect bug
  usb: f_fs: Force Reserved1=1 in OS_DESC_EXT_COMPAT
  serdev: ttyport: fix tty locking in close
  serdev: ttyport: fix NULL-deref on hangup
  serdev: ttyport: add missing receive_buf sanity checks
  usb: gadget: core: Fix ->udc_set_speed() speed handling
  usb: gadget: udc: renesas_usb3: fix number of the pipes

Change-Id: I47977dc6948f8e5edbcd21770a63242e86adcb3b
Signed-off-by: Runmin Wang <runminw@codeaurora.org>
commit b716d1c640
@@ -11,7 +11,7 @@ Required properties:
be used, but a device adhering to this binding may leave out all except
for usbVID,PID.
- reg: the port number which this device is connecting to, the range
is 1-31.
is 1-255.

Example:

Makefile (22 changed lines)
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 5
SUBLEVEL = 7
EXTRAVERSION =
NAME = Petit Gorille

@@ -377,8 +377,6 @@ LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)

# Use USERINCLUDE when you must reference the UAPI directories only.
USERINCLUDE := \
@@ -397,21 +395,19 @@ LINUXINCLUDE := \
-I$(objtree)/include \
$(USERINCLUDE)

KBUILD_CPPFLAGS := -D__KERNEL__

KBUILD_AFLAGS := -D__ASSEMBLY__
KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -fno-common -fshort-wchar \
-Werror-implicit-function-declaration \
-Wno-format-security \
-std=gnu89 $(call cc-option,-fno-PIE)


-std=gnu89
KBUILD_CPPFLAGS := -D__KERNEL__
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE
KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
GCC_PLUGINS_CFLAGS :=

# Read KERNELRELEASE from include/config/kernel.release (if it exists)
KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
@@ -424,7 +420,7 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS

export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -625,6 +621,12 @@ endif
# Defaults to vmlinux, but the arch makefile usually adds further targets
all: vmlinux

KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
KBUILD_AFLAGS += $(call cc-option,-fno-PIE)
CFLAGS_GCOV := -fprofile-arcs -ftest-coverage -fno-tree-loop-im $(call cc-disable-warning,maybe-uninitialized,)
CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
export CFLAGS_GCOV CFLAGS_KCOV

# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
# values of the respective KBUILD_* variables
ARCH_CPPFLAGS :=

@@ -433,15 +433,6 @@
clock-names = "ipg", "per";
};

srtc: srtc@53fa4000 {
compatible = "fsl,imx53-rtc", "fsl,imx25-rtc";
reg = <0x53fa4000 0x4000>;
interrupts = <24>;
interrupt-parent = <&tzic>;
clocks = <&clks IMX5_CLK_SRTC_GATE>;
clock-names = "ipg";
};

iomuxc: iomuxc@53fa8000 {
compatible = "fsl,imx53-iomuxc";
reg = <0x53fa8000 0x4000>;

@@ -518,4 +518,22 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm

.macro bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1: .inst 0xde02
#else
1: .inst 0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
.pushsection .rodata.str, "aMS", %progbits, 1
2: .asciz "\msg"
.popsection
.pushsection __bug_table, "aw"
.align 2
.word 1b, 2b
.hword \line
.popsection
#endif
.endm

#endif /* __ASM_ASSEMBLER_H__ */

@@ -161,8 +161,7 @@
#else
#define VTTBR_X (5 - KVM_T0SZ)
#endif
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
#define VTTBR_VMID_SHIFT _AC(48, ULL)
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)

@@ -300,6 +300,8 @@
mov r2, sp
ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
ldr lr, [r2, #\offset + S_PC]! @ get pc
tst r1, #PSR_I_BIT | 0x0f
bne 1f
msr spsr_cxsf, r1 @ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -314,6 +316,7 @@
@ after ldm {}^
add sp, sp, #\offset + PT_REGS_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
1: bug "Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
@ V7M restore.
@ Note that we don't need to do clrex here as clearing the local
@@ -329,6 +332,8 @@
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC] @ get pc
add sp, sp, #\offset + S_SP
tst r1, #PSR_I_BIT | 0x0f
bne 1f
msr spsr_cxsf, r1 @ save in spsr_svc

@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -341,6 +346,7 @@
.endif
add sp, sp, #PT_REGS_SIZE - S_SP
movs pc, lr @ return & move spsr_svc into cpsr
1: bug "Returning to usermode but unexpected PSR bits set?", \@
#endif /* !CONFIG_THUMB2_KERNEL */
.endm

@@ -215,7 +215,6 @@ typedef struct compat_siginfo {
} compat_siginfo_t;

#define COMPAT_OFF_T_MAX 0x7fffffff
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL

/*
* A pointer passed in from user mode. This should not

@@ -132,11 +132,9 @@ static inline void efi_set_pgd(struct mm_struct *mm)
* Defer the switch to the current thread's TTBR0_EL1
* until uaccess_enable(). Restore the current
* thread's saved ttbr0 corresponding to its active_mm
* (if different from init_mm).
*/
cpu_set_reserved_ttbr0();
if (current->active_mm != &init_mm)
update_saved_ttbr0(current, current->active_mm);
update_saved_ttbr0(current, current->active_mm);
}
}
}

@@ -170,8 +170,7 @@
#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
#define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)

#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
#define VTTBR_VMID_SHIFT (UL(48))
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)

@@ -162,29 +162,21 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);

#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })

/*
* This is called when "tsk" is about to enter lazy TLB mode.
*
* mm: describes the currently active mm context
* tsk: task which is entering lazy tlb
* cpu: cpu number which is entering lazy tlb
*
* tsk->mm will be NULL
*/
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
struct mm_struct *mm)
{
if (system_uses_ttbr0_pan()) {
BUG_ON(mm->pgd == swapper_pg_dir);
task_thread_info(tsk)->ttbr0 =
virt_to_phys(mm->pgd) | ASID(mm) << 48;
}
u64 ttbr;

if (!system_uses_ttbr0_pan())
return;

if (mm == &init_mm)
ttbr = __pa_symbol(empty_zero_page);
else
ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;

task_thread_info(tsk)->ttbr0 = ttbr;
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -193,6 +185,16 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
}
#endif

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
/*
* We don't actually care about the ttbr0 mapping, so point it at the
* zero page.
*/
update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
unsigned int cpu = smp_processor_id();
@@ -220,11 +222,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
* Update the saved TTBR0_EL1 of the scheduled-in task as the previous
* value may have not been initialised yet (activate_mm caller) or the
* ASID has changed since the last run (following the context switch
* of another thread of the same process). Avoid setting the reserved
* TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
* of another thread of the same process).
*/
if (next != &init_mm)
update_saved_ttbr0(tsk, next);
update_saved_ttbr0(tsk, next);
}

#define deactivate_mm(tsk,mm) do { } while (0)

@@ -319,6 +319,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,

memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

/*
* In case p was allocated the same task_struct pointer as some
* other recently-exited task, make sure p is disassociated from
* any cpu that may have run that now-exited task recently.
* Otherwise we could erroneously skip reloading the FPSIMD
* registers for p.
*/
fpsimd_flush_task_state(p);

if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->regs[0] = 0;

@ -200,7 +200,6 @@ typedef struct compat_siginfo {
|
||||
} compat_siginfo_t;
|
||||
|
||||
#define COMPAT_OFF_T_MAX 0x7fffffff
|
||||
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
|
||||
|
||||
/*
|
||||
* A pointer passed in from user mode. This should not
|
||||
|
@ -195,7 +195,6 @@ typedef struct compat_siginfo {
|
||||
} compat_siginfo_t;
|
||||
|
||||
#define COMPAT_OFF_T_MAX 0x7fffffff
|
||||
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
|
||||
|
||||
struct compat_ipc64_perm {
|
||||
compat_key_t key;
|
||||
|
@ -185,7 +185,6 @@ typedef struct compat_siginfo {
|
||||
} compat_siginfo_t;
|
||||
|
||||
#define COMPAT_OFF_T_MAX 0x7fffffff
|
||||
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
|
||||
|
||||
/*
|
||||
* A pointer passed in from user mode. This should not
|
||||
|
@ -76,6 +76,7 @@ struct machdep_calls {
|
||||
|
||||
void __noreturn (*restart)(char *cmd);
|
||||
void __noreturn (*halt)(void);
|
||||
void (*panic)(char *str);
|
||||
void (*cpu_die)(void);
|
||||
|
||||
long (*time_init)(void); /* Optional, may be NULL */
|
||||
|
@ -24,6 +24,7 @@ extern void reloc_got2(unsigned long);
|
||||
|
||||
void check_for_initrd(void);
|
||||
void initmem_init(void);
|
||||
void setup_panic(void);
|
||||
#define ARCH_PANIC_TIMEOUT 180
|
||||
|
||||
#ifdef CONFIG_PPC_PSERIES
|
||||
|
@ -102,6 +102,7 @@ _GLOBAL(__setup_cpu_power9)
|
||||
li r0,0
|
||||
mtspr SPRN_PSSCR,r0
|
||||
mtspr SPRN_LPID,r0
|
||||
mtspr SPRN_PID,r0
|
||||
mfspr r3,SPRN_LPCR
|
||||
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
|
||||
or r3, r3, r4
|
||||
@ -126,6 +127,7 @@ _GLOBAL(__restore_cpu_power9)
|
||||
li r0,0
|
||||
mtspr SPRN_PSSCR,r0
|
||||
mtspr SPRN_LPID,r0
|
||||
mtspr SPRN_PID,r0
|
||||
mfspr r3,SPRN_LPCR
|
||||
LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
|
||||
or r3, r3, r4
|
||||
|
@ -1453,25 +1453,6 @@ static void fadump_init_files(void)
|
||||
return;
|
||||
}
|
||||
|
||||
static int fadump_panic_event(struct notifier_block *this,
|
||||
unsigned long event, void *ptr)
|
||||
{
|
||||
/*
|
||||
* If firmware-assisted dump has been registered then trigger
|
||||
* firmware-assisted dump and let firmware handle everything
|
||||
* else. If this returns, then fadump was not registered, so
|
||||
* go through the rest of the panic path.
|
||||
*/
|
||||
crash_fadump(NULL, ptr);
|
||||
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static struct notifier_block fadump_panic_block = {
|
||||
.notifier_call = fadump_panic_event,
|
||||
.priority = INT_MIN /* may not return; must be done last */
|
||||
};
|
||||
|
||||
/*
|
||||
* Prepare for firmware-assisted dump.
|
||||
*/
|
||||
@ -1504,9 +1485,6 @@ int __init setup_fadump(void)
|
||||
init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
|
||||
fadump_init_files();
|
||||
|
||||
atomic_notifier_chain_register(&panic_notifier_list,
|
||||
&fadump_panic_block);
|
||||
|
||||
return 1;
|
||||
}
|
||||
subsys_initcall(setup_fadump);
|
||||
|
@ -704,6 +704,30 @@ int check_legacy_ioport(unsigned long base_port)
|
||||
}
|
||||
EXPORT_SYMBOL(check_legacy_ioport);
|
||||
|
||||
static int ppc_panic_event(struct notifier_block *this,
|
||||
unsigned long event, void *ptr)
|
||||
{
|
||||
/*
|
||||
* If firmware-assisted dump has been registered then trigger
|
||||
* firmware-assisted dump and let firmware handle everything else.
|
||||
*/
|
||||
crash_fadump(NULL, ptr);
|
||||
ppc_md.panic(ptr); /* May not return */
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static struct notifier_block ppc_panic_block = {
|
||||
.notifier_call = ppc_panic_event,
|
||||
.priority = INT_MIN /* may not return; must be done last */
|
||||
};
|
||||
|
||||
void __init setup_panic(void)
|
||||
{
|
||||
if (!ppc_md.panic)
|
||||
return;
|
||||
atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CHECK_CACHE_COHERENCY
|
||||
/*
|
||||
* For platforms that have configurable cache-coherency. This function
|
||||
@ -848,6 +872,9 @@ void __init setup_arch(char **cmdline_p)
|
||||
/* Probe the machine type, establish ppc_md. */
|
||||
probe_machine();
|
||||
|
||||
/* Setup panic notifier if requested by the platform. */
|
||||
setup_panic();
|
||||
|
||||
/*
|
||||
* Configure ppc_md.power_save (ppc32 only, 64-bit machines do
|
||||
* it from their respective probe() function.
|
||||
|
@ -191,8 +191,10 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!imc_pmu_create(imc_dev, pmu_count, domain))
|
||||
pmu_count++;
|
||||
if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
|
||||
if (domain == IMC_DOMAIN_NEST)
|
||||
pmu_count++;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -104,6 +104,20 @@ static void __noreturn ps3_halt(void)
|
||||
ps3_sys_manager_halt(); /* never returns */
|
||||
}
|
||||
|
||||
static void ps3_panic(char *str)
|
||||
{
|
||||
DBG("%s:%d %s\n", __func__, __LINE__, str);
|
||||
|
||||
smp_send_stop();
|
||||
printk("\n");
|
||||
printk(" System does not reboot automatically.\n");
|
||||
printk(" Please press POWER button.\n");
|
||||
printk("\n");
|
||||
|
||||
while(1)
|
||||
lv1_pause(1);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_FB_PS3) || defined(CONFIG_FB_PS3_MODULE) || \
|
||||
defined(CONFIG_PS3_FLASH) || defined(CONFIG_PS3_FLASH_MODULE)
|
||||
static void __init prealloc(struct ps3_prealloc *p)
|
||||
@ -255,6 +269,7 @@ define_machine(ps3) {
|
||||
.probe = ps3_probe,
|
||||
.setup_arch = ps3_setup_arch,
|
||||
.init_IRQ = ps3_init_IRQ,
|
||||
.panic = ps3_panic,
|
||||
.get_boot_time = ps3_get_boot_time,
|
||||
.set_dabr = ps3_set_dabr,
|
||||
.calibrate_decr = ps3_calibrate_decr,
|
||||
|
@ -726,6 +726,7 @@ define_machine(pseries) {
|
||||
.pcibios_fixup = pSeries_final_fixup,
|
||||
.restart = rtas_restart,
|
||||
.halt = rtas_halt,
|
||||
.panic = rtas_os_term,
|
||||
.get_boot_time = rtas_get_boot_time,
|
||||
.get_rtc_time = rtas_get_rtc_time,
|
||||
.set_rtc_time = rtas_set_rtc_time,
|
||||
|
@ -263,7 +263,6 @@ typedef struct compat_siginfo {
|
||||
#define si_overrun _sifields._timer._overrun
|
||||
|
||||
#define COMPAT_OFF_T_MAX 0x7fffffff
|
||||
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
|
||||
|
||||
/*
|
||||
* A pointer passed in from user mode. This should not
|
||||
|
@ -30,21 +30,20 @@ static inline void restore_access_regs(unsigned int *acrs)
|
||||
asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
|
||||
}
|
||||
|
||||
#define switch_to(prev,next,last) do { \
|
||||
if (prev->mm) { \
|
||||
save_fpu_regs(); \
|
||||
save_access_regs(&prev->thread.acrs[0]); \
|
||||
save_ri_cb(prev->thread.ri_cb); \
|
||||
save_gs_cb(prev->thread.gs_cb); \
|
||||
} \
|
||||
#define switch_to(prev, next, last) do { \
|
||||
/* save_fpu_regs() sets the CIF_FPU flag, which enforces \
|
||||
* a restore of the floating point / vector registers as \
|
||||
* soon as the next task returns to user space \
|
||||
*/ \
|
||||
save_fpu_regs(); \
|
||||
save_access_regs(&prev->thread.acrs[0]); \
|
||||
save_ri_cb(prev->thread.ri_cb); \
|
||||
save_gs_cb(prev->thread.gs_cb); \
|
||||
update_cr_regs(next); \
|
||||
if (next->mm) { \
|
||||
set_cpu_flag(CIF_FPU); \
|
||||
restore_access_regs(&next->thread.acrs[0]); \
|
||||
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
|
||||
restore_gs_cb(next->thread.gs_cb); \
|
||||
} \
|
||||
prev = __switch_to(prev,next); \
|
||||
restore_access_regs(&next->thread.acrs[0]); \
|
||||
restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
|
||||
restore_gs_cb(next->thread.gs_cb); \
|
||||
prev = __switch_to(prev, next); \
|
||||
} while (0)
|
||||
|
||||
#endif /* __ASM_SWITCH_TO_H */
|
||||
|
@ -370,10 +370,10 @@ SYSCALL(sys_recvmmsg,compat_sys_recvmmsg)
|
||||
SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
|
||||
SYSCALL(sys_socket,sys_socket)
|
||||
SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */
|
||||
SYSCALL(sys_bind,sys_bind)
|
||||
SYSCALL(sys_connect,sys_connect)
|
||||
SYSCALL(sys_bind,compat_sys_bind)
|
||||
SYSCALL(sys_connect,compat_sys_connect)
|
||||
SYSCALL(sys_listen,sys_listen)
|
||||
SYSCALL(sys_accept4,sys_accept4)
|
||||
SYSCALL(sys_accept4,compat_sys_accept4)
|
||||
SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */
|
||||
SYSCALL(sys_setsockopt,compat_sys_setsockopt)
|
||||
SYSCALL(sys_getsockname,compat_sys_getsockname)
|
||||
|
@ -235,8 +235,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
|
||||
VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
|
||||
return -EAGAIN;
|
||||
}
|
||||
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
||||
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -247,6 +245,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
|
||||
int reg1, reg2;
|
||||
int rc;
|
||||
|
||||
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
||||
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
||||
|
||||
rc = try_handle_skey(vcpu);
|
||||
if (rc)
|
||||
return rc != -EAGAIN ? rc : 0;
|
||||
@ -276,6 +277,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
|
||||
int reg1, reg2;
|
||||
int rc;
|
||||
|
||||
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
||||
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
||||
|
||||
rc = try_handle_skey(vcpu);
|
||||
if (rc)
|
||||
return rc != -EAGAIN ? rc : 0;
|
||||
@ -311,6 +315,9 @@ static int handle_sske(struct kvm_vcpu *vcpu)
|
||||
int reg1, reg2;
|
||||
int rc;
|
||||
|
||||
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
|
||||
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
|
||||
|
||||
rc = try_handle_skey(vcpu);
|
||||
if (rc)
|
||||
return rc != -EAGAIN ? rc : 0;
|
||||
|
@ -85,8 +85,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
|
||||
|
||||
/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
|
||||
VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
|
||||
if (end >= TASK_SIZE_MAX)
|
||||
return -ENOMEM;
|
||||
rc = 0;
|
||||
notify = 0;
|
||||
while (mm->context.asce_limit < end) {
|
||||
|
@ -209,7 +209,6 @@ typedef struct compat_siginfo {
|
||||
} compat_siginfo_t;
|
||||
|
||||
#define COMPAT_OFF_T_MAX 0x7fffffff
|
||||
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
|
||||
|
||||
/*
|
||||
* A pointer passed in from user mode. This should not
|
||||
|
@ -2540,9 +2540,16 @@ void __init mem_init(void)
|
||||
{
|
||||
high_memory = __va(last_valid_pfn << PAGE_SHIFT);
|
||||
|
||||
register_page_bootmem_info();
|
||||
free_all_bootmem();
|
||||
|
||||
/*
|
||||
* Must be done after boot memory is put on freelist, because here we
|
||||
* might set fields in deferred struct pages that have not yet been
|
||||
* initialized, and free_all_bootmem() initializes all the reserved
|
||||
* deferred pages for us.
|
||||
*/
|
||||
register_page_bootmem_info();
|
||||
|
||||
/*
|
||||
* Set up the zero page, mark it reserved, so that page count
|
||||
* is not manipulated when freeing the page from user ptes.
|
||||
|
@ -173,7 +173,6 @@ typedef struct compat_siginfo {
|
||||
} compat_siginfo_t;
|
||||
|
||||
#define COMPAT_OFF_T_MAX 0x7fffffff
|
||||
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
|
||||
|
||||
struct compat_ipc64_perm {
|
||||
compat_key_t key;
|
||||
|
@ -209,7 +209,6 @@ typedef struct compat_siginfo {
|
||||
} compat_siginfo_t;
|
||||
|
||||
#define COMPAT_OFF_T_MAX 0x7fffffff
|
||||
#define COMPAT_LOFF_T_MAX 0x7fffffffffffffffL
|
||||
|
||||
struct compat_ipc64_perm {
|
||||
compat_key_t key;
|
||||
|
@ -1426,4 +1426,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
|
||||
#endif
|
||||
}
|
||||
|
||||
void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
|
||||
unsigned long start, unsigned long end);
|
||||
|
||||
#endif /* _ASM_X86_KVM_HOST_H */
|
||||
|
@ -1297,9 +1297,7 @@ static void rmdir_all_sub(void)
|
||||
kfree(rdtgrp);
|
||||
}
|
||||
/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
|
||||
get_online_cpus();
|
||||
update_closid_rmid(cpu_online_mask, &rdtgroup_default);
|
||||
put_online_cpus();
|
||||
|
||||
kernfs_remove(kn_info);
|
||||
kernfs_remove(kn_mongrp);
|
||||
@ -1310,6 +1308,7 @@ static void rdt_kill_sb(struct super_block *sb)
|
||||
{
|
||||
struct rdt_resource *r;
|
||||
|
||||
cpus_read_lock();
|
||||
mutex_lock(&rdtgroup_mutex);
|
||||
|
||||
/*Put everything back to default values. */
|
||||
@ -1317,11 +1316,12 @@ static void rdt_kill_sb(struct super_block *sb)
|
||||
reset_all_ctrls(r);
|
||||
cdp_disable();
|
||||
rmdir_all_sub();
|
||||
static_branch_disable(&rdt_alloc_enable_key);
|
||||
static_branch_disable(&rdt_mon_enable_key);
|
||||
static_branch_disable(&rdt_enable_key);
|
||||
static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
|
||||
static_branch_disable_cpuslocked(&rdt_mon_enable_key);
|
||||
static_branch_disable_cpuslocked(&rdt_enable_key);
|
||||
kernfs_kill_sb(sb);
|
||||
mutex_unlock(&rdtgroup_mutex);
|
||||
cpus_read_unlock();
|
||||
}
|
||||
|
||||
static struct file_system_type rdt_fs_type = {
|
||||
|
@ -239,7 +239,7 @@ static void notrace start_secondary(void *unused)
|
||||
load_cr3(swapper_pg_dir);
|
||||
__flush_tlb_all();
|
||||
#endif
|
||||
|
||||
load_current_idt();
|
||||
cpu_init();
|
||||
x86_cpuinit.early_percpu_clock_init();
|
||||
preempt_disable();
|
||||
|
@ -6750,12 +6750,7 @@ static __init int hardware_setup(void)
|
||||
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
|
||||
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
|
||||
|
||||
/*
|
||||
* Allow direct access to the PC debug port (it is often used for I/O
|
||||
* delays, but the vmexits simply slow things down).
|
||||
*/
|
||||
memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
|
||||
clear_bit(0x80, vmx_io_bitmap_a);
|
||||
|
||||
memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
|
||||
|
||||
|
@ -6745,6 +6745,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
|
||||
kvm_x86_ops->tlb_flush(vcpu);
|
||||
}
|
||||
|
||||
void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned long apic_address;
|
||||
|
||||
/*
|
||||
* The physical address of apic access page is stored in the VMCS.
|
||||
* Update it when it becomes invalid.
|
||||
*/
|
||||
apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
|
||||
if (start <= apic_address && apic_address < end)
|
||||
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
|
||||
}
|
||||
|
||||
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct page *page = NULL;
|
||||
|
@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void)
|
||||
* We should get host bridge information from ACPI unless the BIOS
|
||||
* doesn't support it.
|
||||
*/
|
||||
if (acpi_os_get_root_pointer())
|
||||
if (!acpi_disabled && acpi_os_get_root_pointer())
|
||||
return 0;
|
||||
#endif
|
||||
|
||||
|
@ -341,6 +341,7 @@ void blk_sync_queue(struct request_queue *q)
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
int i;
|
||||
|
||||
cancel_delayed_work_sync(&q->requeue_work);
|
||||
queue_for_each_hw_ctx(q, hctx, i)
|
||||
cancel_delayed_work_sync(&hctx->run_work);
|
||||
} else {
|
||||
@ -607,8 +608,8 @@ void blk_set_queue_dying(struct request_queue *q)
|
||||
spin_lock_irq(q->queue_lock);
|
||||
blk_queue_for_each_rl(rl, q) {
|
||||
if (rl->rq_pool) {
|
||||
wake_up(&rl->wait[BLK_RW_SYNC]);
|
||||
wake_up(&rl->wait[BLK_RW_ASYNC]);
|
||||
wake_up_all(&rl->wait[BLK_RW_SYNC]);
|
||||
wake_up_all(&rl->wait[BLK_RW_ASYNC]);
|
||||
}
|
||||
}
|
||||
spin_unlock_irq(q->queue_lock);
|
||||
|
@ -150,7 +150,7 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7,
|
||||
pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
|
||||
sinfo->index, certix);
|
||||
|
||||
if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) {
|
||||
if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) {
|
||||
pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n",
|
||||
sinfo->index);
|
||||
continue;
|
||||
|
@ -409,6 +409,8 @@ int x509_extract_key_data(void *context, size_t hdrlen,
|
||||
ctx->cert->pub->pkey_algo = "rsa";
|
||||
|
||||
/* Discard the BIT STRING metadata */
|
||||
if (vlen < 1 || *(const u8 *)value != 0)
|
||||
return -EBADMSG;
|
||||
ctx->key = value + 1;
|
||||
ctx->key_size = vlen - 1;
|
||||
return 0;
|
||||
|
@ -135,7 +135,7 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
|
||||
}
|
||||
|
||||
ret = -EKEYREJECTED;
|
||||
if (cert->pub->pkey_algo != cert->sig->pkey_algo)
|
||||
if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
|
||||
goto out;
|
||||
|
||||
ret = public_key_verify_signature(cert->pub, cert->sig);
|
||||
|
@ -2803,7 +2803,7 @@ out:
|
||||
return err;
|
||||
|
||||
out_free_irq:
|
||||
free_irq(dev->irq, dev);
|
||||
free_irq(irq, dev);
|
||||
out_free:
|
||||
kfree(dev);
|
||||
out_release:
|
||||
|
@ -91,22 +91,23 @@ config FIRMWARE_IN_KERNEL
|
||||
depends on FW_LOADER
|
||||
default y
|
||||
help
|
||||
The kernel source tree includes a number of firmware 'blobs'
|
||||
that are used by various drivers. The recommended way to
|
||||
use these is to run "make firmware_install", which, after
|
||||
converting ihex files to binary, copies all of the needed
|
||||
binary files in firmware/ to /lib/firmware/ on your system so
|
||||
that they can be loaded by userspace helpers on request.
|
||||
Various drivers in the kernel source tree may require firmware,
|
||||
which is generally available in your distribution's linux-firmware
|
||||
package.
|
||||
|
||||
The linux-firmware package should install firmware into
|
||||
/lib/firmware/ on your system, so they can be loaded by userspace
|
||||
helpers on request.
|
||||
|
||||
Enabling this option will build each required firmware blob
|
||||
into the kernel directly, where request_firmware() will find
|
||||
them without having to call out to userspace. This may be
|
||||
useful if your root file system requires a device that uses
|
||||
such firmware and do not wish to use an initrd.
|
||||
specified by EXTRA_FIRMWARE into the kernel directly, where
|
||||
request_firmware() will find them without having to call out to
|
||||
userspace. This may be useful if your root file system requires a
|
||||
device that uses such firmware and you do not wish to use an
|
||||
initrd.
|
||||
|
||||
This single option controls the inclusion of firmware for
|
||||
every driver that uses request_firmware() and ships its
|
||||
firmware in the kernel source tree, which avoids a
|
||||
every driver that uses request_firmware(), which avoids a
|
||||
proliferation of 'Include firmware for xxx device' options.
|
||||
|
||||
Say 'N' and let firmware be loaded from userspace.
|
||||
|
@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev)
|
||||
{
|
||||
struct isa_driver *isa_driver = dev->platform_data;
|
||||
|
||||
if (isa_driver->probe)
|
||||
if (isa_driver && isa_driver->probe)
|
||||
return isa_driver->probe(dev, to_isa_dev(dev)->id);
|
||||
|
||||
return 0;
|
||||
@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev)
|
||||
{
|
||||
struct isa_driver *isa_driver = dev->platform_data;
|
||||
|
||||
if (isa_driver->remove)
|
||||
if (isa_driver && isa_driver->remove)
|
||||
return isa_driver->remove(dev, to_isa_dev(dev)->id);
|
||||
|
||||
return 0;
|
||||
@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev)
|
||||
{
|
||||
struct isa_driver *isa_driver = dev->platform_data;
|
||||
|
||||
if (isa_driver->shutdown)
|
||||
if (isa_driver && isa_driver->shutdown)
|
||||
isa_driver->shutdown(dev, to_isa_dev(dev)->id);
|
||||
}
|
||||
|
||||
@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state)
|
||||
{
|
||||
struct isa_driver *isa_driver = dev->platform_data;
|
||||
|
||||
if (isa_driver->suspend)
|
||||
if (isa_driver && isa_driver->suspend)
|
||||
return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
|
||||
|
||||
return 0;
|
||||
@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev)
|
||||
{
|
||||
struct isa_driver *isa_driver = dev->platform_data;
|
||||
|
||||
if (isa_driver->resume)
|
||||
if (isa_driver && isa_driver->resume)
|
||||
return isa_driver->resume(dev, to_isa_dev(dev)->id);
|
||||
|
||||
return 0;
|
||||
|
@ -1755,14 +1755,17 @@ static int cci_pmu_probe(struct platform_device *pdev)
|
||||
raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
|
||||
mutex_init(&cci_pmu->reserve_mutex);
|
||||
atomic_set(&cci_pmu->active_events, 0);
|
||||
cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
|
||||
cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
|
||||
|
||||
ret = cci_pmu_init(cci_pmu, pdev);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
put_cpu();
|
||||
return ret;
|
||||
}
|
||||
|
||||
cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
|
||||
&cci_pmu->node);
|
||||
put_cpu();
|
||||
pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
|
||||
return 0;
|
||||
}
|
||||
|
@ -1271,6 +1271,10 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
|
||||
int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);
|
||||
|
||||
name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
|
||||
if (!name) {
|
||||
err = -ENOMEM;
|
||||
goto error_choose_name;
|
||||
}
|
||||
snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
|
||||
}
|
||||
|
||||
@ -1297,7 +1301,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
|
||||
}
|
||||
|
||||
/* Pick one CPU which we will use to collect data from CCN... */
|
||||
cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
|
||||
cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
|
||||
|
||||
/* Also make sure that the overflow interrupt is handled by this CPU */
|
||||
if (ccn->irq) {
|
||||
@ -1314,10 +1318,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
|
||||
|
||||
cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
|
||||
&ccn->dt.node);
|
||||
put_cpu();
|
||||
return 0;
|
||||
|
||||
error_pmu_register:
|
||||
error_set_affinity:
|
||||
put_cpu();
|
||||
error_choose_name:
|
||||
ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
|
||||
for (i = 0; i < ccn->num_xps; i++)
|
||||
writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
|
||||
@ -1580,8 +1587,8 @@ static int __init arm_ccn_init(void)
|
||||
|
||||
static void __exit arm_ccn_exit(void)
|
||||
{
|
||||
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
|
||||
platform_driver_unregister(&arm_ccn_driver);
|
||||
cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
|
||||
}
|
||||
|
||||
module_init(arm_ccn_init);
|
||||
|
@ -242,6 +242,9 @@ struct smi_info {
|
||||
/* The timer for this si. */
|
||||
struct timer_list si_timer;
|
||||
|
||||
/* This flag is set, if the timer can be set */
|
||||
bool timer_can_start;
|
||||
|
||||
/* This flag is set, if the timer is running (timer_pending() isn't enough) */
|
||||
bool timer_running;
|
||||
|
||||
@ -417,6 +420,8 @@ out:
|
||||
|
||||
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
|
||||
{
|
||||
if (!smi_info->timer_can_start)
|
||||
return;
|
||||
smi_info->last_timeout_jiffies = jiffies;
|
||||
mod_timer(&smi_info->si_timer, new_val);
|
||||
smi_info->timer_running = true;
|
||||
@ -436,21 +441,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
|
||||
smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
|
||||
}
|
||||
|
||||
static void start_check_enables(struct smi_info *smi_info, bool start_timer)
|
||||
static void start_check_enables(struct smi_info *smi_info)
|
||||
{
|
||||
unsigned char msg[2];
|
||||
|
||||
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
|
||||
msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
|
||||
|
||||
if (start_timer)
|
||||
start_new_msg(smi_info, msg, 2);
|
||||
else
|
||||
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
|
||||
start_new_msg(smi_info, msg, 2);
|
||||
smi_info->si_state = SI_CHECKING_ENABLES;
|
||||
}
|
||||
|
||||
static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
|
||||
static void start_clear_flags(struct smi_info *smi_info)
|
||||
{
|
||||
unsigned char msg[3];
|
||||
|
||||
@ -459,10 +461,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
|
||||
msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
|
||||
msg[2] = WDT_PRE_TIMEOUT_INT;
|
||||
|
||||
if (start_timer)
|
||||
start_new_msg(smi_info, msg, 3);
|
||||
else
|
||||
smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
|
||||
start_new_msg(smi_info, msg, 3);
|
||||
smi_info->si_state = SI_CLEARING_FLAGS;
|
||||
}
|
||||
|
||||
@ -497,11 +496,11 @@ static void start_getting_events(struct smi_info *smi_info)
|
||||
* Note that we cannot just use disable_irq(), since the interrupt may
|
||||
* be shared.
|
||||
*/
|
||||
static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
|
||||
static inline bool disable_si_irq(struct smi_info *smi_info)
|
||||
{
|
||||
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
|
||||
smi_info->interrupt_disabled = true;
|
||||
start_check_enables(smi_info, start_timer);
|
||||
start_check_enables(smi_info);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@ -511,7 +510,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info)
|
||||
{
|
||||
if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
|
||||
smi_info->interrupt_disabled = false;
|
||||
start_check_enables(smi_info, true);
|
||||
start_check_enables(smi_info);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@ -529,7 +528,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
|
||||
|
||||
msg = ipmi_alloc_smi_msg();
|
||||
if (!msg) {
|
||||
if (!disable_si_irq(smi_info, true))
|
||||
if (!disable_si_irq(smi_info))
|
||||
smi_info->si_state = SI_NORMAL;
|
||||
} else if (enable_si_irq(smi_info)) {
|
||||
ipmi_free_smi_msg(msg);
|
||||
@ -545,7 +544,7 @@ retry:
|
||||
/* Watchdog pre-timeout */
|
||||
smi_inc_stat(smi_info, watchdog_pretimeouts);
|
||||
|
||||
start_clear_flags(smi_info, true);
|
||||
start_clear_flags(smi_info);
|
||||
smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
|
||||
if (smi_info->intf)
|
||||
ipmi_smi_watchdog_pretimeout(smi_info->intf);
|
||||
@ -928,7 +927,7 @@ restart:
|
||||
* disable and messages disabled.
|
||||
*/
|
||||
if (smi_info->supports_event_msg_buff || smi_info->irq) {
|
||||
start_check_enables(smi_info, true);
|
||||
start_check_enables(smi_info);
|
||||
} else {
|
||||
smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
|
||||
if (!smi_info->curr_msg)
|
||||
@ -1235,6 +1234,7 @@ static int smi_start_processing(void *send_info,
|
||||
|
||||
/* Set up the timer that drives the interface. */
|
||||
setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
|
||||
new_smi->timer_can_start = true;
|
||||
smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
|
||||
|
||||
/* Try to claim any interrupts. */
|
||||
@ -3416,10 +3416,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info)
|
||||
check_set_rcv_irq(smi_info);
|
||||
}
|
||||
|
||||
static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
|
||||
static inline void stop_timer_and_thread(struct smi_info *smi_info)
|
||||
{
|
||||
if (smi_info->thread != NULL)
|
||||
kthread_stop(smi_info->thread);
|
||||
|
||||
smi_info->timer_can_start = false;
|
||||
if (smi_info->timer_running)
|
||||
del_timer_sync(&smi_info->si_timer);
|
||||
}
|
||||
@ -3605,7 +3607,7 @@ static int try_smi_init(struct smi_info *new_smi)
|
||||
* Start clearing the flags before we enable interrupts or the
|
||||
* timer to avoid racing with the timer.
|
||||
*/
|
||||
start_clear_flags(new_smi, false);
|
||||
start_clear_flags(new_smi);
|
||||
|
||||
/*
|
||||
* IRQ is defined to be set when non-zero. req_events will
|
||||
@ -3674,7 +3676,7 @@ static int try_smi_init(struct smi_info *new_smi)
|
||||
return 0;
|
||||
|
||||
out_err_stop_timer:
|
||||
wait_for_timer_and_thread(new_smi);
|
||||
stop_timer_and_thread(new_smi);
|
||||
|
||||
out_err:
|
||||
new_smi->interrupt_disabled = true;
|
||||
@ -3866,7 +3868,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
|
||||
*/
|
||||
if (to_clean->irq_cleanup)
|
||||
to_clean->irq_cleanup(to_clean);
|
||||
wait_for_timer_and_thread(to_clean);
|
||||
stop_timer_and_thread(to_clean);
|
||||
|
||||
/*
|
||||
* Timeouts are stopped, now make sure the interrupts are off
|
||||
@ -3878,7 +3880,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
|
||||
schedule_timeout_uninterruptible(1);
|
||||
}
|
||||
if (to_clean->handlers)
|
||||
disable_si_irq(to_clean, false);
|
||||
disable_si_irq(to_clean);
|
||||
while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
|
||||
poll(to_clean);
|
||||
schedule_timeout_uninterruptible(1);
|
||||
|
@ -384,7 +384,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
|
||||
mux_ops = div_ops = gate_ops = NULL;
|
||||
mux_hw = div_hw = gate_hw = NULL;
|
||||
|
||||
if (gcfg->mux && gcfg->mux) {
|
||||
if (gcfg->mux && cfg->mux) {
|
||||
mux = _get_cmux(base + cfg->mux->offset,
|
||||
cfg->mux->shift,
|
||||
cfg->mux->width,
|
||||
@ -410,7 +410,7 @@ static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
|
||||
}
|
||||
}
|
||||
|
||||
if (gcfg->gate && gcfg->gate) {
|
||||
if (gcfg->gate && cfg->gate) {
|
||||
gate = _get_cgate(base + cfg->gate->offset,
|
||||
cfg->gate->bit_idx,
|
||||
gcfg->gate->flags, lock);
|
||||
|
@ -34,7 +34,7 @@ static const struct hisi_fixed_rate_clock hi3660_fixed_rate_clks[] = {
|
||||
|
||||
/* crgctrl */
|
||||
static const struct hisi_fixed_factor_clock hi3660_crg_fixed_factor_clks[] = {
|
||||
{ HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 8, 0, },
|
||||
{ HI3660_FACTOR_UART3, "clk_factor_uart3", "iomcu_peri0", 1, 16, 0, },
|
||||
{ HI3660_CLK_FACTOR_MMC, "clk_factor_mmc", "clkin_sys", 1, 6, 0, },
|
||||
{ HI3660_CLK_GATE_I2C0, "clk_gate_i2c0", "clk_i2c0_iomcu", 1, 4, 0, },
|
||||
{ HI3660_CLK_GATE_I2C1, "clk_gate_i2c1", "clk_i2c1_iomcu", 1, 4, 0, },
|
||||
|
@ -143,8 +143,10 @@ static int _qcom_cc_register_board_clk(struct device *dev, const char *path,
|
||||
int ret;
|
||||
|
||||
clocks_node = of_find_node_by_path("/clocks");
|
||||
if (clocks_node)
|
||||
node = of_find_node_by_name(clocks_node, path);
|
||||
if (clocks_node) {
|
||||
node = of_get_child_by_name(clocks_node, path);
|
||||
of_node_put(clocks_node);
|
||||
}
|
||||
|
||||
if (!node) {
|
||||
fixed = devm_kzalloc(dev, sizeof(*fixed), GFP_KERNEL);
|
||||
|
@ -354,9 +354,9 @@ static SUNXI_CCU_GATE(bus_tdm_clk, "bus-tdm", "apb1",
|
||||
static SUNXI_CCU_GATE(bus_i2c0_clk, "bus-i2c0", "apb2",
|
||||
0x06c, BIT(0), 0);
|
||||
static SUNXI_CCU_GATE(bus_i2c1_clk, "bus-i2c1", "apb2",
|
||||
0x06c, BIT(0), 0);
|
||||
0x06c, BIT(1), 0);
|
||||
static SUNXI_CCU_GATE(bus_i2c2_clk, "bus-i2c2", "apb2",
|
||||
0x06c, BIT(0), 0);
|
||||
0x06c, BIT(2), 0);
|
||||
static SUNXI_CCU_GATE(bus_uart0_clk, "bus-uart0", "apb2",
|
||||
0x06c, BIT(16), 0);
|
||||
static SUNXI_CCU_GATE(bus_uart1_clk, "bus-uart1", "apb2",
|
||||
|
@ -123,7 +123,7 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = {
|
||||
const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
|
||||
UNIPHIER_CLK_FACTOR("spll", -1, "ref", 120, 1), /* 2400 MHz */
|
||||
UNIPHIER_CLK_FACTOR("dapll1", -1, "ref", 128, 1), /* 2560 MHz */
|
||||
UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125), /* 2949.12 MHz */
|
||||
UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125), /* 2949.12 MHz */
|
||||
UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
|
||||
UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
|
||||
UNIPHIER_PRO5_SYS_CLK_NAND(2),
|
||||
|
@ -384,9 +384,9 @@ static int powernv_add_idle_states(void)
|
||||
* Firmware passes residency and latency values in ns.
|
||||
* cpuidle expects it in us.
|
||||
*/
|
||||
exit_latency = latency_ns[i] / 1000;
|
||||
exit_latency = DIV_ROUND_UP(latency_ns[i], 1000);
|
||||
if (!rc)
|
||||
target_residency = residency_ns[i] / 1000;
|
||||
target_residency = DIV_ROUND_UP(residency_ns[i], 1000);
|
||||
else
|
||||
target_residency = 0;
|
||||
|
||||
|
@@ -1232,12 +1232,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
sg_link_tbl_len += authsize;
}

sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
&desc->ptr[4], sg_count, areq->assoclen,
tbl_off);
ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
&desc->ptr[4], sg_count, areq->assoclen, tbl_off);

if (sg_count > 1) {
tbl_off += sg_count;
if (ret > 1) {
tbl_off += ret;
sync_needed = true;
}
@@ -1248,14 +1247,15 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
}

sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
&desc->ptr[5], sg_count, areq->assoclen,
tbl_off);
ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
sg_count, areq->assoclen, tbl_off);

if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);

if (sg_count > 1) {
/* ICV data */
if (ret > 1) {
tbl_off += ret;
edesc->icv_ool = true;
sync_needed = true;
@@ -1265,9 +1265,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
sizeof(struct talitos_ptr) + authsize;

/* Add an entry to the link table for ICV data */
tbl_ptr += sg_count - 1;
to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
tbl_ptr++;
to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
is_sec1);
to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
@@ -1275,18 +1273,33 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
/* icv data follows link tables */
to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
is_sec1);
} else {
dma_addr_t addr = edesc->dma_link_tbl;

if (is_sec1)
addr += areq->assoclen + cryptlen;
else
addr += sizeof(struct talitos_ptr) * tbl_off;

to_talitos_ptr(&desc->ptr[6], addr, is_sec1);
to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
}
} else if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
&desc->ptr[6], sg_count, areq->assoclen +
cryptlen,
tbl_off);
if (ret > 1) {
tbl_off += ret;
edesc->icv_ool = true;
sync_needed = true;
} else {
edesc->icv_ool = false;
}
} else {
edesc->icv_ool = false;
}

/* ICV data */
if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
areq->assoclen + cryptlen, is_sec1);
}

/* iv out */
if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -1494,12 +1507,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
u32 tmp[DES_EXPKEY_WORDS];

if (keylen > TALITOS_MAX_KEY_SIZE) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}

if (unlikely(crypto_ablkcipher_get_flags(cipher) &
CRYPTO_TFM_REQ_WEAK_KEY) &&
!des_ekey(tmp, key)) {
crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
return -EINVAL;
}

memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;
@@ -2614,7 +2635,7 @@ static struct talitos_alg_template driver_algs[] = {
.ivsize = AES_BLOCK_SIZE,
}
},
.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
DESC_HDR_SEL0_AESU |
DESC_HDR_MODE0_AESU_CTR,
},
@@ -3047,6 +3068,11 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
t_alg->algt.alg.aead.setkey = aead_setkey;
t_alg->algt.alg.aead.encrypt = aead_encrypt;
t_alg->algt.alg.aead.decrypt = aead_decrypt;
if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
!strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
kfree(t_alg);
return ERR_PTR(-ENOTSUPP);
}
break;
case CRYPTO_ALG_TYPE_AHASH:
alg = &t_alg->algt.alg.hash.halg.base;
@@ -143,8 +143,7 @@ static ssize_t systab_show(struct kobject *kobj,
return str - buf;
}

static struct kobj_attribute efi_attr_systab =
__ATTR(systab, 0400, systab_show, NULL);
static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);

#define EFI_FIELD(var) efi.var
@@ -106,7 +106,7 @@ static const struct sysfs_ops esre_attr_ops = {
};

/* Generic ESRT Entry ("ESRE") support. */
static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
{
char *str = buf;
@@ -117,18 +117,16 @@ static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
return str - buf;
}

static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400,
esre_fw_class_show, NULL);
static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);

#define esre_attr_decl(name, size, fmt) \
static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \
static ssize_t name##_show(struct esre_entry *entry, char *buf) \
{ \
return sprintf(buf, fmt "\n", \
le##size##_to_cpu(entry->esre.esre1->name)); \
} \
\
static struct esre_attribute esre_##name = __ATTR(name, 0400, \
esre_##name##_show, NULL)
static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)

esre_attr_decl(fw_type, 32, "%u");
esre_attr_decl(fw_version, 32, "%u");
@@ -193,14 +191,13 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)

/* support for displaying ESRT fields at the top level */
#define esrt_attr_decl(name, size, fmt) \
static ssize_t esrt_##name##_show(struct kobject *kobj, \
static ssize_t name##_show(struct kobject *kobj, \
struct kobj_attribute *attr, char *buf)\
{ \
return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
} \
\
static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \
esrt_##name##_show, NULL)
static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)

esrt_attr_decl(fw_resource_count, 32, "%u");
esrt_attr_decl(fw_resource_count_max, 32, "%u");
@@ -431,7 +428,7 @@ err_remove_group:
err_remove_esrt:
kobject_put(esrt_kobj);
err:
kfree(esrt);
memunmap(esrt);
esrt = NULL;
return error;
}
@@ -63,11 +63,11 @@ static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
return map_attr->show(entry, buf);
}

static struct map_attribute map_type_attr = __ATTR_RO(type);
static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr);
static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr);
static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages);
static struct map_attribute map_attribute_attr = __ATTR_RO(attribute);
static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);

/*
* These are default attributes that are added for every memmap entry.
@@ -295,38 +295,60 @@ static int vpd_probe(struct platform_device *pdev)
if (ret)
return ret;

return vpd_sections_init(entry.cbmem_addr);
vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
if (!vpd_kobj)
return -ENOMEM;

ret = vpd_sections_init(entry.cbmem_addr);
if (ret) {
kobject_put(vpd_kobj);
return ret;
}

return 0;
}

static int vpd_remove(struct platform_device *pdev)
{
vpd_section_destroy(&ro_vpd);
vpd_section_destroy(&rw_vpd);

kobject_put(vpd_kobj);

return 0;
}

static struct platform_driver vpd_driver = {
.probe = vpd_probe,
.remove = vpd_remove,
.driver = {
.name = "vpd",
},
};

static struct platform_device *vpd_pdev;

static int __init vpd_platform_init(void)
{
struct platform_device *pdev;
int ret;

pdev = platform_device_register_simple("vpd", -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
ret = platform_driver_register(&vpd_driver);
if (ret)
return ret;

vpd_kobj = kobject_create_and_add("vpd", firmware_kobj);
if (!vpd_kobj)
return -ENOMEM;

platform_driver_register(&vpd_driver);
vpd_pdev = platform_device_register_simple("vpd", -1, NULL, 0);
if (IS_ERR(vpd_pdev)) {
platform_driver_unregister(&vpd_driver);
return PTR_ERR(vpd_pdev);
}

return 0;
}

static void __exit vpd_platform_exit(void)
{
vpd_section_destroy(&ro_vpd);
vpd_section_destroy(&rw_vpd);
kobject_put(vpd_kobj);
platform_device_unregister(vpd_pdev);
platform_driver_unregister(&vpd_driver);
}

module_init(vpd_platform_init);
@@ -946,7 +946,9 @@ static int analogix_dp_get_modes(struct drm_connector *connector)
return 0;
}

pm_runtime_get_sync(dp->dev);
edid = drm_get_edid(connector, &dp->aux.ddc);
pm_runtime_put(dp->dev);
if (edid) {
drm_mode_connector_update_edid_property(&dp->connector,
edid);
@@ -247,6 +247,15 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
if (IS_ERR(exynos_gem))
return exynos_gem;

if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
/*
* when no IOMMU is available, all allocated buffers are
* contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
*/
flags &= ~EXYNOS_BO_NONCONTIG;
DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
}

/* set memory type and cache attribute from user side. */
exynos_gem->flags = flags;
@@ -1000,7 +1000,8 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
return crtc->config->cpu_transcoder;
}

static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2;
@@ -1015,7 +1016,28 @@ static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum pipe pipe)
msleep(5);
line2 = I915_READ(reg) & line_mask;

return line1 == line2;
return line1 != line2;
}

static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;

/* Wait for the display line to settle/start moving */
if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
DRM_ERROR("pipe %c scanline %s wait timed out\n",
pipe_name(pipe), onoff(state));
}

static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
{
wait_for_pipe_scanline_moving(crtc, false);
}

static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
{
wait_for_pipe_scanline_moving(crtc, true);
}

/*
@@ -1038,7 +1060,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
enum pipe pipe = crtc->pipe;

if (INTEL_GEN(dev_priv) >= 4) {
i915_reg_t reg = PIPECONF(cpu_transcoder);
@@ -1049,9 +1070,7 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
100))
WARN(1, "pipe_off wait timed out\n");
} else {
/* Wait for the display line to settle */
if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
WARN(1, "pipe_off wait timed out\n");
intel_wait_for_pipe_scanline_stopped(crtc);
}
}
@@ -1944,15 +1963,14 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
POSTING_READ(reg);

/*
* Until the pipe starts DSL will read as 0, which would cause
* an apparent vblank timestamp jump, which messes up also the
* frame count when it's derived from the timestamps. So let's
* wait for the pipe to start properly before we call
* drm_crtc_vblank_on()
* Until the pipe starts PIPEDSL reads will return a stale value,
* which causes an apparent vblank timestamp jump when PIPEDSL
* resets to its proper value. That also messes up the frame count
* when it's derived from the timestamps. So let's wait for the
* pipe to start properly before we call drm_crtc_vblank_on()
*/
if (dev->max_vblank_count == 0 &&
wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
if (dev->max_vblank_count == 0)
intel_wait_for_pipe_scanline_moving(crtc);
}

/**
@@ -14682,6 +14700,8 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)

void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
pipe_name(pipe));
@@ -14691,8 +14711,7 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
I915_WRITE(PIPECONF(pipe), 0);
POSTING_READ(PIPECONF(pipe));

if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100))
DRM_ERROR("pipe %c off wait timed out\n", pipe_name(pipe));
intel_wait_for_pipe_scanline_stopped(crtc);

I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
POSTING_READ(DPLL(pipe));
@@ -640,22 +640,28 @@ void vmbus_close(struct vmbus_channel *channel)
*/
return;
}
mutex_lock(&vmbus_connection.channel_mutex);
/*
* Close all the sub-channels first and then close the
* primary channel.
*/
list_for_each_safe(cur, tmp, &channel->sc_list) {
cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
vmbus_close_internal(cur_channel);
if (cur_channel->rescind) {
wait_for_completion(&cur_channel->rescind_event);
mutex_lock(&vmbus_connection.channel_mutex);
vmbus_close_internal(cur_channel);
hv_process_channel_removal(
cur_channel->offermsg.child_relid);
} else {
mutex_lock(&vmbus_connection.channel_mutex);
vmbus_close_internal(cur_channel);
}
mutex_unlock(&vmbus_connection.channel_mutex);
}
/*
* Now close the primary.
*/
mutex_lock(&vmbus_connection.channel_mutex);
vmbus_close_internal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
}
@@ -333,6 +333,7 @@ static struct vmbus_channel *alloc_channel(void)
return NULL;

spin_lock_init(&channel->lock);
init_completion(&channel->rescind_event);

INIT_LIST_HEAD(&channel->sc_list);
INIT_LIST_HEAD(&channel->percpu_list);
@@ -883,6 +884,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
/*
* Now wait for offer handling to complete.
*/
vmbus_rescind_cleanup(channel);
while (READ_ONCE(channel->probe_done) == false) {
/*
* We wait here until any channel offer is currently
@@ -898,7 +900,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
if (channel->device_obj) {
if (channel->chn_rescind_callback) {
channel->chn_rescind_callback(channel);
vmbus_rescind_cleanup(channel);
return;
}
/*
@@ -907,7 +908,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
*/
dev = get_device(&channel->device_obj->device);
if (dev) {
vmbus_rescind_cleanup(channel);
vmbus_device_unregister(channel->device_obj);
put_device(dev);
}
@@ -921,13 +921,14 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
* 2. Then close the primary channel.
*/
mutex_lock(&vmbus_connection.channel_mutex);
vmbus_rescind_cleanup(channel);
if (channel->state == CHANNEL_OPEN_STATE) {
/*
* The channel is currently not open;
* it is safe for us to cleanup the channel.
*/
hv_process_channel_removal(rescind->child_relid);
} else {
complete(&channel->rescind_event);
}
mutex_unlock(&vmbus_connection.channel_mutex);
}
@@ -282,7 +282,7 @@ int ide_cd_expiry(ide_drive_t *drive)
struct request *rq = drive->hwif->rq;
unsigned long wait = 0;

debug_log("%s: rq->cmd[0]: 0x%x\n", __func__, rq->cmd[0]);
debug_log("%s: scsi_req(rq)->cmd[0]: 0x%x\n", __func__, scsi_req(rq)->cmd[0]);

/*
* Some commands are *slow* and normally take a long time to complete.
@@ -463,7 +463,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
return ide_do_reset(drive);
}

debug_log("[cmd %x]: check condition\n", rq->cmd[0]);
debug_log("[cmd %x]: check condition\n", scsi_req(rq)->cmd[0]);

/* Retry operation */
ide_retry_pc(drive);
@@ -531,7 +531,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive)
ide_pad_transfer(drive, write, bcount);

debug_log("[cmd %x] transferred %d bytes, padded %d bytes, resid: %u\n",
rq->cmd[0], done, bcount, scsi_req(rq)->resid_len);
scsi_req(rq)->cmd[0], done, bcount, scsi_req(rq)->resid_len);

/* And set the interrupt handler again */
ide_set_handler(drive, ide_pc_intr, timeout);
@@ -1012,7 +1012,7 @@ static int cpcap_adc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);

ddata->irq = platform_get_irq_byname(pdev, "adcdone");
if (!ddata->irq)
if (ddata->irq < 0)
return -ENODEV;

error = devm_request_threaded_irq(&pdev->dev, ddata->irq, NULL,
@ -221,8 +221,10 @@ enum meson_sar_adc_chan7_mux_sel {
|
||||
|
||||
struct meson_sar_adc_data {
|
||||
bool has_bl30_integration;
|
||||
u32 bandgap_reg;
|
||||
unsigned int resolution;
|
||||
const char *name;
|
||||
const struct regmap_config *regmap_config;
|
||||
};
|
||||
|
||||
struct meson_sar_adc_priv {
|
||||
@ -242,13 +244,20 @@ struct meson_sar_adc_priv {
|
||||
int calibscale;
|
||||
};
|
||||
|
||||
static const struct regmap_config meson_sar_adc_regmap_config = {
|
||||
static const struct regmap_config meson_sar_adc_regmap_config_gxbb = {
|
||||
.reg_bits = 8,
|
||||
.val_bits = 32,
|
||||
.reg_stride = 4,
|
||||
.max_register = MESON_SAR_ADC_REG13,
|
||||
};
|
||||
|
||||
static const struct regmap_config meson_sar_adc_regmap_config_meson8 = {
|
||||
.reg_bits = 8,
|
||||
.val_bits = 32,
|
||||
.reg_stride = 4,
|
||||
.max_register = MESON_SAR_ADC_DELTA_10,
|
||||
};
|
||||
|
||||
static unsigned int meson_sar_adc_get_fifo_count(struct iio_dev *indio_dev)
|
||||
{
|
||||
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
|
||||
@ -600,7 +609,7 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
|
||||
init.num_parents = 1;
|
||||
|
||||
priv->clk_gate.reg = base + MESON_SAR_ADC_REG3;
|
||||
priv->clk_gate.bit_idx = fls(MESON_SAR_ADC_REG3_CLK_EN);
|
||||
priv->clk_gate.bit_idx = __ffs(MESON_SAR_ADC_REG3_CLK_EN);
|
||||
priv->clk_gate.hw.init = &init;
|
||||
|
||||
priv->adc_clk = devm_clk_register(&indio_dev->dev, &priv->clk_gate.hw);
|
||||
@ -685,6 +694,20 @@ static int meson_sar_adc_init(struct iio_dev *indio_dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void meson_sar_adc_set_bandgap(struct iio_dev *indio_dev, bool on_off)
|
||||
{
|
||||
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
|
||||
u32 enable_mask;
|
||||
|
||||
if (priv->data->bandgap_reg == MESON_SAR_ADC_REG11)
|
||||
enable_mask = MESON_SAR_ADC_REG11_BANDGAP_EN;
|
||||
else
|
||||
enable_mask = MESON_SAR_ADC_DELTA_10_TS_VBG_EN;
|
||||
|
||||
regmap_update_bits(priv->regmap, priv->data->bandgap_reg, enable_mask,
|
||||
on_off ? enable_mask : 0);
|
||||
}
|
||||
|
||||
static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
|
||||
{
|
||||
struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
|
||||
@ -717,9 +740,9 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
|
||||
regval = FIELD_PREP(MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, 1);
|
||||
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG0,
|
||||
MESON_SAR_ADC_REG0_FIFO_CNT_IRQ_MASK, regval);
|
||||
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
|
||||
MESON_SAR_ADC_REG11_BANDGAP_EN,
|
||||
MESON_SAR_ADC_REG11_BANDGAP_EN);
|
||||
|
||||
meson_sar_adc_set_bandgap(indio_dev, true);
|
||||
|
||||
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
|
||||
MESON_SAR_ADC_REG3_ADC_EN,
|
||||
MESON_SAR_ADC_REG3_ADC_EN);
|
||||
@ -739,8 +762,7 @@ static int meson_sar_adc_hw_enable(struct iio_dev *indio_dev)
|
||||
err_adc_clk:
|
||||
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
|
||||
MESON_SAR_ADC_REG3_ADC_EN, 0);
|
||||
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
|
||||
MESON_SAR_ADC_REG11_BANDGAP_EN, 0);
|
||||
meson_sar_adc_set_bandgap(indio_dev, false);
|
||||
clk_disable_unprepare(priv->sana_clk);
|
||||
err_sana_clk:
|
||||
clk_disable_unprepare(priv->core_clk);
|
||||
@ -765,8 +787,8 @@ static int meson_sar_adc_hw_disable(struct iio_dev *indio_dev)
|
||||
|
||||
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG3,
|
||||
MESON_SAR_ADC_REG3_ADC_EN, 0);
|
||||
regmap_update_bits(priv->regmap, MESON_SAR_ADC_REG11,
|
||||
MESON_SAR_ADC_REG11_BANDGAP_EN, 0);
|
||||
|
||||
meson_sar_adc_set_bandgap(indio_dev, false);
|
||||
|
||||
clk_disable_unprepare(priv->sana_clk);
|
||||
clk_disable_unprepare(priv->core_clk);
|
||||
@ -845,30 +867,40 @@ static const struct iio_info meson_sar_adc_iio_info = {
|
||||
|
||||
static const struct meson_sar_adc_data meson_sar_adc_meson8_data = {
|
||||
.has_bl30_integration = false,
|
||||
.bandgap_reg = MESON_SAR_ADC_DELTA_10,
|
||||
.regmap_config = &meson_sar_adc_regmap_config_meson8,
|
||||
.resolution = 10,
|
||||
.name = "meson-meson8-saradc",
|
||||
};
|
||||
|
||||
static const struct meson_sar_adc_data meson_sar_adc_meson8b_data = {
|
||||
.has_bl30_integration = false,
|
||||
.bandgap_reg = MESON_SAR_ADC_DELTA_10,
|
||||
.regmap_config = &meson_sar_adc_regmap_config_meson8,
|
||||
.resolution = 10,
|
||||
.name = "meson-meson8b-saradc",
|
||||
};
|
||||
|
||||
static const struct meson_sar_adc_data meson_sar_adc_gxbb_data = {
|
||||
.has_bl30_integration = true,
|
||||
.bandgap_reg = MESON_SAR_ADC_REG11,
|
||||
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
|
||||
.resolution = 10,
|
||||
.name = "meson-gxbb-saradc",
|
||||
};
|
||||
|
||||
static const struct meson_sar_adc_data meson_sar_adc_gxl_data = {
|
||||
.has_bl30_integration = true,
|
||||
.bandgap_reg = MESON_SAR_ADC_REG11,
|
||||
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
|
||||
.resolution = 12,
|
||||
.name = "meson-gxl-saradc",
|
||||
};
|
||||
|
||||
static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
|
||||
.has_bl30_integration = true,
|
||||
.bandgap_reg = MESON_SAR_ADC_REG11,
|
||||
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
|
||||
.resolution = 12,
|
||||
.name = "meson-gxm-saradc",
|
||||
};
|
||||
@ -946,7 +978,7 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
|
||||
return ret;
|
||||
|
||||
priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
|
||||
&meson_sar_adc_regmap_config);
|
||||
priv->data->regmap_config);
|
||||
if (IS_ERR(priv->regmap))
|
||||
return PTR_ERR(priv->regmap);
|
||||
|
||||
|
@@ -371,7 +371,7 @@ static int max30102_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&indio_dev->mlock);
break;
case IIO_CHAN_INFO_SCALE:
*val = 1; /* 0.0625 */
*val = 1000; /* 62.5 */
*val2 = 16;
ret = IIO_VAL_FRACTIONAL;
break;
@ -417,8 +417,17 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec)
|
||||
|
||||
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
|
||||
{
|
||||
u8 i = rdma_start_port(dev);
|
||||
bool is_ib = false;
|
||||
int ret;
|
||||
|
||||
while (i <= rdma_end_port(dev) && !is_ib)
|
||||
is_ib = rdma_protocol_ib(dev, i++);
|
||||
|
||||
/* If this isn't an IB device don't create the security context */
|
||||
if (!is_ib)
|
||||
return 0;
|
||||
|
||||
qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
|
||||
if (!qp->qp_sec)
|
||||
return -ENOMEM;
|
||||
@ -441,6 +450,10 @@ EXPORT_SYMBOL(ib_create_qp_security);
|
||||
|
||||
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
|
||||
{
|
||||
/* Return if not IB */
|
||||
if (!sec)
|
||||
return;
|
||||
|
||||
mutex_lock(&sec->mutex);
|
||||
|
||||
/* Remove the QP from the lists so it won't get added to
|
||||
@ -470,6 +483,10 @@ void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
/* Return if not IB */
|
||||
if (!sec)
|
||||
return;
|
||||
|
||||
/* If a concurrent cache update is in progress this
|
||||
* QP security could be marked for an error state
|
||||
* transition. Wait for this to complete.
|
||||
@ -505,6 +522,10 @@ void ib_destroy_qp_security_end(struct ib_qp_security *sec)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* Return if not IB */
|
||||
if (!sec)
|
||||
return;
|
||||
|
||||
/* If a concurrent cache update is occurring we must
|
||||
* wait until this QP security structure is processed
|
||||
* in the QP to error flow before destroying it because
|
||||
@ -557,7 +578,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
|
||||
{
|
||||
int ret = 0;
|
||||
struct ib_ports_pkeys *tmp_pps;
|
||||
struct ib_ports_pkeys *new_pps;
|
||||
struct ib_ports_pkeys *new_pps = NULL;
|
||||
struct ib_qp *real_qp = qp->real_qp;
|
||||
bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
|
||||
real_qp->qp_type == IB_QPT_GSI ||
|
||||
@ -565,18 +586,27 @@ int ib_security_modify_qp(struct ib_qp *qp,
|
||||
bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
|
||||
(qp_attr_mask & IB_QP_ALT_PATH));
|
||||
|
||||
WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
|
||||
rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
|
||||
!real_qp->qp_sec),
|
||||
"%s: QP security is not initialized for IB QP: %d\n",
|
||||
__func__, real_qp->qp_num);
|
||||
|
||||
/* The port/pkey settings are maintained only for the real QP. Open
|
||||
* handles on the real QP will be in the shared_qp_list. When
|
||||
* enforcing security on the real QP all the shared QPs will be
|
||||
* checked as well.
|
||||
*/
|
||||
|
||||
if (pps_change && !special_qp) {
|
||||
if (pps_change && !special_qp && real_qp->qp_sec) {
|
||||
mutex_lock(&real_qp->qp_sec->mutex);
|
||||
new_pps = get_new_pps(real_qp,
|
||||
qp_attr,
|
||||
qp_attr_mask);
|
||||
|
||||
if (!new_pps) {
|
||||
mutex_unlock(&real_qp->qp_sec->mutex);
|
||||
return -ENOMEM;
|
||||
}
|
||||
/* Add this QP to the lists for the new port
|
||||
* and pkey settings before checking for permission
|
||||
* in case there is a concurrent cache update
|
||||
@ -600,7 +630,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
|
||||
qp_attr_mask,
|
||||
udata);
|
||||
|
||||
if (pps_change && !special_qp) {
|
||||
if (new_pps) {
|
||||
/* Clean up the lists and free the appropriate
|
||||
* ports_pkeys structure.
|
||||
*/
|
||||
@ -631,6 +661,9 @@ int ib_security_pkey_access(struct ib_device *dev,
|
||||
u16 pkey;
|
||||
int ret;
|
||||
|
||||
if (!rdma_protocol_ib(dev, port_num))
|
||||
return 0;
|
||||
|
||||
ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -665,6 +698,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!rdma_protocol_ib(agent->device, agent->port_num))
|
||||
return 0;
|
||||
|
||||
ret = security_ib_alloc_security(&agent->security);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -690,6 +726,9 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
|
||||
|
||||
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
|
||||
{
|
||||
if (!rdma_protocol_ib(agent->device, agent->port_num))
|
||||
return;
|
||||
|
||||
security_ib_free_security(agent->security);
|
||||
if (agent->lsm_nb_reg)
|
||||
unregister_lsm_notifier(&agent->lsm_nb);
|
||||
@ -697,20 +736,16 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
|
||||
|
||||
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
|
||||
{
|
||||
int ret;
|
||||
if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
|
||||
return 0;
|
||||
|
||||
if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
|
||||
return -EACCES;
|
||||
|
||||
ret = ib_security_pkey_access(map->agent.device,
|
||||
map->agent.port_num,
|
||||
pkey_index,
|
||||
map->agent.security);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
return ib_security_pkey_access(map->agent.device,
|
||||
map->agent.port_num,
|
||||
pkey_index,
|
||||
map->agent.security);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SECURITY_INFINIBAND */
|
||||
|
@ -394,6 +394,7 @@ int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
|
||||
ctx->idx = tbl_idx;
|
||||
ctx->refcnt = 1;
|
||||
ctx_tbl[tbl_idx] = ctx;
|
||||
*context = ctx;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -675,8 +675,8 @@ struct fw_ri_fr_nsmr_tpte_wr {
|
||||
__u16 wrid;
|
||||
__u8 r1[3];
|
||||
__u8 len16;
|
||||
__u32 r2;
|
||||
__u32 stag;
|
||||
__be32 r2;
|
||||
__be32 stag;
|
||||
struct fw_ri_tpte tpte;
|
||||
__u64 pbl[2];
|
||||
};
|
||||
|
@ -2216,7 +2216,7 @@ static int __mlx4_ib_modify_qp(void *src, enum mlx4_ib_source_type src_type,
|
||||
context->mtu_msgmax = (IB_MTU_4096 << 5) |
|
||||
ilog2(dev->dev->caps.max_gso_sz);
|
||||
else
|
||||
context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
|
||||
context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
|
||||
} else if (attr_mask & IB_QP_PATH_MTU) {
|
||||
if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
|
||||
pr_err("path MTU (%u) is invalid\n",
|
||||
|
@ -3097,6 +3097,8 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
|
||||
qp->real_qp = qp;
|
||||
qp->uobject = NULL;
|
||||
qp->qp_type = MLX5_IB_QPT_REG_UMR;
|
||||
qp->send_cq = init_attr->send_cq;
|
||||
qp->recv_cq = init_attr->recv_cq;
|
||||
|
||||
attr->qp_state = IB_QPS_INIT;
|
||||
attr->port_num = 1;
|
||||
|
@ -2254,10 +2254,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
|
||||
uint64_t tmp;
|
||||
|
||||
if (!sg_res) {
|
||||
unsigned int pgoff = sg->offset & ~PAGE_MASK;
|
||||
|
||||
sg_res = aligned_nrpages(sg->offset, sg->length);
|
||||
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
|
||||
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
|
||||
sg->dma_length = sg->length;
|
||||
pteval = page_to_phys(sg_page(sg)) | prot;
|
||||
pteval = (sg_phys(sg) - pgoff) | prot;
|
||||
phys_pfn = pteval >> VTD_PAGE_SHIFT;
|
||||
}
|
||||
|
||||
@ -3790,7 +3792,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
|
||||
|
||||
for_each_sg(sglist, sg, nelems, i) {
|
||||
BUG_ON(!sg_page(sg));
|
||||
sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
|
||||
sg->dma_address = sg_phys(sg);
|
||||
sg->dma_length = sg->length;
|
||||
}
|
||||
return nelems;
|
||||
|
@@ -238,7 +238,7 @@ static int __init combiner_probe(struct platform_device *pdev)
{
struct combiner *combiner;
size_t alloc_sz;
u32 nregs;
int nregs;
int err;

nregs = count_registers(pdev);
@ -30,6 +30,7 @@
|
||||
#define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \
|
||||
(MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE))
|
||||
|
||||
static bool mbox_data_ready;
|
||||
static struct dentry *root_debugfs_dir;
|
||||
|
||||
struct mbox_test_device {
|
||||
@ -152,16 +153,14 @@ out:
|
||||
|
||||
static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
|
||||
{
|
||||
unsigned char data;
|
||||
bool data_ready;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&tdev->lock, flags);
|
||||
data = tdev->rx_buffer[0];
|
||||
data_ready = mbox_data_ready;
|
||||
spin_unlock_irqrestore(&tdev->lock, flags);
|
||||
|
||||
if (data != '\0')
|
||||
return true;
|
||||
return false;
|
||||
return data_ready;
|
||||
}
|
||||
|
||||
static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
|
||||
@ -223,6 +222,7 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
|
||||
*(touser + l) = '\0';
|
||||
|
||||
memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN);
|
||||
mbox_data_ready = false;
|
||||
|
||||
spin_unlock_irqrestore(&tdev->lock, flags);
|
||||
|
||||
@ -292,6 +292,7 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message)
|
||||
message, MBOX_MAX_MSG_LEN);
|
||||
memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
|
||||
}
|
||||
mbox_data_ready = true;
|
||||
spin_unlock_irqrestore(&tdev->lock, flags);
|
||||
|
||||
wake_up_interruptible(&tdev->waitq);
|
||||
|
@ -2158,6 +2158,7 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
|
||||
for (k = 0; k < page; k++) {
|
||||
kfree(new_bp[k].map);
|
||||
}
|
||||
kfree(new_bp);
|
||||
|
||||
/* restore some fields from old_counts */
|
||||
bitmap->counts.bp = old_counts.bp;
|
||||
@ -2208,6 +2209,14 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
|
||||
block += old_blocks;
|
||||
}
|
||||
|
||||
if (bitmap->counts.bp != old_counts.bp) {
|
||||
unsigned long k;
|
||||
for (k = 0; k < old_counts.pages; k++)
|
||||
if (!old_counts.bp[k].hijacked)
|
||||
kfree(old_counts.bp[k].map);
|
||||
kfree(old_counts.bp);
|
||||
}
|
||||
|
||||
if (!init) {
|
||||
int i;
|
||||
while (block < (chunks << chunkshift)) {
|
||||
|
@ -2143,13 +2143,6 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
|
||||
struct dm_raid_superblock *refsb;
|
||||
uint64_t events_sb, events_refsb;
|
||||
|
||||
rdev->sb_start = 0;
|
||||
rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
|
||||
if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
|
||||
DMERR("superblock size of a logical block is no longer valid");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
r = read_disk_sb(rdev, rdev->sb_size, false);
|
||||
if (r)
|
||||
return r;
|
||||
@ -2494,6 +2487,17 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
|
||||
if (test_bit(Journal, &rdev->flags))
|
||||
continue;
|
||||
|
||||
if (!rdev->meta_bdev)
|
||||
continue;
|
||||
|
||||
/* Set superblock offset/size for metadata device. */
|
||||
rdev->sb_start = 0;
|
||||
rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
|
||||
if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) {
|
||||
DMERR("superblock size of a logical block is no longer valid");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Skipping super_load due to CTR_FLAG_SYNC will cause
|
||||
* the array to undergo initialization again as
|
||||
@ -2506,9 +2510,6 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
|
||||
if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
|
||||
continue;
|
||||
|
||||
if (!rdev->meta_bdev)
|
||||
continue;
|
||||
|
||||
r = super_load(rdev, freshest);
|
||||
|
||||
switch (r) {
|
||||
|
@ -2571,31 +2571,22 @@ static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page)
|
||||
int r5c_journal_mode_set(struct mddev *mddev, int mode)
|
||||
{
|
||||
struct r5conf *conf;
|
||||
int err;
|
||||
|
||||
if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH ||
|
||||
mode > R5C_JOURNAL_MODE_WRITE_BACK)
|
||||
return -EINVAL;
|
||||
|
||||
err = mddev_lock(mddev);
|
||||
if (err)
|
||||
return err;
|
||||
conf = mddev->private;
|
||||
if (!conf || !conf->log) {
|
||||
mddev_unlock(mddev);
|
||||
if (!conf || !conf->log)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (raid5_calc_degraded(conf) > 0 &&
|
||||
mode == R5C_JOURNAL_MODE_WRITE_BACK) {
|
||||
mddev_unlock(mddev);
|
||||
mode == R5C_JOURNAL_MODE_WRITE_BACK)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mddev_suspend(mddev);
|
||||
conf->log->r5c_journal_mode = mode;
|
||||
mddev_resume(mddev);
|
||||
mddev_unlock(mddev);
|
||||
|
||||
pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n",
|
||||
mdname(mddev), mode, r5c_journal_mode_str[mode]);
|
||||
@ -2608,6 +2599,7 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
|
||||
{
|
||||
int mode = ARRAY_SIZE(r5c_journal_mode_str);
|
||||
size_t len = length;
|
||||
int ret;
|
||||
|
||||
if (len < 2)
|
||||
return -EINVAL;
|
||||
@ -2619,8 +2611,12 @@ static ssize_t r5c_journal_mode_store(struct mddev *mddev,
|
||||
if (strlen(r5c_journal_mode_str[mode]) == len &&
|
||||
!strncmp(page, r5c_journal_mode_str[mode], len))
|
||||
break;
|
||||
|
||||
return r5c_journal_mode_set(mddev, mode) ?: length;
|
||||
ret = mddev_lock(mddev);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = r5c_journal_mode_set(mddev, mode);
|
||||
mddev_unlock(mddev);
|
||||
return ret ?: length;
|
||||
}
|
||||
|
||||
struct md_sysfs_entry
|
||||
|
@ -145,15 +145,13 @@ static void __dvb_frontend_free(struct dvb_frontend *fe)
|
||||
{
|
||||
struct dvb_frontend_private *fepriv = fe->frontend_priv;
|
||||
|
||||
if (!fepriv)
|
||||
return;
|
||||
|
||||
dvb_free_device(fepriv->dvbdev);
|
||||
if (fepriv)
|
||||
dvb_free_device(fepriv->dvbdev);
|
||||
|
||||
dvb_frontend_invoke_release(fe, fe->ops.release);
|
||||
|
||||
kfree(fepriv);
|
||||
fe->frontend_priv = NULL;
|
||||
if (fepriv)
|
||||
kfree(fepriv);
|
||||
}
|
||||
|
||||
static void dvb_frontend_free(struct kref *ref)
|
||||
|
@ -38,41 +38,41 @@ static const struct {
|
||||
[RC_PROTO_UNKNOWN] = { .name = "unknown", .repeat_period = 250 },
|
||||
[RC_PROTO_OTHER] = { .name = "other", .repeat_period = 250 },
|
||||
[RC_PROTO_RC5] = { .name = "rc-5",
|
||||
.scancode_bits = 0x1f7f, .repeat_period = 164 },
|
||||
.scancode_bits = 0x1f7f, .repeat_period = 250 },
|
||||
[RC_PROTO_RC5X_20] = { .name = "rc-5x-20",
|
||||
.scancode_bits = 0x1f7f3f, .repeat_period = 164 },
|
||||
.scancode_bits = 0x1f7f3f, .repeat_period = 250 },
|
||||
[RC_PROTO_RC5_SZ] = { .name = "rc-5-sz",
|
||||
.scancode_bits = 0x2fff, .repeat_period = 164 },
|
||||
.scancode_bits = 0x2fff, .repeat_period = 250 },
|
||||
[RC_PROTO_JVC] = { .name = "jvc",
|
||||
.scancode_bits = 0xffff, .repeat_period = 250 },
|
||||
[RC_PROTO_SONY12] = { .name = "sony-12",
|
||||
.scancode_bits = 0x1f007f, .repeat_period = 100 },
|
||||
.scancode_bits = 0x1f007f, .repeat_period = 250 },
|
||||
[RC_PROTO_SONY15] = { .name = "sony-15",
|
||||
.scancode_bits = 0xff007f, .repeat_period = 100 },
|
||||
.scancode_bits = 0xff007f, .repeat_period = 250 },
|
||||
[RC_PROTO_SONY20] = { .name = "sony-20",
|
||||
.scancode_bits = 0x1fff7f, .repeat_period = 100 },
|
||||
.scancode_bits = 0x1fff7f, .repeat_period = 250 },
|
||||
[RC_PROTO_NEC] = { .name = "nec",
|
||||
.scancode_bits = 0xffff, .repeat_period = 160 },
|
||||
.scancode_bits = 0xffff, .repeat_period = 250 },
|
||||
[RC_PROTO_NECX] = { .name = "nec-x",
|
||||
.scancode_bits = 0xffffff, .repeat_period = 160 },
|
||||
.scancode_bits = 0xffffff, .repeat_period = 250 },
|
||||
[RC_PROTO_NEC32] = { .name = "nec-32",
|
||||
.scancode_bits = 0xffffffff, .repeat_period = 160 },
|
||||
.scancode_bits = 0xffffffff, .repeat_period = 250 },
|
||||
[RC_PROTO_SANYO] = { .name = "sanyo",
|
||||
.scancode_bits = 0x1fffff, .repeat_period = 250 },
|
||||
[RC_PROTO_MCIR2_KBD] = { .name = "mcir2-kbd",
|
||||
.scancode_bits = 0xffff, .repeat_period = 150 },
|
||||
.scancode_bits = 0xffff, .repeat_period = 250 },
|
||||
[RC_PROTO_MCIR2_MSE] = { .name = "mcir2-mse",
|
||||
.scancode_bits = 0x1fffff, .repeat_period = 150 },
|
||||
.scancode_bits = 0x1fffff, .repeat_period = 250 },
|
||||
[RC_PROTO_RC6_0] = { .name = "rc-6-0",
|
||||
.scancode_bits = 0xffff, .repeat_period = 164 },
|
||||
.scancode_bits = 0xffff, .repeat_period = 250 },
|
||||
[RC_PROTO_RC6_6A_20] = { .name = "rc-6-6a-20",
|
||||
.scancode_bits = 0xfffff, .repeat_period = 164 },
|
||||
.scancode_bits = 0xfffff, .repeat_period = 250 },
|
||||
[RC_PROTO_RC6_6A_24] = { .name = "rc-6-6a-24",
|
||||
.scancode_bits = 0xffffff, .repeat_period = 164 },
|
||||
.scancode_bits = 0xffffff, .repeat_period = 250 },
|
||||
[RC_PROTO_RC6_6A_32] = { .name = "rc-6-6a-32",
|
||||
.scancode_bits = 0xffffffff, .repeat_period = 164 },
|
||||
.scancode_bits = 0xffffffff, .repeat_period = 250 },
|
||||
[RC_PROTO_RC6_MCE] = { .name = "rc-6-mce",
|
||||
.scancode_bits = 0xffff7fff, .repeat_period = 164 },
|
||||
.scancode_bits = 0xffff7fff, .repeat_period = 250 },
|
||||
[RC_PROTO_SHARP] = { .name = "sharp",
|
||||
.scancode_bits = 0x1fff, .repeat_period = 250 },
|
||||
[RC_PROTO_XMP] = { .name = "xmp", .repeat_period = 250 },
|
||||
|
@ -57,7 +57,7 @@ static void add_read_queue(int flag, unsigned long val);
|
||||
static irqreturn_t sir_interrupt(int irq, void *dev_id);
|
||||
static void send_space(unsigned long len);
|
||||
static void send_pulse(unsigned long len);
|
||||
static void init_hardware(void);
|
||||
static int init_hardware(void);
|
||||
static void drop_hardware(void);
|
||||
/* Initialisation */
|
||||
|
||||
@ -263,11 +263,36 @@ static void send_pulse(unsigned long len)
|
||||
}
|
||||
}
|
||||
|
||||
static void init_hardware(void)
|
||||
static int init_hardware(void)
|
||||
{
|
||||
u8 scratch, scratch2, scratch3;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&hardware_lock, flags);
|
||||
|
||||
/*
|
||||
* This is a simple port existence test, borrowed from the autoconfig
|
||||
* function in drivers/tty/serial/8250/8250_port.c
|
||||
*/
|
||||
scratch = sinp(UART_IER);
|
||||
soutp(UART_IER, 0);
|
||||
#ifdef __i386__
|
||||
outb(0xff, 0x080);
|
||||
#endif
|
||||
scratch2 = sinp(UART_IER) & 0x0f;
|
||||
soutp(UART_IER, 0x0f);
|
||||
#ifdef __i386__
|
||||
outb(0x00, 0x080);
|
||||
#endif
|
||||
scratch3 = sinp(UART_IER) & 0x0f;
|
||||
soutp(UART_IER, scratch);
|
||||
if (scratch2 != 0 || scratch3 != 0x0f) {
|
||||
/* we fail, there's nothing here */
|
||||
spin_unlock_irqrestore(&hardware_lock, flags);
|
||||
pr_err("port existence test failed, cannot continue\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* reset UART */
|
||||
outb(0, io + UART_MCR);
|
||||
outb(0, io + UART_IER);
|
||||
@ -285,6 +310,8 @@ static void init_hardware(void)
|
||||
/* turn on UART */
|
||||
outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR);
|
||||
spin_unlock_irqrestore(&hardware_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void drop_hardware(void)
|
||||
@ -334,14 +361,19 @@ static int sir_ir_probe(struct platform_device *dev)
|
||||
pr_err("IRQ %d already in use.\n", irq);
|
||||
return retval;
|
||||
}
|
||||
|
||||
retval = init_hardware();
|
||||
if (retval) {
|
||||
del_timer_sync(&timerlist);
|
||||
return retval;
|
||||
}
|
||||
|
||||
pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
|
||||
|
||||
retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev);
|
||||
if (retval < 0)
|
||||
return retval;
|
||||
|
||||
init_hardware();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -223,8 +223,20 @@ EXPORT_SYMBOL(dibusb_i2c_algo);
|
||||
|
||||
int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
|
||||
{
|
||||
u8 wbuf[1] = { offs };
|
||||
return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1);
|
||||
u8 *buf;
|
||||
int rc;
|
||||
|
||||
buf = kmalloc(2, GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
buf[0] = offs;
|
||||
|
||||
rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1);
|
||||
*val = buf[1];
|
||||
kfree(buf);
|
||||
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL(dibusb_read_eeprom_byte);
|
||||
|
||||
|
@ -189,7 +189,7 @@
|
||||
* MX35 FlexCAN2 03.00.00.00 no no ? no no
|
||||
* MX53 FlexCAN2 03.00.00.00 yes no no no no
|
||||
* MX6s FlexCAN3 10.00.12.00 yes yes no no yes
|
||||
* VF610 FlexCAN3 ? no yes ? yes yes?
|
||||
* VF610 FlexCAN3 ? no yes no yes yes?
|
||||
*
|
||||
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
|
||||
*/
|
||||
@ -297,7 +297,8 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
|
||||
|
||||
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
|
||||
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
|
||||
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
|
||||
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
|
||||
FLEXCAN_QUIRK_BROKEN_PERR_STATE,
|
||||
};
|
||||
|
||||
static const struct can_bittiming_const flexcan_bittiming_const = {
|
||||
|
@ -258,21 +258,18 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
|
||||
/* if this frame is an echo, */
|
||||
if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
|
||||
!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
|
||||
int n;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->echo_lock, flags);
|
||||
n = can_get_echo_skb(priv->ndev, msg->client);
|
||||
can_get_echo_skb(priv->ndev, msg->client);
|
||||
spin_unlock_irqrestore(&priv->echo_lock, flags);
|
||||
|
||||
/* count bytes of the echo instead of skb */
|
||||
stats->tx_bytes += cf_len;
|
||||
stats->tx_packets++;
|
||||
|
||||
if (n) {
|
||||
/* restart tx queue only if a slot is free */
|
||||
netif_wake_queue(priv->ndev);
|
||||
}
|
||||
/* restart tx queue (a slot is free) */
|
||||
netif_wake_queue(priv->ndev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -825,7 +825,10 @@ err_release_regions:
|
||||
err_disable_pci:
|
||||
pci_disable_device(pdev);
|
||||
|
||||
return err;
|
||||
/* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
|
||||
* the probe() function must return a negative errno in case of failure
|
||||
* (err is unchanged if negative) */
|
||||
return pcibios_err_to_errno(err);
|
||||
}
|
||||
|
||||
/* free the board structure object, as well as its resources: */
|
||||
|
@ -717,7 +717,10 @@ failure_release_regions:
|
||||
failure_disable_pci:
|
||||
pci_disable_device(pdev);
|
||||
|
||||
return err;
|
||||
/* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while
|
||||
* the probe() function must return a negative errno in case of failure
|
||||
* (err is unchanged if negative) */
|
||||
return pcibios_err_to_errno(err);
|
||||
}
|
||||
|
||||
static void peak_pci_remove(struct pci_dev *pdev)
|
||||
|
@ -637,6 +637,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
|
||||
mbx_mask = hecc_read(priv, HECC_CANMIM);
|
||||
mbx_mask |= HECC_TX_MBOX_MASK;
|
||||
hecc_write(priv, HECC_CANMIM, mbx_mask);
|
||||
} else {
|
||||
/* repoll is done only if whole budget is used */
|
||||
num_pkts = quota;
|
||||
}
|
||||
|
||||
return num_pkts;
|
||||
|
@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
|
||||
|
||||
case -ECONNRESET: /* unlink */
|
||||
case -ENOENT:
|
||||
case -EPIPE:
|
||||
case -EPROTO:
|
||||
case -ESHUTDOWN:
|
||||
return;
|
||||
|
||||
|
@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
|
||||
break;
|
||||
|
||||
case -ENOENT:
|
||||
case -EPIPE:
|
||||
case -EPROTO:
|
||||
case -ESHUTDOWN:
|
||||
return;
|
||||
|
||||
|
@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
|
||||
}
|
||||
|
||||
if (pos + tmp->len > actual_len) {
|
||||
dev_err(dev->udev->dev.parent,
|
||||
"Format error\n");
|
||||
dev_err_ratelimited(dev->udev->dev.parent,
|
||||
"Format error\n");
|
||||
break;
|
||||
}
|
||||
|
||||
@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
|
||||
if (err) {
|
||||
netdev_err(netdev, "Error transmitting URB\n");
|
||||
usb_unanchor_urb(urb);
|
||||
kfree(buf);
|
||||
usb_free_urb(urb);
|
||||
return err;
|
||||
}
|
||||
@ -1325,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
|
||||
case 0:
|
||||
break;
|
||||
case -ENOENT:
|
||||
case -EPIPE:
|
||||
case -EPROTO:
|
||||
case -ESHUTDOWN:
|
||||
return;
|
||||
default:
|
||||
@ -1333,7 +1336,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
|
||||
goto resubmit_urb;
|
||||
}
|
||||
|
||||
while (pos <= urb->actual_length - MSG_HEADER_LEN) {
|
||||
while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
|
||||
msg = urb->transfer_buffer + pos;
|
||||
|
||||
/* The Kvaser firmware can only read and write messages that
|
||||
@ -1352,7 +1355,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
|
||||
}
|
||||
|
||||
if (pos + msg->len > urb->actual_length) {
|
||||
dev_err(dev->udev->dev.parent, "Format error\n");
|
||||
dev_err_ratelimited(dev->udev->dev.parent,
|
||||
"Format error\n");
|
||||
break;
|
||||
}
|
||||
|
||||
@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
|
||||
spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
|
||||
|
||||
usb_unanchor_urb(urb);
|
||||
kfree(buf);
|
||||
|
||||
stats->tx_dropped++;
|
||||
|
||||
|
@ -592,6 +592,8 @@ static void mcba_usb_read_bulk_callback(struct urb *urb)
|
||||
break;
|
||||
|
||||
case -ENOENT:
|
||||
case -EPIPE:
|
||||
case -EPROTO:
|
||||
case -ESHUTDOWN:
|
||||
return;
|
||||
|
||||
|
@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb)
|
||||
break;
|
||||
|
||||
case -ENOENT:
|
||||
case -EPIPE:
|
||||
case -EPROTO:
|
||||
case -ESHUTDOWN:
|
||||
return;
|
||||
|
||||
|
@@ -1355,7 +1355,8 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,

/* Offload checksum calculation to HW */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
hdr->csum_l3 = 1; /* Enable IP csum calculation */
if (ip.v4->version == 4)
hdr->csum_l3 = 1; /* Enable IP csum calculation */
hdr->l3_offset = skb_network_offset(skb);
hdr->l4_offset = skb_transport_offset(skb);
@ -52,8 +52,7 @@ struct nfp_app;
|
||||
#define NFP_FLOWER_MASK_ELEMENT_RS 1
|
||||
#define NFP_FLOWER_MASK_HASH_BITS 10
|
||||
|
||||
#define NFP_FL_META_FLAG_NEW_MASK 128
|
||||
#define NFP_FL_META_FLAG_LAST_MASK 1
|
||||
#define NFP_FL_META_FLAG_MANAGE_MASK BIT(7)
|
||||
|
||||
#define NFP_FL_MASK_REUSE_TIME_NS 40000
|
||||
#define NFP_FL_MASK_ID_LOCATION 1
|
||||
|
@ -282,7 +282,7 @@ nfp_check_mask_add(struct nfp_app *app, char *mask_data, u32 mask_len,
|
||||
id = nfp_add_mask_table(app, mask_data, mask_len);
|
||||
if (id < 0)
|
||||
return false;
|
||||
*meta_flags |= NFP_FL_META_FLAG_NEW_MASK;
|
||||
*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
|
||||
}
|
||||
*mask_id = id;
|
||||
|
||||
@ -299,6 +299,9 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
|
||||
if (!mask_entry)
|
||||
return false;
|
||||
|
||||
if (meta_flags)
|
||||
*meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
|
||||
|
||||
*mask_id = mask_entry->mask_id;
|
||||
mask_entry->ref_cnt--;
|
||||
if (!mask_entry->ref_cnt) {
|
||||
@ -306,7 +309,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
|
||||
nfp_release_mask_id(app, *mask_id);
|
||||
kfree(mask_entry);
|
||||
if (meta_flags)
|
||||
*meta_flags |= NFP_FL_META_FLAG_LAST_MASK;
|
||||
*meta_flags |= NFP_FL_META_FLAG_MANAGE_MASK;
|
||||
}
|
||||
|
||||
return true;
|
||||
|
@ -297,6 +297,8 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
|
||||
netdev->netdev_ops = &nfp_repr_netdev_ops;
|
||||
netdev->ethtool_ops = &nfp_port_ethtool_ops;
|
||||
|
||||
netdev->max_mtu = pf_netdev->max_mtu;
|
||||
|
||||
SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops);
|
||||
|
||||
if (nfp_app_has_tc(app)) {
|
||||
|
@ -2025,21 +2025,6 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
||||
{
|
||||
struct rtl8169_private *tp = netdev_priv(dev);
|
||||
int ret;
|
||||
|
||||
del_timer_sync(&tp->timer);
|
||||
|
||||
rtl_lock_work(tp);
|
||||
ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
|
||||
cmd->duplex, cmd->advertising);
|
||||
rtl_unlock_work(tp);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
|
||||
netdev_features_t features)
|
||||
{
|
||||
@ -2166,6 +2151,27 @@ static int rtl8169_get_link_ksettings(struct net_device *dev,
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int rtl8169_set_link_ksettings(struct net_device *dev,
|
||||
const struct ethtool_link_ksettings *cmd)
|
||||
{
|
||||
struct rtl8169_private *tp = netdev_priv(dev);
|
||||
int rc;
|
||||
u32 advertising;
|
||||
|
||||
if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
|
||||
cmd->link_modes.advertising))
|
||||
return -EINVAL;
|
||||
|
||||
del_timer_sync(&tp->timer);
|
||||
|
||||
rtl_lock_work(tp);
|
||||
rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
|
||||
cmd->base.duplex, advertising);
|
||||
rtl_unlock_work(tp);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
|
||||
void *p)
|
||||
{
|
||||
@ -2367,7 +2373,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
|
||||
.get_drvinfo = rtl8169_get_drvinfo,
|
||||
.get_regs_len = rtl8169_get_regs_len,
|
||||
.get_link = ethtool_op_get_link,
|
||||
.set_settings = rtl8169_set_settings,
|
||||
.get_msglevel = rtl8169_get_msglevel,
|
||||
.set_msglevel = rtl8169_set_msglevel,
|
||||
.get_regs = rtl8169_get_regs,
|
||||
@ -2379,6 +2384,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
|
||||
.get_ts_info = ethtool_op_get_ts_info,
|
||||
.nway_reset = rtl8169_nway_reset,
|
||||
.get_link_ksettings = rtl8169_get_link_ksettings,
|
||||
.set_link_ksettings = rtl8169_set_link_ksettings,
|
||||
};
|
||||
|
||||
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
|
||||
|
@@ -2564,6 +2564,7 @@ static int stmmac_open(struct net_device *dev)

priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
priv->mss = 0;

ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
@ -1503,6 +1503,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
|
||||
{
|
||||
struct geneve_dev *geneve = netdev_priv(dev);
|
||||
struct ip_tunnel_info *info = &geneve->info;
|
||||
bool metadata = geneve->collect_md;
|
||||
__u8 tmp_vni[3];
|
||||
__u32 vni;
|
||||
|
||||
@ -1511,32 +1512,24 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
|
||||
if (nla_put_u32(skb, IFLA_GENEVE_ID, vni))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (rtnl_dereference(geneve->sock4)) {
|
||||
if (!metadata && ip_tunnel_info_af(info) == AF_INET) {
|
||||
if (nla_put_in_addr(skb, IFLA_GENEVE_REMOTE,
|
||||
info->key.u.ipv4.dst))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
|
||||
!!(info->key.tun_flags & TUNNEL_CSUM)))
|
||||
goto nla_put_failure;
|
||||
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_IPV6)
|
||||
if (rtnl_dereference(geneve->sock6)) {
|
||||
} else if (!metadata) {
|
||||
if (nla_put_in6_addr(skb, IFLA_GENEVE_REMOTE6,
|
||||
&info->key.u.ipv6.dst))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
|
||||
!(info->key.tun_flags & TUNNEL_CSUM)))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
|
||||
!geneve->use_udp6_rx_checksums))
|
||||
goto nla_put_failure;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
if (nla_put_u8(skb, IFLA_GENEVE_TTL, info->key.ttl) ||
|
||||
nla_put_u8(skb, IFLA_GENEVE_TOS, info->key.tos) ||
|
||||
@ -1546,10 +1539,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
|
||||
if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst))
|
||||
goto nla_put_failure;
|
||||
|
||||
if (geneve->collect_md) {
|
||||
if (nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
|
||||
if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
|
||||
goto nla_put_failure;
|
||||
}
|
||||
|
||||
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
|
||||
!geneve->use_udp6_rx_checksums))
|
||||
goto nla_put_failure;
|
||||
|
||||
return 0;
|
||||
|
||||
nla_put_failure:
|
||||
|
@@ -409,7 +409,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
struct dst_entry *dst;
int err, ret = NET_XMIT_DROP;
struct flowi6 fl6 = {
.flowi6_iif = dev->ifindex,
.flowi6_oif = dev->ifindex,
.daddr = ip6h->daddr,
.saddr = ip6h->saddr,
.flowi6_flags = FLOWI_FLAG_ANYSRC,
Some files were not shown because too many files have changed in this diff.