Mirror of https://github.com/rd-stuffs/msm-4.14.git (synced 2025-02-20 11:45:48 +08:00)
This is the 4.14.257 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmGwYv4ACgkQONu9yGCS
aT4HeQ/8Dn/KYB6zzi6YenfG8JyTdkcIZ4Y1ElurgF5RX9/JUQbw0l5EDWsmG/IN
0JUn7KsT+eStnaI2AUj175K4oZE1l3cZxvPGEOB3ynv9/is+iSyVHARrtR1ITTO3
+YTO6ZXKLUI+oMVo3SHr6dxr6kkT0b0BDgaroaYLgVqknpPQMDQvx35ZG7E2NL4O
R6ou66nG/TKTbtn7vBCCoERMcPH6TEYUhi7p+L/+cdQs2/li3JDo/d3/3WGAb0ej
0kXX16VCEghicoE8m2TOA9TAgGs6nF3i6H2ZiCMl4m0gqAcr4IdAxDzD3a5IfUV9
pt1fmz+7DNrWTxv9e5ST5R5poAIoSuuVQfNQDV4MjeDLmh5ujyl/5WUk5rYQQ9vw
vRtu5DrSrSNM15jOZnlCQxlcu/1xqRKuixWQbupawhKNN00w6yJKxuQ3oM87AvX+
OX0tp6FdXVoDO2sP1xXp9o7G5DDrQq9Lh5gNen6BaVF00VawM77UjJ+ijwmCUWXf
jhfAyDXZEPNRijlwcOq8rtXVb68ZhQ2sT0HVJ22ppx70bglD1FgfvGPYxFf4BIxz
g+MsaMUU3rgXxIo7xatAC6NnCPMC8feYINGbf+L/MDgvySf3GU84JOIeM/MDMawe
coZQpDreHcYZQtbECpeFVuEA8hTaLCvmxowbG7uVRj1sNvpxxik=
=dp5S
-----END PGP SIGNATURE-----

Merge 4.14.257 into android-4.14-stable

Changes in 4.14.257
    USB: serial: option: add Telit LE910S1 0x9200 composition
    USB: serial: option: add Fibocom FM101-GL variants
    usb: hub: Fix usb enumeration issue due to address0 race
    usb: hub: Fix locking issues with address0_mutex
    binder: fix test regression due to sender_euid change
    ALSA: ctxfi: Fix out-of-range access
    media: cec: copy sequence field for the reply
    HID: wacom: Use "Confidence" flag to prevent reporting invalid contacts
    staging: rtl8192e: Fix use after free in _rtl92e_pci_disconnect()
    fuse: fix page stealing
    xen: don't continue xenstore initialization in case of errors
    xen: detect uninitialized xenbus in xenbus_init
    tracing: Fix pid filtering when triggers are attached
    netfilter: ipvs: Fix reuse connection if RS weight is 0
    ARM: dts: BCM5301X: Fix I2C controller interrupt
    ARM: dts: BCM5301X: Add interrupt properties to GPIO node
    ASoC: topology: Add missing rwsem around snd_ctl_remove() calls
    net: ieee802154: handle iftypes as u32
    NFSv42: Don't fail clone() unless the OP_CLONE operation failed
    ARM: socfpga: Fix crash with CONFIG_FORTIRY_SOURCE
    scsi: mpt3sas: Fix kernel panic during drive powercycle test
    drm/vc4: fix error code in vc4_create_object()
    ipv6: fix typos in __ip6_finish_output()
    net/smc: Ensure the active closing peer first closes clcsock
    PM: hibernate: use correct mode for swsusp_close()
    tcp_cubic: fix spurious Hystart ACK train detections for not-cwnd-limited flows
    MIPS: use 3-level pgtable for 64KB page size on MIPS_VA_BITS_48
    net/smc: Don't call clcsock shutdown twice when smc shutdown
    vhost/vsock: fix incorrect used length reported to the guest
    tracing: Check pid filtering when creating events
    s390/mm: validate VMA in PGSTE manipulation functions
    PCI: aardvark: Fix I/O space page leak
    PCI: aardvark: Fix a leaked reference by adding missing of_node_put()
    PCI: aardvark: Wait for endpoint to be ready before training link
    PCI: aardvark: Train link immediately after enabling training
    PCI: aardvark: Improve link training
    PCI: aardvark: Issue PERST via GPIO
    PCI: aardvark: Replace custom macros by standard linux/pci_regs.h macros
    PCI: aardvark: Indicate error in 'val' when config read fails
    PCI: aardvark: Introduce an advk_pcie_valid_device() helper
    PCI: aardvark: Don't touch PCIe registers if no card connected
    PCI: aardvark: Fix compilation on s390
    PCI: aardvark: Move PCIe reset card code to advk_pcie_train_link()
    PCI: aardvark: Update comment about disabling link training
    PCI: aardvark: Remove PCIe outbound window configuration
    PCI: aardvark: Configure PCIe resources from 'ranges' DT property
    PCI: aardvark: Fix PCIe Max Payload Size setting
    PCI: Add PCI_EXP_LNKCTL2_TLS* macros
    PCI: aardvark: Fix link training
    PCI: aardvark: Fix checking for link up via LTSSM state
    pinctrl: armada-37xx: Correct mpp definitions
    pinctrl: armada-37xx: add missing pin: PCIe1 Wakeup
    pinctrl: armada-37xx: Correct PWM pins definitions
    arm64: dts: marvell: armada-37xx: declare PCIe reset pin
    arm64: dts: marvell: armada-37xx: Set pcie_reset_pin to gpio function
    hugetlbfs: flush TLBs correctly after huge_pmd_unshare
    proc/vmcore: fix clearing user buffer by properly using clear_user()
    NFC: add NCI_UNREG flag to eliminate the race
    fuse: release pipe buf after last use
    xen: sync include/xen/interface/io/ring.h with Xen's newest version
    xen/blkfront: read response from backend only once
    xen/blkfront: don't take local copy of a request from the ring page
    xen/blkfront: don't trust the backend response data blindly
    xen/netfront: read response from backend only once
    xen/netfront: don't read data from request on the ring page
    xen/netfront: disentangle tx_skb_freelist
    xen/netfront: don't trust the backend response data blindly
    tty: hvc: replace BUG_ON() with negative return value
    shm: extend forced shm destroy to support objects from several IPC nses
    ipc: WARN if trying to remove ipc object which is absent
    NFSv42: Fix pagecache invalidation after COPY/CLONE
    hugetlb: take PMD sharing into account when flushing tlb/caches
    net: return correct error code
    platform/x86: thinkpad_acpi: Fix WWAN device disabled issue after S3 deep
    s390/setup: avoid using memblock_enforce_memory_limit
    btrfs: check-integrity: fix a warning on write caching disabled disk
    thermal: core: Reset previous low and high trip during thermal zone init
    scsi: iscsi: Unblock session then wake up error handler
    ethernet: hisilicon: hns: hns_dsaf_misc: fix a possible array overflow in hns_dsaf_ge_srst_by_port()
    net: tulip: de4x5: fix the problem that the array 'lp->phy[8]' may be out of bound
    net: ethernet: dec: tulip: de4x5: fix possible array overflows in type3_infoblock()
    perf hist: Fix memory leak of a perf_hpp_fmt
    vrf: Reset IPCB/IP6CB when processing outbound pkts in vrf dev xmit
    kprobes: Limit max data_size of the kretprobe instances
    sata_fsl: fix UAF in sata_fsl_port_stop when rmmod sata_fsl
    sata_fsl: fix warning in remove_proc_entry when rmmod sata_fsl
    fs: add fget_many() and fput_many()
    fget: check that the fd still exists after getting a ref to it
    natsemi: xtensa: fix section mismatch warnings
    net: qlogic: qlcnic: Fix a NULL pointer dereference in qlcnic_83xx_add_rings()
    net: mpls: Fix notifications when deleting a device
    siphash: use _unaligned version by default
    net/mlx4_en: Fix an use-after-free bug in mlx4_en_try_alloc_resources()
    net: usb: lan78xx: lan78xx_phy_init(): use PHY_POLL instead of "0" if no IRQ is available
    net/rds: correct socket tunable error in rds_tcp_tune()
    net/smc: Keep smc_close_final rc during active close
    parisc: Fix KBUILD_IMAGE for self-extracting kernel
    parisc: Fix "make install" on newer debian releases
    vgacon: Propagate console boot parameters before calling `vc_resize'
    xhci: Fix commad ring abort, write all 64 bits to CRCR register.
    usb: typec: tcpm: Wait in SNK_DEBOUNCED until disconnect
    x86/64/mm: Map all kernel memory into trampoline_pgd
    tty: serial: msm_serial: Deactivate RX DMA for polling support
    serial: pl011: Add ACPI SBSA UART match id
    serial: core: fix transmit-buffer reset and memleak
    parisc: Mark cr16 CPU clocksource unstable on all SMP machines
    Linux 4.14.257

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I3148611f406a61ce3d7ff7dcb56977a114a4f499
This commit is contained in: commit 6d1f178f21
@@ -43,26 +43,26 @@ group emmc_nb

 group pwm0
  - pin 11 (GPIO1-11)
- - functions pwm, gpio
+ - functions pwm, led, gpio

 group pwm1
  - pin 12
- - functions pwm, gpio
+ - functions pwm, led, gpio

 group pwm2
  - pin 13
- - functions pwm, gpio
+ - functions pwm, led, gpio

 group pwm3
  - pin 14
- - functions pwm, gpio
+ - functions pwm, led, gpio

 group pmic1
- - pin 17
+ - pin 7
  - functions pmic, gpio

 group pmic0
- - pin 16
+ - pin 6
  - functions pmic, gpio

 group i2c2
@@ -112,17 +112,25 @@ group usb2_drvvbus1
  - functions drvbus, gpio

 group sdio_sb
- - pins 60-64
+ - pins 60-65
  - functions sdio, gpio

 group rgmii
- - pins 42-55
+ - pins 42-53
  - functions mii, gpio

 group pcie1
- - pins 39-40
+ - pins 39
  - functions pcie, gpio

+group pcie1_clkreq
+ - pins 40
+ - functions pcie, gpio
+
+group smi
+ - pins 54-55
+ - functions smi, gpio
+
 group ptp
  - pins 56-58
  - functions ptp, gpio
@@ -30,8 +30,7 @@ conn_reuse_mode - INTEGER

 	0: disable any special handling on port reuse. The new
 	connection will be delivered to the same real server that was
-	servicing the previous connection. This will effectively
-	disable expire_nodest_conn.
+	servicing the previous connection.

 	bit 1: enable rescheduling of new connections when it is safe.
 	That is, whenever expire_nodest_conn and for TCP sockets, when
Makefile | 2 +-

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 256
+SUBLEVEL = 257
 EXTRAVERSION =
 NAME = Petit Gorille
@@ -246,6 +246,8 @@
 			gpio-controller;
 			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 		};

 		pcie0: pcie@12000 {
@@ -365,7 +367,7 @@
 		i2c0: i2c@18009000 {
 			compatible = "brcm,iproc-i2c";
 			reg = <0x18009000 0x50>;
-			interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
 			#address-cells = <1>;
 			#size-cells = <0>;
 			clock-frequency = <100000>;
@@ -280,6 +280,14 @@ tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr
 	tlb_add_flush(tlb, addr);
 }

+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+		    unsigned long size)
+{
+	tlb_add_flush(tlb, address);
+	tlb_add_flush(tlb, address + size - PMD_SIZE);
+}
+
 #define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
 #define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
 #define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
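The hugetlbfs fix adds the same tlb_flush_pmd_range() helper to several architectures (arm above; ia64, s390, sh and the generic variant below). As a quick illustration of its semantics, here is a minimal user-space model -- all names and constants here are invented for the sketch, it is not the kernel API: the helper only ever widens the pending flush window, so a huge_pmd_unshare() that touched a whole PMD outside the gathered range cannot be missed by a narrower flush.

    /* Minimal user-space model of mmu_gather range widening (sketch). */
    #include <stdio.h>

    #define PMD_SIZE (2UL * 1024 * 1024)    /* model value: 2 MiB PMD */

    struct mmu_gather_model {
            unsigned long start;
            unsigned long end;
    };

    /* Widen the pending flush range; never shrink it. */
    static void tlb_flush_pmd_range_model(struct mmu_gather_model *tlb,
                                          unsigned long address,
                                          unsigned long size)
    {
            if (tlb->start > address)
                    tlb->start = address;
            if (tlb->end < address + size)
                    tlb->end = address + size;
    }

    int main(void)
    {
            /* Gather initially covers a single page at 0x400000. */
            struct mmu_gather_model tlb = { 0x400000, 0x401000 };

            /* An unshare touched a whole PMD below the gathered range. */
            tlb_flush_pmd_range_model(&tlb, 0x200000, PMD_SIZE);
            printf("flush [%#lx, %#lx)\n", tlb.start, tlb.end);
            return 0;
    }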
@@ -48,7 +48,7 @@ extern void __iomem *sdr_ctl_base_addr;
 u32 socfpga_sdram_self_refresh(u32 sdr_base);
 extern unsigned int socfpga_sdram_self_refresh_sz;

-extern char secondary_trampoline, secondary_trampoline_end;
+extern char secondary_trampoline[], secondary_trampoline_end[];

 extern unsigned long socfpga_cpu1start_addr;
@@ -31,14 +31,14 @@

 static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+	int trampoline_size = secondary_trampoline_end - secondary_trampoline;

 	if (socfpga_cpu1start_addr) {
 		/* This will put CPU #1 into reset. */
 		writel(RSTMGR_MPUMODRST_CPU1,
 		       rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);

-		memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+		memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);

 		writel(__pa_symbol(secondary_startup),
 		       sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
@@ -56,12 +56,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)

 static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
+	int trampoline_size = secondary_trampoline_end - secondary_trampoline;

 	if (socfpga_cpu1start_addr) {
 		writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
 		       SOCFPGA_A10_RSTMGR_MODMPURST);
-		memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
+		memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);

 		writel(__pa_symbol(secondary_startup),
 		       sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
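Why this one-character-class change matters: with `extern char secondary_trampoline;` the compiler treats the linker-provided symbol as a one-byte object, so a fortified memcpy() of trampoline_size bytes looks like a guaranteed overflow under CONFIG_FORTIFY_SOURCE. Declaring the symbols as incomplete arrays removes the assumed object size. A compile-only sketch of the two styles (symbol names shortened for illustration; the real symbols come from assembly, so this will not link on its own):

    #include <string.h>

    /* Old style: each symbol looks like a 1-byte object to the
     * compiler, so a fortified memcpy() of 'size' bytes is flagged. */
    extern char trampoline_start_bad, trampoline_end_bad;

    /* New style: incomplete array types carry no known size, so the
     * compiler cannot (wrongly) prove the copy out of bounds. */
    extern char trampoline_start[], trampoline_end[];

    void copy_trampoline(void *dst)
    {
            long size = trampoline_end - trampoline_start;

            memcpy(dst, trampoline_start, size);
    }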
@@ -155,6 +155,9 @@

 /* CON15(V2.0)/CON17(V1.4) : PCIe / CON15(V2.0)/CON12(V1.4) :mini-PCIe */
 &pcie0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
+	reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
@@ -82,6 +82,9 @@

 /* J9 */
 &pcie0 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
+	reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
@@ -239,6 +239,15 @@
 				function = "mii";
 			};

+			pcie_reset_pins: pcie-reset-pins {
+				groups = "pcie1";
+				function = "gpio";
+			};
+
+			pcie_clkreq_pins: pcie-clkreq-pins {
+				groups = "pcie1_clkreq";
+				function = "pcie";
+			};
 		};

 		eth0: ethernet@30000 {
@@ -269,6 +269,16 @@ __tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address
 		tlb->end_addr = address + PAGE_SIZE;
 }

+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+		    unsigned long size)
+{
+	if (tlb->start_addr > address)
+		tlb->start_addr = address;
+	if (tlb->end_addr < address + size)
+		tlb->end_addr = address + size;
+}
+
 #define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

 #define tlb_start_vma(tlb, vma)			do { } while (0)
@@ -2990,7 +2990,7 @@ config HAVE_LATENCYTOP_SUPPORT
 config PGTABLE_LEVELS
 	int
 	default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
-	default 3 if 64BIT && !PAGE_SIZE_64KB
+	default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
 	default 2

 source "init/Kconfig"
@@ -17,7 +17,12 @@
 # Mike Shaver, Helge Deller and Martin K. Petersen
 #

+ifdef CONFIG_PARISC_SELF_EXTRACT
+boot := arch/parisc/boot
+KBUILD_IMAGE := $(boot)/bzImage
+else
 KBUILD_IMAGE := vmlinuz
+endif

 KBUILD_DEFCONFIG := default_defconfig
@@ -39,6 +39,7 @@ verify "$3"
 if [ -n "${INSTALLKERNEL}" ]; then
   if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
   if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+  if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
 fi

 # Default install
@@ -245,27 +245,13 @@ void __init time_init(void)
 static int __init init_cr16_clocksource(void)
 {
 	/*
-	 * The cr16 interval timers are not syncronized across CPUs on
-	 * different sockets, so mark them unstable and lower rating on
-	 * multi-socket SMP systems.
+	 * The cr16 interval timers are not syncronized across CPUs, even if
+	 * they share the same socket.
 	 */
 	if (num_online_cpus() > 1 && !running_on_qemu) {
-		int cpu;
-		unsigned long cpu0_loc;
-		cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
-
-		for_each_online_cpu(cpu) {
-			if (cpu == 0)
-				continue;
-			if ((cpu0_loc != 0) &&
-			    (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
-				continue;
-
-			clocksource_cr16.name = "cr16_unstable";
-			clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
-			clocksource_cr16.rating = 0;
-			break;
-		}
+		clocksource_cr16.name = "cr16_unstable";
+		clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+		clocksource_cr16.rating = 0;
 	}

 	/* XXX: We may want to mark sched_clock stable here if cr16 clocks are
@@ -116,6 +116,20 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 	return tlb_remove_page(tlb, page);
 }

+static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
+				unsigned long address, unsigned long size)
+{
+	/*
+	 * the range might exceed the original range that was provided to
+	 * tlb_gather_mmu(), so we need to update it despite the fact it is
+	 * usually not updated.
+	 */
+	if (tlb->start > address)
+		tlb->start = address;
+	if (tlb->end < address + size)
+		tlb->end = address + size;
+}
+
 /*
  * pte_free_tlb frees a pte table and clears the CRSTE for the
  * page table from the tlb.
@@ -706,9 +706,6 @@ static void __init setup_memory(void)
 		storage_key_init_range(reg->base, reg->base + reg->size);
 	}
 	psw_set_key(PAGE_DEFAULT_KEY);
-
-	/* Only cosmetics */
-	memblock_enforce_memory_limit(memblock_end_of_DRAM());
 }

 /*
@@ -896,6 +896,7 @@ EXPORT_SYMBOL(get_guest_storage_key);
 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
 			unsigned long *oldpte, unsigned long *oldpgste)
 {
+	struct vm_area_struct *vma;
 	unsigned long pgstev;
 	spinlock_t *ptl;
 	pgste_t pgste;
@@ -905,6 +906,10 @@ int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
 	WARN_ON_ONCE(orc > ESSA_MAX);
 	if (unlikely(orc > ESSA_MAX))
 		return -EINVAL;
+
+	vma = find_vma(mm, hva);
+	if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
+		return -EFAULT;
 	ptep = get_locked_pte(mm, hva, &ptl);
 	if (unlikely(!ptep))
 		return -EFAULT;
@@ -997,10 +1002,14 @@ EXPORT_SYMBOL(pgste_perform_essa);
 int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
 			unsigned long bits, unsigned long value)
 {
+	struct vm_area_struct *vma;
 	spinlock_t *ptl;
 	pgste_t new;
 	pte_t *ptep;

+	vma = find_vma(mm, hva);
+	if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
+		return -EFAULT;
 	ptep = get_locked_pte(mm, hva, &ptl);
 	if (unlikely(!ptep))
 		return -EFAULT;
@@ -1025,9 +1034,13 @@ EXPORT_SYMBOL(set_pgste_bits);
 */
 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
 {
+	struct vm_area_struct *vma;
 	spinlock_t *ptl;
 	pte_t *ptep;

+	vma = find_vma(mm, hva);
+	if (!vma || hva < vma->vm_start || is_vm_hugetlb_page(vma))
+		return -EFAULT;
 	ptep = get_locked_pte(mm, hva, &ptl);
 	if (unlikely(!ptep))
 		return -EFAULT;
@@ -127,6 +127,16 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 	return tlb_remove_page(tlb, page);
 }

+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+		    unsigned long size)
+{
+	if (tlb->start > address)
+		tlb->start = address;
+	if (tlb->end < address + size)
+		tlb->end = address + size;
+}
+
 #define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
 static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 						     unsigned int page_size)
@@ -130,6 +130,18 @@ static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 	return tlb_remove_page(tlb, page);
 }

+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+		    unsigned long size)
+{
+	tlb->need_flush = 1;
+
+	if (tlb->start > address)
+		tlb->start = address;
+	if (tlb->end < address + size)
+		tlb->end = address + size;
+}
+
 /**
  * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
  *
@@ -57,6 +57,7 @@ static void __init setup_real_mode(void)
 #ifdef CONFIG_X86_64
 	u64 *trampoline_pgd;
 	u64 efer;
+	int i;
 #endif

 	base = (unsigned char *)real_mode_header;
@@ -114,8 +115,17 @@ static void __init setup_real_mode(void)
 		trampoline_header->flags |= TH_FLAGS_SME_ACTIVE;

 	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+
+	/* Map the real mode stub as virtual == physical */
 	trampoline_pgd[0] = trampoline_pgd_entry.pgd;
-	trampoline_pgd[511] = init_top_pgt[511].pgd;
+
+	/*
+	 * Include the entirety of the kernel mapping into the trampoline
+	 * PGD.  This way, all mappings present in the normal kernel page
+	 * tables are usable while running on trampoline_pgd.
+	 */
+	for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
+		trampoline_pgd[i] = init_top_pgt[i].pgd;
 #endif
 }
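Conceptually this fix replaces a single copied page-table-root slot with a loop over the whole kernel half of the root. A small user-space model of the idea -- the constants are assumptions made for the sketch (a real four-level x86-64 PGD has 512 entries and the kernel half is commonly taken to start at pgd_index(__PAGE_OFFSET)), not the kernel's actual definitions:

    #define PTRS_PER_PGD     512
    #define KERNEL_PGD_START 256    /* model of pgd_index(__PAGE_OFFSET) */

    static unsigned long init_top_pgt[PTRS_PER_PGD];
    static unsigned long trampoline_pgd[PTRS_PER_PGD];

    static void map_kernel_into_trampoline(void)
    {
            int i;

            /* The old code copied only entry 511; any kernel mapping
             * rooted in another kernel-half slot was unreachable while
             * running on the trampoline root. Mirror them all. */
            for (i = KERNEL_PGD_START; i < PTRS_PER_PGD; i++)
                    trampoline_pgd[i] = init_top_pgt[i];
    }

    int main(void)
    {
            init_top_pgt[300] = 0x1234;     /* some kernel-half mapping */
            map_kernel_into_trampoline();
            return trampoline_pgd[300] == init_top_pgt[300] ? 0 : 1;
    }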
@@ -1406,6 +1406,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
 	return 0;
 }

+static void sata_fsl_host_stop(struct ata_host *host)
+{
+	struct sata_fsl_host_priv *host_priv = host->private_data;
+
+	iounmap(host_priv->hcr_base);
+	kfree(host_priv);
+}
+
 /*
  * scsi mid-layer and libata interface structures
  */
@@ -1438,6 +1446,8 @@ static struct ata_port_operations sata_fsl_ops = {
 	.port_start = sata_fsl_port_start,
 	.port_stop = sata_fsl_port_stop,

+	.host_stop = sata_fsl_host_stop,
+
 	.pmp_attach = sata_fsl_pmp_attach,
 	.pmp_detach = sata_fsl_pmp_detach,
 };
@@ -1492,9 +1502,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
 	host_priv->ssr_base = ssr_base;
 	host_priv->csr_base = csr_base;

-	irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-	if (!irq) {
-		dev_err(&ofdev->dev, "invalid irq from platform\n");
+	irq = platform_get_irq(ofdev, 0);
+	if (irq < 0) {
+		retval = irq;
 		goto error_exit_with_cleanup;
 	}
 	host_priv->irq = irq;
@@ -1569,10 +1579,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)

 	ata_host_detach(host);

-	irq_dispose_mapping(host_priv->irq);
-	iounmap(host_priv->hcr_base);
-	kfree(host_priv);
-
 	return 0;
 }
@@ -78,6 +78,7 @@ enum blkif_state {
 	BLKIF_STATE_DISCONNECTED,
 	BLKIF_STATE_CONNECTED,
 	BLKIF_STATE_SUSPENDED,
+	BLKIF_STATE_ERROR,
 };

 struct grant {
@@ -87,6 +88,7 @@ struct grant {
 };

 enum blk_req_status {
+	REQ_PROCESSING,
 	REQ_WAITING,
 	REQ_DONE,
 	REQ_ERROR,
@@ -534,10 +536,10 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,

 	id = get_id_from_freelist(rinfo);
 	rinfo->shadow[id].request = req;
-	rinfo->shadow[id].status = REQ_WAITING;
+	rinfo->shadow[id].status = REQ_PROCESSING;
 	rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;

-	(*ring_req)->u.rw.id = id;
+	rinfo->shadow[id].req.u.rw.id = id;

 	return id;
 }
@@ -545,11 +547,12 @@ static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
 {
 	struct blkfront_info *info = rinfo->dev_info;
-	struct blkif_request *ring_req;
+	struct blkif_request *ring_req, *final_ring_req;
 	unsigned long id;

 	/* Fill out a communications ring structure. */
-	id = blkif_ring_get_request(rinfo, req, &ring_req);
+	id = blkif_ring_get_request(rinfo, req, &final_ring_req);
+	ring_req = &rinfo->shadow[id].req;

 	ring_req->operation = BLKIF_OP_DISCARD;
 	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
@@ -560,8 +563,9 @@ static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_inf
 	else
 		ring_req->u.discard.flag = 0;

-	/* Keep a private copy so we can reissue requests when recovering. */
-	rinfo->shadow[id].req = *ring_req;
+	/* Copy the request to the ring page. */
+	*final_ring_req = *ring_req;
+	rinfo->shadow[id].status = REQ_WAITING;

 	return 0;
 }
@@ -694,6 +698,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 {
 	struct blkfront_info *info = rinfo->dev_info;
 	struct blkif_request *ring_req, *extra_ring_req = NULL;
+	struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
 	unsigned long id, extra_id = NO_ASSOCIATED_ID;
 	bool require_extra_req = false;
 	int i;
@@ -738,7 +743,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	}

 	/* Fill out a communications ring structure. */
-	id = blkif_ring_get_request(rinfo, req, &ring_req);
+	id = blkif_ring_get_request(rinfo, req, &final_ring_req);
+	ring_req = &rinfo->shadow[id].req;

 	num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
 	num_grant = 0;
@@ -789,7 +795,9 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 		ring_req->u.rw.nr_segments = num_grant;
 		if (unlikely(require_extra_req)) {
 			extra_id = blkif_ring_get_request(rinfo, req,
-							  &extra_ring_req);
+							  &final_extra_ring_req);
+			extra_ring_req = &rinfo->shadow[extra_id].req;

 			/*
 			 * Only the first request contains the scatter-gather
 			 * list.
@@ -831,10 +839,13 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	if (setup.segments)
 		kunmap_atomic(setup.segments);

-	/* Keep a private copy so we can reissue requests when recovering. */
-	rinfo->shadow[id].req = *ring_req;
-	if (unlikely(require_extra_req))
-		rinfo->shadow[extra_id].req = *extra_ring_req;
+	/* Copy request(s) to the ring page. */
+	*final_ring_req = *ring_req;
+	rinfo->shadow[id].status = REQ_WAITING;
+	if (unlikely(require_extra_req)) {
+		*final_extra_ring_req = *extra_ring_req;
+		rinfo->shadow[extra_id].status = REQ_WAITING;
+	}

 	if (new_persistent_gnts)
 		gnttab_free_grant_references(setup.gref_head);
@@ -1408,8 +1419,8 @@ static enum blk_req_status blkif_rsp_to_req_status(int rsp)
 static int blkif_get_final_status(enum blk_req_status s1,
 				  enum blk_req_status s2)
 {
-	BUG_ON(s1 == REQ_WAITING);
-	BUG_ON(s2 == REQ_WAITING);
+	BUG_ON(s1 < REQ_DONE);
+	BUG_ON(s2 < REQ_DONE);

 	if (s1 == REQ_ERROR || s2 == REQ_ERROR)
 		return BLKIF_RSP_ERROR;
@@ -1442,7 +1453,7 @@ static bool blkif_completion(unsigned long *id,
 		s->status = blkif_rsp_to_req_status(bret->status);

 		/* Wait the second response if not yet here. */
-		if (s2->status == REQ_WAITING)
+		if (s2->status < REQ_DONE)
 			return 0;

 		bret->status = blkif_get_final_status(s->status,
@@ -1550,7 +1561,7 @@ static bool blkif_completion(unsigned long *id,
 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 {
 	struct request *req;
-	struct blkif_response *bret;
+	struct blkif_response bret;
 	RING_IDX i, rp;
 	unsigned long flags;
 	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
@@ -1561,54 +1572,76 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)

 	spin_lock_irqsave(&rinfo->ring_lock, flags);
 again:
-	rp = rinfo->ring.sring->rsp_prod;
-	rmb(); /* Ensure we see queued responses up to 'rp'. */
+	rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
+	virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
+	if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
+		pr_alert("%s: illegal number of responses %u\n",
+			 info->gd->disk_name, rp - rinfo->ring.rsp_cons);
+		goto err;
+	}

 	for (i = rinfo->ring.rsp_cons; i != rp; i++) {
 		unsigned long id;
+		unsigned int op;
+
+		RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
+		id = bret.id;

-		bret = RING_GET_RESPONSE(&rinfo->ring, i);
-		id   = bret->id;
 		/*
 		 * The backend has messed up and given us an id that we would
 		 * never have given to it (we stamp it up to BLK_RING_SIZE -
 		 * look in get_id_from_freelist.
 		 */
 		if (id >= BLK_RING_SIZE(info)) {
-			WARN(1, "%s: response to %s has incorrect id (%ld)\n",
-			     info->gd->disk_name, op_name(bret->operation), id);
-			/* We can't safely get the 'struct request' as
-			 * the id is busted. */
-			continue;
+			pr_alert("%s: response has incorrect id (%ld)\n",
+				 info->gd->disk_name, id);
+			goto err;
+		}
+		if (rinfo->shadow[id].status != REQ_WAITING) {
+			pr_alert("%s: response references no pending request\n",
+				 info->gd->disk_name);
+			goto err;
 		}
+
+		rinfo->shadow[id].status = REQ_PROCESSING;
 		req  = rinfo->shadow[id].request;

-		if (bret->operation != BLKIF_OP_DISCARD) {
+		op = rinfo->shadow[id].req.operation;
+		if (op == BLKIF_OP_INDIRECT)
+			op = rinfo->shadow[id].req.u.indirect.indirect_op;
+		if (bret.operation != op) {
+			pr_alert("%s: response has wrong operation (%u instead of %u)\n",
+				 info->gd->disk_name, bret.operation, op);
+			goto err;
+		}
+
+		if (bret.operation != BLKIF_OP_DISCARD) {
 			/*
 			 * We may need to wait for an extra response if the
 			 * I/O request is split in 2
 			 */
-			if (!blkif_completion(&id, rinfo, bret))
+			if (!blkif_completion(&id, rinfo, &bret))
 				continue;
 		}

 		if (add_id_to_freelist(rinfo, id)) {
 			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
-			     info->gd->disk_name, op_name(bret->operation), id);
+			     info->gd->disk_name, op_name(bret.operation), id);
 			continue;
 		}

-		if (bret->status == BLKIF_RSP_OKAY)
+		if (bret.status == BLKIF_RSP_OKAY)
 			blkif_req(req)->error = BLK_STS_OK;
 		else
 			blkif_req(req)->error = BLK_STS_IOERR;

-		switch (bret->operation) {
+		switch (bret.operation) {
 		case BLKIF_OP_DISCARD:
-			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
+			if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
 				struct request_queue *rq = info->rq;
-				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
-					   info->gd->disk_name, op_name(bret->operation));
+
+				pr_warn_ratelimited("blkfront: %s: %s op failed\n",
+					   info->gd->disk_name, op_name(bret.operation));
 				blkif_req(req)->error = BLK_STS_NOTSUPP;
 				info->feature_discard = 0;
 				info->feature_secdiscard = 0;
@@ -1618,15 +1651,15 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 			break;
 		case BLKIF_OP_FLUSH_DISKCACHE:
 		case BLKIF_OP_WRITE_BARRIER:
-			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
-				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
-				       info->gd->disk_name, op_name(bret->operation));
+			if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
+				pr_warn_ratelimited("blkfront: %s: %s op failed\n",
+				       info->gd->disk_name, op_name(bret.operation));
 				blkif_req(req)->error = BLK_STS_NOTSUPP;
 			}
-			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+			if (unlikely(bret.status == BLKIF_RSP_ERROR &&
 				     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
-				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
-				       info->gd->disk_name, op_name(bret->operation));
+				pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
+				       info->gd->disk_name, op_name(bret.operation));
 				blkif_req(req)->error = BLK_STS_NOTSUPP;
 			}
 			if (unlikely(blkif_req(req)->error)) {
@@ -1639,9 +1672,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 			/* fall through */
 		case BLKIF_OP_READ:
 		case BLKIF_OP_WRITE:
-			if (unlikely(bret->status != BLKIF_RSP_OKAY))
-				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
-					"request: %x\n", bret->status);
+			if (unlikely(bret.status != BLKIF_RSP_OKAY))
+				dev_dbg_ratelimited(&info->xbdev->dev,
+					"Bad return from blkdev data request: %#x\n",
+					bret.status);

 			break;
 		default:
@@ -1666,6 +1700,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 	spin_unlock_irqrestore(&rinfo->ring_lock, flags);

 	return IRQ_HANDLED;
+
+ err:
+	info->connected = BLKIF_STATE_ERROR;
+
+	spin_unlock_irqrestore(&rinfo->ring_lock, flags);
+
+	pr_alert("%s disabled for further use\n", info->gd->disk_name);
+	return IRQ_HANDLED;
 }
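The core of the three blkfront fixes above is one discipline: read each response out of the shared ring exactly once into private memory, then run every sanity check on that private copy. A minimal user-space model of the same discipline -- all names here are invented for the sketch, this is not the Xen ring API:

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 32

    struct rsp { unsigned long id; int status; };

    /* Stands in for a ring page shared with an untrusted backend; the
     * backend may rewrite entries at any time, hence volatile. */
    static volatile struct rsp shared_ring[RING_SIZE];

    static bool pending[RING_SIZE];

    static int consume_one(unsigned int idx, struct rsp *out)
    {
            volatile struct rsp *slot = &shared_ring[idx % RING_SIZE];

            /* Copy first: every later check runs on the private copy,
             * so the backend cannot change a field between the check
             * and its use. */
            out->id = slot->id;
            out->status = slot->status;

            if (out->id >= RING_SIZE)       /* forged id */
                    return -1;
            if (!pending[out->id])          /* response without a request */
                    return -1;
            pending[out->id] = false;       /* each id completes only once */
            return 0;
    }

    int main(void)
    {
            struct rsp r;

            pending[7] = true;
            shared_ring[0].id = 7;
            printf("first: %d, replayed: %d\n",
                   consume_one(0, &r), consume_one(0, &r));
            return 0;
    }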
@@ -292,7 +292,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)

 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 	if (!bo)
-		return ERR_PTR(-ENOMEM);
+		return NULL;

 	mutex_lock(&vc4->bo_lock);
 	bo->label = VC4_BO_TYPE_KERNEL;
@@ -2433,6 +2433,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
 	struct wacom_features *features = &wacom->wacom_wac.features;

 	switch (equivalent_usage) {
+	case HID_DG_CONFIDENCE:
+		wacom_wac->hid_data.confidence = value;
+		break;
 	case HID_GD_X:
 		wacom_wac->hid_data.x = value;
 		break;
@@ -2463,7 +2466,8 @@ static void wacom_wac_finger_event(struct hid_device *hdev,


 	if (usage->usage_index + 1 == field->report_count) {
-		if (equivalent_usage == wacom_wac->hid_data.last_slot_field)
+		if (equivalent_usage == wacom_wac->hid_data.last_slot_field &&
+		    wacom_wac->hid_data.confidence)
 			wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
 	}
 }
@@ -2476,6 +2480,8 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
 	struct hid_data* hid_data = &wacom_wac->hid_data;
 	int i;

+	hid_data->confidence = true;
+
 	for (i = 0; i < report->maxfield; i++) {
 		struct hid_field *field = report->field[i];
 		int j;
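The shape of these two hunks is a reset-then-override pattern: the pre-report hook assumes every contact is valid, individual events may clear the flag, and the slot is only emitted if the flag survived to the end of the report. A self-contained user-space model of the pattern (names invented for the sketch; not the HID driver API):

    #include <stdbool.h>
    #include <stdio.h>

    struct touch_state {
            bool confidence;
            int x, y;
    };

    static void pre_report(struct touch_state *t)
    {
            t->confidence = true;   /* default until the device says otherwise */
    }

    static void on_confidence_event(struct touch_state *t, int value)
    {
            t->confidence = value;
    }

    static void maybe_emit_slot(const struct touch_state *t)
    {
            if (t->confidence)      /* invalid contacts are dropped here */
                    printf("contact at %d,%d\n", t->x, t->y);
    }

    int main(void)
    {
            struct touch_state t = { .x = 10, .y = 20 };

            pre_report(&t);
            on_confidence_event(&t, 0);     /* device marks contact invalid */
            maybe_emit_slot(&t);            /* prints nothing */
            return 0;
    }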
@@ -293,6 +293,7 @@ struct hid_data {
 	bool inrange_state;
 	bool invert_state;
 	bool tipswitch;
+	bool confidence;
 	int x;
 	int y;
 	int pressure;
@@ -1135,6 +1135,7 @@ void cec_received_msg_ts(struct cec_adapter *adap,
 			if (abort)
 				dst->rx_status |= CEC_RX_STATUS_FEATURE_ABORT;
 			msg->flags = dst->flags;
+			msg->sequence = dst->sequence;
 			/* Remove it from the wait_queue */
 			list_del_init(&data->list);
@@ -4703,6 +4703,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
         lp->ibn = 3;
         lp->active = *p++;
 	if (MOTO_SROM_BUG) lp->active = 0;
+	/* if (MOTO_SROM_BUG) statement indicates lp->active could
+	   be 8 (i.e. the size of array lp->phy) */
+	if (WARN_ON(lp->active >= ARRAY_SIZE(lp->phy)))
+		return -EINVAL;
 	lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
 	lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
 	lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;
@@ -4994,19 +4998,23 @@ mii_get_phy(struct net_device *dev)
 	}
 	if ((j == limit) && (i < DE4X5_MAX_MII)) {
 	    for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
-	    lp->phy[k].addr = i;
-	    lp->phy[k].id = id;
-	    lp->phy[k].spd.reg = GENERIC_REG;      /* ANLPA register         */
-	    lp->phy[k].spd.mask = GENERIC_MASK;    /* 100Mb/s technologies   */
-	    lp->phy[k].spd.value = GENERIC_VALUE;  /* TX & T4, H/F Duplex    */
-	    lp->mii_cnt++;
-	    lp->active++;
-	    printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
-	    j = de4x5_debug;
-	    de4x5_debug |= DEBUG_MII;
-	    de4x5_dbg_mii(dev, k);
-	    de4x5_debug = j;
-	    printk("\n");
+	    if (k < DE4X5_MAX_PHY) {
+		lp->phy[k].addr = i;
+		lp->phy[k].id = id;
+		lp->phy[k].spd.reg = GENERIC_REG;      /* ANLPA register         */
+		lp->phy[k].spd.mask = GENERIC_MASK;    /* 100Mb/s technologies   */
+		lp->phy[k].spd.value = GENERIC_VALUE;  /* TX & T4, H/F Duplex    */
+		lp->mii_cnt++;
+		lp->active++;
+		printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
+		j = de4x5_debug;
+		de4x5_debug |= DEBUG_MII;
+		de4x5_dbg_mii(dev, k);
+		de4x5_debug = j;
+		printk("\n");
+	    } else {
+		goto purgatory;
+	    }
 	}
     }
   purgatory:
@@ -336,6 +336,10 @@ static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
 		return;

 	if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+		/* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8.
+		   We need check to prevent array overflow */
+		if (port >= DSAF_MAX_PORT_NUM)
+			return;
 		reg_val_1  = 0x1 << port;
 		port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
 		/* there is difference between V1 and V2 in register.*/
@@ -2283,9 +2283,14 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
 				bool carry_xdp_prog)
 {
 	struct bpf_prog *xdp_prog;
-	int i, t;
+	int i, t, ret;

-	mlx4_en_copy_priv(tmp, priv, prof);
+	ret = mlx4_en_copy_priv(tmp, priv, prof);
+	if (ret) {
+		en_warn(priv, "%s: mlx4_en_copy_priv() failed, return\n",
+			__func__);
+		return ret;
+	}

 	if (mlx4_en_alloc_resources(tmp)) {
 		en_warn(priv,
@@ -128,7 +128,7 @@ static const struct net_device_ops xtsonic_netdev_ops = {
 	.ndo_set_mac_address	= eth_mac_addr,
 };

-static int __init sonic_probe1(struct net_device *dev)
+static int sonic_probe1(struct net_device *dev)
 {
 	static unsigned version_printed = 0;
 	unsigned int silicon_revision;
@@ -1078,8 +1078,14 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
 	sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
 	context_id = recv_ctx->context_id;
 	num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
-	ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
-				    QLCNIC_CMD_ADD_RCV_RINGS);
+	err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+					  QLCNIC_CMD_ADD_RCV_RINGS);
+	if (err) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to alloc mbx args %d\n", err);
+		return err;
+	}
+
 	cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);

 	/* set up status rings, mbx 2-81 */
@@ -2052,7 +2052,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
 	if (dev->domain_data.phyirq > 0)
 		phydev->irq = dev->domain_data.phyirq;
 	else
-		phydev->irq = 0;
+		phydev->irq = PHY_POLL;
 	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

 	/* set to AUTOMDIX */
@@ -208,6 +208,7 @@ static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
 	/* strip the ethernet header added for pass through VRF device */
 	__skb_pull(skb, skb_network_offset(skb));

+	memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
 	ret = vrf_ip6_local_out(net, skb->sk, skb);
 	if (unlikely(net_xmit_eval(ret)))
 		dev->stats.tx_errors++;
@@ -289,6 +290,7 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
 					       RT_SCOPE_LINK);
 	}

+	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
 	if (unlikely(net_xmit_eval(ret)))
 		vrf_dev->stats.tx_errors++;
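The vrf fix is an instance of a general rule: scrub per-layer scratch state stored inside a buffer before handing that buffer to the next layer, because stale input-path state can be misread by the output path. A tiny user-space model of the idea (names and sizes invented for the sketch):

    #include <string.h>

    struct pkt { char cb[48]; };    /* stand-in for skb->cb */

    static void xmit_through_vrf(struct pkt *p)
    {
            /* Stale control-block contents from the input path must not
             * leak into the output path; start from a clean slate. */
            memset(p->cb, 0, sizeof(p->cb));
            /* ...hand off to the real output routine here... */
    }

    int main(void)
    {
            struct pkt p = { .cb = "stale input-path state" };

            xmit_through_vrf(&p);
            return p.cb[0];         /* 0: the state was cleared */
    }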
@@ -121,21 +121,17 @@ struct netfront_queue {

 	/*
 	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
-	 * are linked from tx_skb_freelist through skb_entry.link.
-	 *
-	 * NB. Freelist index entries are always going to be less than
-	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
-	 * greater than PAGE_OFFSET: we use this property to distinguish
-	 * them.
+	 * are linked from tx_skb_freelist through tx_link.
 	 */
-	union skb_entry {
-		struct sk_buff *skb;
-		unsigned long link;
-	} tx_skbs[NET_TX_RING_SIZE];
+	struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
+	unsigned short tx_link[NET_TX_RING_SIZE];
+#define TX_LINK_NONE 0xffff
+#define TX_PENDING   0xfffe
 	grant_ref_t gref_tx_head;
 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 	struct page *grant_tx_page[NET_TX_RING_SIZE];
 	unsigned tx_skb_freelist;
+	unsigned int tx_pend_queue;

 	spinlock_t rx_lock ____cacheline_aligned_in_smp;
 	struct xen_netif_rx_front_ring rx;
@@ -161,6 +157,9 @@ struct netfront_info {
 	struct netfront_stats __percpu *rx_stats;
 	struct netfront_stats __percpu *tx_stats;

+	/* Is device behaving sane? */
+	bool broken;
+
 	atomic_t rx_gso_checksum_fixup;
 };

@@ -169,33 +168,25 @@ struct netfront_rx_info {
 	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 };

-static void skb_entry_set_link(union skb_entry *list, unsigned short id)
-{
-	list->link = id;
-}
-
-static int skb_entry_is_link(const union skb_entry *list)
-{
-	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
-	return (unsigned long)list->skb < PAGE_OFFSET;
-}
-
 /*
  * Access macros for acquiring freeing slots in tx_skbs[].
  */

-static void add_id_to_freelist(unsigned *head, union skb_entry *list,
-			       unsigned short id)
+static void add_id_to_list(unsigned *head, unsigned short *list,
+			   unsigned short id)
 {
-	skb_entry_set_link(&list[id], *head);
+	list[id] = *head;
 	*head = id;
 }

-static unsigned short get_id_from_freelist(unsigned *head,
-					   union skb_entry *list)
+static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
 {
 	unsigned int id = *head;
-	*head = list[id].link;
+
+	if (id != TX_LINK_NONE) {
+		*head = list[id];
+		list[id] = TX_LINK_NONE;
+	}
 	return id;
 }

@@ -353,7 +344,7 @@ static int xennet_open(struct net_device *dev)
 	unsigned int i = 0;
 	struct netfront_queue *queue = NULL;

-	if (!np->queues)
+	if (!np->queues || np->broken)
 		return -ENODEV;

 	for (i = 0; i < num_queues; ++i) {
@@ -381,27 +372,47 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 	unsigned short id;
 	struct sk_buff *skb;
 	bool more_to_do;
+	const struct device *dev = &queue->info->netdev->dev;

 	BUG_ON(!netif_carrier_ok(queue->info->netdev));

 	do {
 		prod = queue->tx.sring->rsp_prod;
+		if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
+			dev_alert(dev, "Illegal number of responses %u\n",
+				  prod - queue->tx.rsp_cons);
+			goto err;
+		}
 		rmb(); /* Ensure we see responses up to 'rp'. */

 		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
-			struct xen_netif_tx_response *txrsp;
+			struct xen_netif_tx_response txrsp;

-			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
-			if (txrsp->status == XEN_NETIF_RSP_NULL)
+			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
+			if (txrsp.status == XEN_NETIF_RSP_NULL)
 				continue;

-			id  = txrsp->id;
-			skb = queue->tx_skbs[id].skb;
+			id = txrsp.id;
+			if (id >= RING_SIZE(&queue->tx)) {
+				dev_alert(dev,
+					  "Response has incorrect id (%u)\n",
+					  id);
+				goto err;
+			}
+			if (queue->tx_link[id] != TX_PENDING) {
+				dev_alert(dev,
+					  "Response for inactive request\n");
+				goto err;
+			}
+
+			queue->tx_link[id] = TX_LINK_NONE;
+			skb = queue->tx_skbs[id];
+			queue->tx_skbs[id] = NULL;
 			if (unlikely(gnttab_query_foreign_access(
 				queue->grant_tx_ref[id]) != 0)) {
-				pr_alert("%s: warning -- grant still in use by backend domain\n",
-					 __func__);
-				BUG();
+				dev_alert(dev,
+					  "Grant still in use by backend domain\n");
+				goto err;
 			}
 			gnttab_end_foreign_access_ref(
 				queue->grant_tx_ref[id], GNTMAP_readonly);
@@ -409,7 +420,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 				&queue->gref_tx_head, queue->grant_tx_ref[id]);
 			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
 			queue->grant_tx_page[id] = NULL;
-			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
+			add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
 			dev_kfree_skb_irq(skb);
 		}

@@ -419,13 +430,20 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 	} while (more_to_do);

 	xennet_maybe_wake_tx(queue);
+
+	return;
+
+ err:
+	queue->info->broken = true;
+	dev_alert(dev, "Disabled for further use\n");
 }

 struct xennet_gnttab_make_txreq {
 	struct netfront_queue *queue;
 	struct sk_buff *skb;
 	struct page *page;
-	struct xen_netif_tx_request *tx; /* Last request */
+	struct xen_netif_tx_request *tx;      /* Last request on ring page */
+	struct xen_netif_tx_request tx_local; /* Last request local copy*/
 	unsigned int size;
 };
@@ -441,7 +459,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 	struct netfront_queue *queue = info->queue;
 	struct sk_buff *skb = info->skb;

-	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+	id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
 	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
@@ -449,34 +467,37 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 					gfn, GNTMAP_readonly);

-	queue->tx_skbs[id].skb = skb;
+	queue->tx_skbs[id] = skb;
 	queue->grant_tx_page[id] = page;
 	queue->grant_tx_ref[id] = ref;

-	tx->id = id;
-	tx->gref = ref;
-	tx->offset = offset;
-	tx->size = len;
-	tx->flags = 0;
+	info->tx_local.id = id;
+	info->tx_local.gref = ref;
+	info->tx_local.offset = offset;
+	info->tx_local.size = len;
+	info->tx_local.flags = 0;
+
+	*tx = info->tx_local;
+
+	/*
+	 * Put the request in the pending queue, it will be set to be pending
+	 * when the producer index is about to be raised.
+	 */
+	add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);

 	info->tx = tx;
-	info->size += tx->size;
+	info->size += info->tx_local.size;
 }

 static struct xen_netif_tx_request *xennet_make_first_txreq(
-	struct netfront_queue *queue, struct sk_buff *skb,
-	struct page *page, unsigned int offset, unsigned int len)
+	struct xennet_gnttab_make_txreq *info,
+	unsigned int offset, unsigned int len)
 {
-	struct xennet_gnttab_make_txreq info = {
-		.queue = queue,
-		.skb = skb,
-		.page = page,
-		.size = 0,
-	};
+	info->size = 0;

-	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
+	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);

-	return info.tx;
+	return info->tx;
 }

 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
@@ -489,35 +510,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
 	xennet_tx_setup_grant(gfn, offset, len, data);
 }

-static struct xen_netif_tx_request *xennet_make_txreqs(
-	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
-	struct sk_buff *skb, struct page *page,
+static void xennet_make_txreqs(
+	struct xennet_gnttab_make_txreq *info,
+	struct page *page,
 	unsigned int offset, unsigned int len)
 {
-	struct xennet_gnttab_make_txreq info = {
-		.queue = queue,
-		.skb = skb,
-		.tx = tx,
-	};
-
 	/* Skip unused frames from start of page */
 	page += offset >> PAGE_SHIFT;
 	offset &= ~PAGE_MASK;

 	while (len) {
-		info.page = page;
-		info.size = 0;
+		info->page = page;
+		info->size = 0;

 		gnttab_foreach_grant_in_range(page, offset, len,
 					      xennet_make_one_txreq,
-					      &info);
+					      info);

 		page++;
 		offset = 0;
-		len -= info.size;
+		len -= info->size;
 	}
-
-	return info.tx;
 }

 /*
@@ -564,13 +577,22 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 	return queue_idx;
 }

+static void xennet_mark_tx_pending(struct netfront_queue *queue)
+{
+	unsigned int i;
+
+	while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
+	       TX_LINK_NONE)
+		queue->tx_link[i] = TX_PENDING;
+}
+
 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)

 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
-	struct xen_netif_tx_request *tx, *first_tx;
+	struct xen_netif_tx_request *first_tx;
 	unsigned int i;
 	int notify;
 	int slots;
@@ -579,6 +601,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int len;
 	unsigned long flags;
 	struct netfront_queue *queue = NULL;
+	struct xennet_gnttab_make_txreq info = { };
 	unsigned int num_queues = dev->real_num_tx_queues;
 	u16 queue_index;
 	struct sk_buff *nskb;
@@ -586,6 +609,8 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Drop the packet if no queues are set up */
 	if (num_queues < 1)
 		goto drop;
+	if (unlikely(np->broken))
+		goto drop;
 	/* Determine which queue to transmit this SKB on */
 	queue_index = skb_get_queue_mapping(skb);
 	queue = &np->queues[queue_index];
@@ -636,21 +661,24 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}

 	/* First request for the linear area. */
-	first_tx = tx = xennet_make_first_txreq(queue, skb,
-						page, offset, len);
-	offset += tx->size;
+	info.queue = queue;
+	info.skb = skb;
+	info.page = page;
+	first_tx = xennet_make_first_txreq(&info, offset, len);
+	offset += info.tx_local.size;
 	if (offset == PAGE_SIZE) {
 		page++;
 		offset = 0;
 	}
-	len -= tx->size;
+	len -= info.tx_local.size;

 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		/* local packet? */
-		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
+		first_tx->flags |= XEN_NETTXF_csum_blank |
+				   XEN_NETTXF_data_validated;
 	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 		/* remote but checksummed. */
-		tx->flags |= XEN_NETTXF_data_validated;
+		first_tx->flags |= XEN_NETTXF_data_validated;

 	/* Optional extra info after the first request. */
 	if (skb_shinfo(skb)->gso_size) {
@@ -659,7 +687,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		gso = (struct xen_netif_extra_info *)
 			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);

-		tx->flags |= XEN_NETTXF_extra_info;
+		first_tx->flags |= XEN_NETTXF_extra_info;

 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
@@ -673,19 +701,21 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}

 	/* Requests for the rest of the linear area. */
-	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+	xennet_make_txreqs(&info, page, offset, len);

 	/* Requests for all the frags. */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		tx = xennet_make_txreqs(queue, tx, skb,
-					skb_frag_page(frag), frag->page_offset,
+		xennet_make_txreqs(&info, skb_frag_page(frag),
+				   frag->page_offset,
 				   skb_frag_size(frag));
 	}

 	/* First request has the packet length. */
 	first_tx->size = skb->len;

+	xennet_mark_tx_pending(queue);
+
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 	if (notify)
 		notify_remote_via_irq(queue->tx_irq);
@@ -743,7 +773,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
 			     RING_IDX rp)

 {
-	struct xen_netif_extra_info *extra;
+	struct xen_netif_extra_info extra;
 	struct device *dev = &queue->info->netdev->dev;
 	RING_IDX cons = queue->rx.rsp_cons;
 	int err = 0;
@@ -759,24 +789,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
 			break;
 		}

-		extra = (struct xen_netif_extra_info *)
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);

-		if (unlikely(!extra->type ||
-			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+		if (unlikely(!extra.type ||
+			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			if (net_ratelimit())
 				dev_warn(dev, "Invalid extra type: %d\n",
-					 extra->type);
+					 extra.type);
 			err = -EINVAL;
 		} else {
-			memcpy(&extras[extra->type - 1], extra,
-			       sizeof(*extra));
+			extras[extra.type - 1] = extra;
 		}

 		skb = xennet_get_rx_skb(queue, cons);
 		ref = xennet_get_rx_ref(queue, cons);
 		xennet_move_rx_slot(queue, skb, ref);
-	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

 	queue->rx.rsp_cons = cons;
 	return err;
@@ -786,7 +814,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
 				struct netfront_rx_info *rinfo, RING_IDX rp,
 				struct sk_buff_head *list)
 {
-	struct xen_netif_rx_response *rx = &rinfo->rx;
+	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
 	struct xen_netif_extra_info *extras = rinfo->extras;
 	struct device *dev = &queue->info->netdev->dev;
 	RING_IDX cons = queue->rx.rsp_cons;
@@ -844,7 +872,8 @@ next:
 			break;
 		}

-		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
+		rx = &rx_local;
 		skb = xennet_get_rx_skb(queue, cons + slots);
 		ref = xennet_get_rx_ref(queue, cons + slots);
 		slots++;
@@ -899,10 +928,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
 	struct sk_buff *nskb;

 	while ((nskb = __skb_dequeue(list))) {
-		struct xen_netif_rx_response *rx =
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		struct xen_netif_rx_response rx;
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

+		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
+
 		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

@@ -917,7 +947,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,

 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				skb_frag_page(nfrag),
-				rx->offset, rx->status, PAGE_SIZE);
+				rx.offset, rx.status, PAGE_SIZE);

 		skb_shinfo(nskb)->nr_frags = 0;
 		kfree_skb(nskb);
@@ -1010,12 +1040,19 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 	skb_queue_head_init(&tmpq);

 	rp = queue->rx.sring->rsp_prod;
+	if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
+		dev_alert(&dev->dev, "Illegal number of responses %u\n",
+			  rp - queue->rx.rsp_cons);
+		queue->info->broken = true;
+		spin_unlock(&queue->rx_lock);
+		return 0;
+	}
 	rmb(); /* Ensure we see queued responses up to 'rp'. */

 	i = queue->rx.rsp_cons;
 	work_done = 0;
 	while ((i != rp) && (work_done < budget)) {
-		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
+		RING_COPY_RESPONSE(&queue->rx, i, rx);
 		memset(extras, 0, sizeof(rinfo.extras));

 		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
@@ -1137,17 +1174,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)

 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
 		/* Skip over entries which are actually freelist references */
-		if (skb_entry_is_link(&queue->tx_skbs[i]))
+		if (!queue->tx_skbs[i])
 			continue;

-		skb = queue->tx_skbs[i].skb;
+		skb = queue->tx_skbs[i];
+		queue->tx_skbs[i] = NULL;
 		get_page(queue->grant_tx_page[i]);
 		gnttab_end_foreign_access(queue->grant_tx_ref[i],
 					  GNTMAP_readonly,
 					  (unsigned long)page_address(queue->grant_tx_page[i]));
 		queue->grant_tx_page[i] = NULL;
 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
-		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
+		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
 		dev_kfree_skb_irq(skb);
 	}
 }
@@ -1227,6 +1265,9 @@ static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
 	struct netfront_queue *queue = dev_id;
 	unsigned long flags;

+	if (queue->info->broken)
+		return IRQ_HANDLED;
+
 	spin_lock_irqsave(&queue->tx_lock, flags);
 	xennet_tx_buf_gc(queue);
 	spin_unlock_irqrestore(&queue->tx_lock, flags);
@@ -1239,6 +1280,9 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
 	struct netfront_queue *queue = dev_id;
 	struct net_device *dev = queue->info->netdev;

+	if (queue->info->broken)
+		return IRQ_HANDLED;
+
 	if (likely(netif_carrier_ok(dev) &&
 		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
 		napi_schedule(&queue->napi);
@@ -1260,6 +1304,10 @@ static void xennet_poll_controller(struct net_device *dev)
 	struct netfront_info *info = netdev_priv(dev);
 	unsigned int num_queues = dev->real_num_tx_queues;
 	unsigned int i;

+	if (info->broken)
+		return;
+
 	for (i = 0; i < num_queues; ++i)
 		xennet_interrupt(0, &info->queues[i]);
 }
@@ -1630,13 +1678,15 @@ static int xennet_init_queue(struct netfront_queue *queue)
 	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
 		 devid, queue->id);

-	/* Initialise tx_skbs as a free chain containing every entry. */
+	/* Initialise tx_skb_freelist as a free chain containing every entry. */
 	queue->tx_skb_freelist = 0;
+	queue->tx_pend_queue = TX_LINK_NONE;
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
-		skb_entry_set_link(&queue->tx_skbs[i], i+1);
+		queue->tx_link[i] = i + 1;
 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
 		queue->grant_tx_page[i] = NULL;
 	}
+	queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;

 	/* Clear out rx_skbs */
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
@@ -1841,6 +1891,9 @@ static int talk_to_netback(struct xenbus_device *dev,
 	if (info->queues)
 		xennet_destroy_queues(info);

+	/* For the case of a reconnect reset the "broken" indicator. */
|
||||
info->broken = false;
|
||||
|
||||
err = xennet_create_queues(info, &num_queues);
|
||||
if (err < 0) {
|
||||
xenbus_dev_fatal(dev, err, "creating queues");
|
||||
|
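The xen-netfront hunks above replace RING_GET_RESPONSE(), which returns a pointer into the ring page shared with the backend, with RING_COPY_RESPONSE(), which snapshots the slot into a private local before it is validated. A minimal host-side sketch of why the single copy matters; the struct, consume_*() functions and table are illustrative stand-ins, not the real Xen ring macros (which also add memory barriers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct resp { uint32_t type; uint32_t status; };

/* Models one slot of a ring page that the (untrusted) backend can
 * rewrite at any moment. */
static volatile struct resp shared = { 1, 0 };

/* Old pattern: validate through a pointer into the shared page. The
 * backend may change ->type between the bounds check and the use, so a
 * checked value can become an out-of-bounds index (TOCTOU). */
static int consume_unsafe(volatile struct resp *r, uint32_t max, const int *tbl)
{
	if (r->type >= max)	/* first read from shared memory ... */
		return -1;
	return tbl[r->type];	/* ... second, possibly different, read */
}

/* New pattern, as in the hunks above: snapshot the slot once, then
 * only ever look at the private copy. */
static int consume_safe(uint32_t max, const int *tbl)
{
	struct resp local;

	memcpy(&local, (const void *)&shared, sizeof(local));
	if (local.type >= max)
		return -1;
	return tbl[local.type];
}

int main(void)
{
	const int tbl[4] = { 10, 11, 12, 13 };

	printf("%d %d\n", consume_unsafe(&shared, 4, tbl),
	       consume_safe(4, tbl));
	return 0;
}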
@@ -12,6 +12,7 @@
*/

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
@@ -20,6 +21,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>

/* PCIe core registers */
@@ -27,16 +29,7 @@
#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
#define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
#define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
#define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
#define PCIE_CORE_LINK_TRAINING BIT(5)
#define PCIE_CORE_LINK_WIDTH_SHIFT 20
#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_ERR_CAPCTL_REG 0x118
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
@@ -117,29 +110,92 @@
/* PCIe window configuration */
#define OB_WIN_BASE_ADDR 0x4c00
#define OB_WIN_BLOCK_SIZE 0x20
#define OB_WIN_COUNT 8
#define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \
OB_WIN_BLOCK_SIZE * (win) + \
(offset))
#define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00)
#define OB_WIN_ENABLE BIT(0)
#define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04)
#define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08)
#define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c)
#define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10)
#define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14)
#define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18)

/* PCIe window types */
#define OB_PCIE_MEM 0x0
#define OB_PCIE_IO 0x4
#define OB_WIN_DEFAULT_ACTIONS (OB_WIN_ACTIONS(OB_WIN_COUNT-1) + 0x4)
#define OB_WIN_FUNC_NUM_MASK GENMASK(31, 24)
#define OB_WIN_FUNC_NUM_SHIFT 24
#define OB_WIN_FUNC_NUM_ENABLE BIT(23)
#define OB_WIN_BUS_NUM_BITS_MASK GENMASK(22, 20)
#define OB_WIN_BUS_NUM_BITS_SHIFT 20
#define OB_WIN_MSG_CODE_ENABLE BIT(22)
#define OB_WIN_MSG_CODE_MASK GENMASK(21, 14)
#define OB_WIN_MSG_CODE_SHIFT 14
#define OB_WIN_MSG_PAYLOAD_LEN BIT(12)
#define OB_WIN_ATTR_ENABLE BIT(11)
#define OB_WIN_ATTR_TC_MASK GENMASK(10, 8)
#define OB_WIN_ATTR_TC_SHIFT 8
#define OB_WIN_ATTR_RELAXED BIT(7)
#define OB_WIN_ATTR_NOSNOOP BIT(6)
#define OB_WIN_ATTR_POISON BIT(5)
#define OB_WIN_ATTR_IDO BIT(4)
#define OB_WIN_TYPE_MASK GENMASK(3, 0)
#define OB_WIN_TYPE_SHIFT 0
#define OB_WIN_TYPE_MEM 0x0
#define OB_WIN_TYPE_IO 0x4
#define OB_WIN_TYPE_CONFIG_TYPE0 0x8
#define OB_WIN_TYPE_CONFIG_TYPE1 0x9
#define OB_WIN_TYPE_MSG 0xc

/* LMI registers base address and register offsets */
#define LMI_BASE_ADDR 0x6000
#define CFG_REG (LMI_BASE_ADDR + 0x0)
#define LTSSM_SHIFT 24
#define LTSSM_MASK 0x3f
#define LTSSM_L0 0x10
#define RC_BAR_CONFIG 0x300

/* LTSSM values in CFG_REG */
enum {
LTSSM_DETECT_QUIET = 0x0,
LTSSM_DETECT_ACTIVE = 0x1,
LTSSM_POLLING_ACTIVE = 0x2,
LTSSM_POLLING_COMPLIANCE = 0x3,
LTSSM_POLLING_CONFIGURATION = 0x4,
LTSSM_CONFIG_LINKWIDTH_START = 0x5,
LTSSM_CONFIG_LINKWIDTH_ACCEPT = 0x6,
LTSSM_CONFIG_LANENUM_ACCEPT = 0x7,
LTSSM_CONFIG_LANENUM_WAIT = 0x8,
LTSSM_CONFIG_COMPLETE = 0x9,
LTSSM_CONFIG_IDLE = 0xa,
LTSSM_RECOVERY_RCVR_LOCK = 0xb,
LTSSM_RECOVERY_SPEED = 0xc,
LTSSM_RECOVERY_RCVR_CFG = 0xd,
LTSSM_RECOVERY_IDLE = 0xe,
LTSSM_L0 = 0x10,
LTSSM_RX_L0S_ENTRY = 0x11,
LTSSM_RX_L0S_IDLE = 0x12,
LTSSM_RX_L0S_FTS = 0x13,
LTSSM_TX_L0S_ENTRY = 0x14,
LTSSM_TX_L0S_IDLE = 0x15,
LTSSM_TX_L0S_FTS = 0x16,
LTSSM_L1_ENTRY = 0x17,
LTSSM_L1_IDLE = 0x18,
LTSSM_L2_IDLE = 0x19,
LTSSM_L2_TRANSMIT_WAKE = 0x1a,
LTSSM_DISABLED = 0x20,
LTSSM_LOOPBACK_ENTRY_MASTER = 0x21,
LTSSM_LOOPBACK_ACTIVE_MASTER = 0x22,
LTSSM_LOOPBACK_EXIT_MASTER = 0x23,
LTSSM_LOOPBACK_ENTRY_SLAVE = 0x24,
LTSSM_LOOPBACK_ACTIVE_SLAVE = 0x25,
LTSSM_LOOPBACK_EXIT_SLAVE = 0x26,
LTSSM_HOT_RESET = 0x27,
LTSSM_RECOVERY_EQUALIZATION_PHASE0 = 0x28,
LTSSM_RECOVERY_EQUALIZATION_PHASE1 = 0x29,
LTSSM_RECOVERY_EQUALIZATION_PHASE2 = 0x2a,
LTSSM_RECOVERY_EQUALIZATION_PHASE3 = 0x2b,
};

/* PCIe core controller registers */
#define CTRL_CORE_BASE_ADDR 0x18000
#define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0)
@@ -200,6 +256,13 @@ struct advk_pcie {
struct platform_device *pdev;
void __iomem *base;
struct list_head resources;
struct {
phys_addr_t match;
phys_addr_t remap;
phys_addr_t mask;
u32 actions;
} wins[OB_WIN_COUNT];
u8 wins_count;
struct irq_domain *irq_domain;
struct irq_chip irq_chip;
raw_spinlock_t irq_lock;
@@ -212,6 +275,8 @@ struct advk_pcie {
struct mutex msi_used_lock;
u16 msi_msg;
int root_bus_nr;
int link_gen;
struct gpio_desc *reset_gpio;
};

static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
@@ -224,52 +289,155 @@ static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg)
return readl(pcie->base + reg);
}

static int advk_pcie_link_up(struct advk_pcie *pcie)
static u8 advk_pcie_ltssm_state(struct advk_pcie *pcie)
{
u32 val, ltssm_state;
u32 val;
u8 ltssm_state;

val = advk_readl(pcie, CFG_REG);
ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK;
return ltssm_state >= LTSSM_L0;
return ltssm_state;
}

static inline bool advk_pcie_link_up(struct advk_pcie *pcie)
{
/* check if LTSSM is in normal operation - some L* state */
u8 ltssm_state = advk_pcie_ltssm_state(pcie);
return ltssm_state >= LTSSM_L0 && ltssm_state < LTSSM_DISABLED;
}

static inline bool advk_pcie_link_training(struct advk_pcie *pcie)
{
/*
* According to PCIe Base specification 3.0, Table 4-14: Link
* Status Mapped to the LTSSM is Link Training mapped to LTSSM
* Configuration and Recovery states.
*/
u8 ltssm_state = advk_pcie_ltssm_state(pcie);
return ((ltssm_state >= LTSSM_CONFIG_LINKWIDTH_START &&
ltssm_state < LTSSM_L0) ||
(ltssm_state >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
ltssm_state <= LTSSM_RECOVERY_EQUALIZATION_PHASE3));
}
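The two predicates above carve the LTSSM state space into "link up" (the L* states) and "link training" (Configuration plus Recovery, per the spec table cited in the comment). A self-contained sanity check of those ranges, with the enum values copied from the diff and the two functions mirroring advk_pcie_link_up()/advk_pcie_link_training() so they can be exercised off-target (illustrative test, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

enum {
	LTSSM_CONFIG_LINKWIDTH_START = 0x5,
	LTSSM_L0 = 0x10,
	LTSSM_L2_TRANSMIT_WAKE = 0x1a,
	LTSSM_DISABLED = 0x20,
	LTSSM_RECOVERY_EQUALIZATION_PHASE0 = 0x28,
	LTSSM_RECOVERY_EQUALIZATION_PHASE3 = 0x2b,
};

static bool link_up(uint8_t s)
{
	return s >= LTSSM_L0 && s < LTSSM_DISABLED;
}

static bool link_training(uint8_t s)
{
	return (s >= LTSSM_CONFIG_LINKWIDTH_START && s < LTSSM_L0) ||
	       (s >= LTSSM_RECOVERY_EQUALIZATION_PHASE0 &&
		s <= LTSSM_RECOVERY_EQUALIZATION_PHASE3);
}

int main(void)
{
	assert(link_up(LTSSM_L0));			/* L0 counts as up */
	assert(link_up(LTSSM_L2_TRANSMIT_WAKE));	/* any L* state does */
	assert(!link_up(LTSSM_DISABLED));		/* disabled is not up */
	assert(link_training(LTSSM_CONFIG_LINKWIDTH_START));
	assert(link_training(LTSSM_RECOVERY_EQUALIZATION_PHASE3));
	assert(!link_training(LTSSM_L0));
	return 0;
}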

static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
int retries;

/* check if the link is up or not */
for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) {
if (advk_pcie_link_up(pcie)) {
dev_info(dev, "link up\n");
if (advk_pcie_link_up(pcie))
return 0;
}

usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX);
}

dev_err(dev, "link never came up\n");
return -ETIMEDOUT;
}

static void advk_pcie_issue_perst(struct advk_pcie *pcie)
{
if (!pcie->reset_gpio)
return;

/* 10ms delay is needed for some cards */
dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
gpiod_set_value_cansleep(pcie->reset_gpio, 1);
usleep_range(10000, 11000);
gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}

static void advk_pcie_train_link(struct advk_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
u32 reg;
int ret;

/*
* Setup PCIe rev / gen compliance based on device tree property
* 'max-link-speed' which also forces maximal link speed.
*/
reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
reg &= ~PCIE_GEN_SEL_MSK;
if (pcie->link_gen == 3)
reg |= SPEED_GEN_3;
else if (pcie->link_gen == 2)
reg |= SPEED_GEN_2;
else
reg |= SPEED_GEN_1;
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

/*
* Set maximal link speed value also into PCIe Link Control 2 register.
* Armada 3700 Functional Specification says that default value is based
* on SPEED_GEN but tests showed that default value is always 8.0 GT/s.
*/
reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);
reg &= ~PCI_EXP_LNKCTL2_TLS;
if (pcie->link_gen == 3)
reg |= PCI_EXP_LNKCTL2_TLS_8_0GT;
else if (pcie->link_gen == 2)
reg |= PCI_EXP_LNKCTL2_TLS_5_0GT;
else
reg |= PCI_EXP_LNKCTL2_TLS_2_5GT;
advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_LNKCTL2);

/* Enable link training after selecting PCIe generation */
reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
reg |= LINK_TRAINING_EN;
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

/*
* Reset PCIe card via PERST# signal. Some cards are not detected
* during link training when they are in some non-initial state.
*/
advk_pcie_issue_perst(pcie);

/*
* PERST# signal could have been asserted by pinctrl subsystem before
* probe() callback has been called or issued explicitly by reset gpio
* function advk_pcie_issue_perst(), making the endpoint going into
* fundamental reset. As required by PCI Express spec (PCI Express
* Base Specification, REV. 4.0 PCI Express, February 19 2014, 6.6.1
* Conventional Reset) a delay for at least 100ms after such a reset
* before sending a Configuration Request to the device is needed.
* So wait until PCIe link is up. Function advk_pcie_wait_for_link()
* waits for link at least 900ms.
*/
ret = advk_pcie_wait_for_link(pcie);
if (ret < 0)
dev_err(dev, "link never came up\n");
else
dev_info(dev, "link up\n");
}

/*
* Set PCIe address window register which could be used for memory
* mapping.
*/
static void advk_pcie_set_ob_win(struct advk_pcie *pcie,
u32 win_num, u32 match_ms,
u32 match_ls, u32 mask_ms,
u32 mask_ls, u32 remap_ms,
u32 remap_ls, u32 action)
static void advk_pcie_set_ob_win(struct advk_pcie *pcie, u8 win_num,
phys_addr_t match, phys_addr_t remap,
phys_addr_t mask, u32 actions)
{
advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num));
advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num));
advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num));
advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num));
advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num));
advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num));
advk_writel(pcie, action, OB_WIN_ACTIONS(win_num));
advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num));
advk_writel(pcie, OB_WIN_ENABLE |
lower_32_bits(match), OB_WIN_MATCH_LS(win_num));
advk_writel(pcie, upper_32_bits(match), OB_WIN_MATCH_MS(win_num));
advk_writel(pcie, lower_32_bits(remap), OB_WIN_REMAP_LS(win_num));
advk_writel(pcie, upper_32_bits(remap), OB_WIN_REMAP_MS(win_num));
advk_writel(pcie, lower_32_bits(mask), OB_WIN_MASK_LS(win_num));
advk_writel(pcie, upper_32_bits(mask), OB_WIN_MASK_MS(win_num));
advk_writel(pcie, actions, OB_WIN_ACTIONS(win_num));
}
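The rewritten advk_pcie_set_ob_win() programs each 64-bit match/remap/mask value as a low/high pair of 32-bit registers, with the enable bit carried in the low word of the match. A small sketch of that split, using local reimplementations of the kernel's lower_32_bits()/upper_32_bits() helpers and an example address (illustration only):

#include <inttypes.h>
#include <stdio.h>

#define lower_32_bits(v) ((uint32_t)((uint64_t)(v) & 0xffffffffu))
#define upper_32_bits(v) ((uint32_t)((uint64_t)(v) >> 32))

int main(void)
{
	uint64_t match = 0x00000001e8000000ull;	/* example window base */

	/* The LS write also carries the enable bit, as in the patch. */
	printf("MATCH_LS = 0x%08" PRIx32 " (incl. OB_WIN_ENABLE)\n",
	       lower_32_bits(match) | 1u);
	printf("MATCH_MS = 0x%08" PRIx32 "\n", upper_32_bits(match));
	return 0;
}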

static void advk_pcie_disable_ob_win(struct advk_pcie *pcie, u8 win_num)
{
advk_writel(pcie, 0, OB_WIN_MATCH_LS(win_num));
advk_writel(pcie, 0, OB_WIN_MATCH_MS(win_num));
advk_writel(pcie, 0, OB_WIN_REMAP_LS(win_num));
advk_writel(pcie, 0, OB_WIN_REMAP_MS(win_num));
advk_writel(pcie, 0, OB_WIN_MASK_LS(win_num));
advk_writel(pcie, 0, OB_WIN_MASK_MS(win_num));
advk_writel(pcie, 0, OB_WIN_ACTIONS(win_num));
}

static void advk_pcie_setup_hw(struct advk_pcie *pcie)
@@ -277,10 +445,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
u32 reg;
int i;

/* Point PCIe unit MBUS decode windows to DRAM space */
for (i = 0; i < 8; i++)
advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0);

/* Set to Direct mode */
reg = advk_readl(pcie, CTRL_CONFIG_REG);
reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT);
@@ -299,36 +463,27 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV;
advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG);

/* Set PCIe Device Control and Status 1 PF0 register */
reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
(7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
(PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
/* Set PCIe Device Control register */
reg = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);
reg &= ~PCI_EXP_DEVCTL_RELAX_EN;
reg &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
reg &= ~PCI_EXP_DEVCTL_PAYLOAD;
reg &= ~PCI_EXP_DEVCTL_READRQ;
reg |= PCI_EXP_DEVCTL_PAYLOAD_512B;
reg |= PCI_EXP_DEVCTL_READRQ_512B;
advk_writel(pcie, reg, PCIE_CORE_PCIEXP_CAP + PCI_EXP_DEVCTL);

/* Program PCIe Control 2 to disable strict ordering */
reg = PCIE_CORE_CTRL2_RESERVED |
PCIE_CORE_CTRL2_TD_ENABLE;
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

/* Set GEN2 */
reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
reg &= ~PCIE_GEN_SEL_MSK;
reg |= SPEED_GEN_2;
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

/* Set lane X1 */
reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
reg &= ~LANE_CNT_MSK;
reg |= LANE_COUNT_1;
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

/* Enable link training */
reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
reg |= LINK_TRAINING_EN;
advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);

/* Enable MSI */
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
reg |= PCIE_CORE_CTRL2_MSI_ENABLE;
@@ -353,21 +508,52 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG);

/*
* Enable AXI address window location generation:
* When it is enabled, the default outbound window
* configurations (Default User Field: 0xD0074CFC)
* are used to transparent address translation for
* the outbound transactions. Thus, PCIe address
* windows are not required for transparent memory
* access when default outbound window configuration
* is set for memory access.
*/
reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE;
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);

/* Bypass the address window mapping for PIO */
/*
* Set memory access in Default User Field so it
* is not required to configure PCIe address for
* transparent memory access.
*/
advk_writel(pcie, OB_WIN_TYPE_MEM, OB_WIN_DEFAULT_ACTIONS);

/*
* Bypass the address window mapping for PIO:
* Since PIO access already contains all required
* info over AXI interface by PIO registers, the
* address window is not required.
*/
reg = advk_readl(pcie, PIO_CTRL);
reg |= PIO_CTRL_ADDR_WIN_DISABLE;
advk_writel(pcie, reg, PIO_CTRL);

/* Start link training */
reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG);
reg |= PCIE_CORE_LINK_TRAINING;
advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG);
/*
* Configure PCIe address windows for non-memory or
* non-transparent access as by default PCIe uses
* transparent memory access.
*/
for (i = 0; i < pcie->wins_count; i++)
advk_pcie_set_ob_win(pcie, i,
pcie->wins[i].match, pcie->wins[i].remap,
pcie->wins[i].mask, pcie->wins[i].actions);

advk_pcie_wait_for_link(pcie);
/* Disable remaining PCIe outbound windows */
for (i = pcie->wins_count; i < OB_WIN_COUNT; i++)
advk_pcie_disable_ob_win(pcie, i);

advk_pcie_train_link(pcie);

reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
reg |= PCIE_CORE_CMD_MEM_ACCESS_EN |
@@ -502,6 +688,22 @@ static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
return false;
}

static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
int devfn)
{
if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
return false;

/*
* If the link goes down after we check for link-up, nothing bad
* happens but the config access times out.
*/
if (bus->number != pcie->root_bus_nr && !advk_pcie_link_up(pcie))
return false;

return true;
}

static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 *val)
{
@@ -509,7 +711,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
u32 reg;
int ret;

if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
if (!advk_pcie_valid_device(pcie, bus, devfn)) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
@@ -541,8 +743,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
advk_writel(pcie, 1, PIO_START);

ret = advk_pcie_wait_pio(pcie);
if (ret < 0)
if (ret < 0) {
*val = 0xffffffff;
return PCIBIOS_SET_FAILED;
}

/* Check PIO status and get the read result */
ret = advk_pcie_check_pio_status(pcie, val);
@@ -568,7 +772,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int offset;
int ret;

if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
if (!advk_pcie_valid_device(pcie, bus, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;

if (where % size)
@@ -789,6 +993,7 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
struct device_node *node = dev->of_node;
struct device_node *pcie_intc_node;
struct irq_chip *irq_chip;
int ret = 0;

raw_spin_lock_init(&pcie->irq_lock);

@@ -803,8 +1008,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq",
dev_name(dev));
if (!irq_chip->name) {
of_node_put(pcie_intc_node);
return -ENOMEM;
ret = -ENOMEM;
goto out_put_node;
}

irq_chip->irq_mask = advk_pcie_irq_mask;
@@ -816,11 +1021,13 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie)
&advk_pcie_irq_domain_ops, pcie);
if (!pcie->irq_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
of_node_put(pcie_intc_node);
return -ENOMEM;
ret = -ENOMEM;
goto out_put_node;
}

return 0;
out_put_node:
of_node_put(pcie_intc_node);
return ret;
}

static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie)
@@ -926,13 +1133,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)

switch (resource_type(res)) {
case IORESOURCE_IO:
advk_pcie_set_ob_win(pcie, 1,
upper_32_bits(res->start),
lower_32_bits(res->start),
0, 0xF8000000, 0,
lower_32_bits(res->start),
OB_PCIE_IO);
err = pci_remap_iospace(res, iobase);
err = devm_pci_remap_iospace(dev, res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
@@ -940,12 +1141,6 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
}
break;
case IORESOURCE_MEM:
advk_pcie_set_ob_win(pcie, 0,
upper_32_bits(res->start),
lower_32_bits(res->start),
0x0, 0xF8000000, 0,
lower_32_bits(res->start),
(2 << 20) | OB_PCIE_MEM);
res_valid |= !(res->flags & IORESOURCE_PREFETCH);
break;
case IORESOURCE_BUS:
@@ -974,6 +1169,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
struct resource *res;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
struct resource_entry *entry;
int ret, irq;

bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie));
@@ -1003,6 +1199,103 @@ static int advk_pcie_probe(struct platform_device *pdev)
return ret;
}

resource_list_for_each_entry(entry, &pcie->resources) {
resource_size_t start = entry->res->start;
resource_size_t size = resource_size(entry->res);
unsigned long type = resource_type(entry->res);
u64 win_size;

/*
* Aardvark hardware allows to configure also PCIe window
* for config type 0 and type 1 mapping, but driver uses
* only PIO for issuing configuration transfers which does
* not use PCIe window configuration.
*/
if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
type != IORESOURCE_IO)
continue;

/*
* Skip transparent memory resources. Default outbound access
* configuration is set to transparent memory access so it
* does not need window configuration.
*/
if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
entry->offset == 0)
continue;

/*
* The n-th PCIe window is configured by tuple (match, remap, mask)
* and an access to address A uses this window if A matches the
* match with given mask.
* So every PCIe window size must be a power of two and every start
* address must be aligned to window size. Minimal size is 64 KiB
* because lower 16 bits of mask must be zero. Remapped address
* may have set only bits from the mask.
*/
while (pcie->wins_count < OB_WIN_COUNT && size > 0) {
/* Calculate the largest aligned window size */
win_size = (1ULL << (fls64(size)-1)) |
(start ? (1ULL << __ffs64(start)) : 0);
win_size = 1ULL << __ffs64(win_size);
if (win_size < 0x10000)
break;

dev_dbg(dev,
"Configuring PCIe window %d: [0x%llx-0x%llx] as %lu\n",
pcie->wins_count, (unsigned long long)start,
(unsigned long long)start + win_size, type);

if (type == IORESOURCE_IO) {
pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_IO;
pcie->wins[pcie->wins_count].match = pci_pio_to_address(start);
} else {
pcie->wins[pcie->wins_count].actions = OB_WIN_TYPE_MEM;
pcie->wins[pcie->wins_count].match = start;
}
pcie->wins[pcie->wins_count].remap = start - entry->offset;
pcie->wins[pcie->wins_count].mask = ~(win_size - 1);

if (pcie->wins[pcie->wins_count].remap & (win_size - 1))
break;

start += win_size;
size -= win_size;
pcie->wins_count++;
}

if (size > 0) {
dev_err(&pcie->pdev->dev,
"Invalid PCIe region [0x%llx-0x%llx]\n",
(unsigned long long)entry->res->start,
(unsigned long long)entry->res->end + 1);
return -EINVAL;
}
}
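The comment in the loop above defines the decomposition rule: each step carves off the largest power-of-two window that both fits in the remaining size and respects the alignment of the current start address. A host-side rendering of that computation, with compiler builtins standing in for the kernel's fls64()/__ffs64() (illustrative, not driver code):

#include <inttypes.h>
#include <stdio.h>

static int fls64_(uint64_t x) { return x ? 64 - __builtin_clzll(x) : 0; }
static int ffs64_(uint64_t x) { return __builtin_ctzll(x); } /* x != 0 */

int main(void)
{
	uint64_t start = 0xe8000000ull, size = 0x03000000ull; /* example */

	while (size > 0) {
		/* Largest power of two <= size, further limited by the
		 * alignment of start. */
		uint64_t win = (1ULL << (fls64_(size) - 1)) |
			       (start ? (1ULL << ffs64_(start)) : 0);
		win = 1ULL << ffs64_(win);
		if (win < 0x10000)	/* low 16 mask bits must be zero */
			break;
		printf("window [0x%" PRIx64 "-0x%" PRIx64 ")\n",
		       start, start + win);
		start += win;
		size -= win;
	}
	return 0;
}

For the example region this prints two windows, [0xe8000000-0xea000000) and [0xea000000-0xeb000000): a 48 MiB region that is not a power of two gets split rather than rejected, up to the OB_WIN_COUNT hardware limit.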

pcie->reset_gpio = devm_fwnode_get_index_gpiod_from_child(dev, "reset",
0,
dev_fwnode(dev),
GPIOD_OUT_LOW,
"pcie1-reset");
ret = PTR_ERR_OR_ZERO(pcie->reset_gpio);
if (ret) {
if (ret == -ENOENT) {
pcie->reset_gpio = NULL;
} else {
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get reset-gpio: %i\n",
ret);
return ret;
}
}

ret = of_pci_get_max_link_speed(dev->of_node);
if (ret <= 0 || ret > 3)
pcie->link_gen = 3;
else
pcie->link_gen = ret;

advk_pcie_setup_hw(pcie);

ret = advk_pcie_init_irq_domain(pcie);

@@ -153,12 +153,16 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
PIN_GRP_GPIO("jtag", 20, 5, BIT(0), "jtag"),
PIN_GRP_GPIO("sdio0", 8, 3, BIT(1), "sdio"),
PIN_GRP_GPIO("emmc_nb", 27, 9, BIT(2), "emmc"),
PIN_GRP_GPIO("pwm0", 11, 1, BIT(3), "pwm"),
PIN_GRP_GPIO("pwm1", 12, 1, BIT(4), "pwm"),
PIN_GRP_GPIO("pwm2", 13, 1, BIT(5), "pwm"),
PIN_GRP_GPIO("pwm3", 14, 1, BIT(6), "pwm"),
PIN_GRP_GPIO("pmic1", 17, 1, BIT(7), "pmic"),
PIN_GRP_GPIO("pmic0", 16, 1, BIT(8), "pmic"),
PIN_GRP_GPIO_3("pwm0", 11, 1, BIT(3) | BIT(20), 0, BIT(20), BIT(3),
"pwm", "led"),
PIN_GRP_GPIO_3("pwm1", 12, 1, BIT(4) | BIT(21), 0, BIT(21), BIT(4),
"pwm", "led"),
PIN_GRP_GPIO_3("pwm2", 13, 1, BIT(5) | BIT(22), 0, BIT(22), BIT(5),
"pwm", "led"),
PIN_GRP_GPIO_3("pwm3", 14, 1, BIT(6) | BIT(23), 0, BIT(23), BIT(6),
"pwm", "led"),
PIN_GRP_GPIO("pmic1", 7, 1, BIT(7), "pmic"),
PIN_GRP_GPIO("pmic0", 6, 1, BIT(8), "pmic"),
PIN_GRP_GPIO("i2c2", 2, 2, BIT(9), "i2c"),
PIN_GRP_GPIO("i2c1", 0, 2, BIT(10), "i2c"),
PIN_GRP_GPIO("spi_cs1", 17, 1, BIT(12), "spi"),
@@ -170,11 +174,6 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
18, 2, "gpio", "uart"),
PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),

};

static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
@@ -182,8 +181,11 @@ static struct armada_37xx_pin_group armada_37xx_sb_groups[] = {
PIN_GRP_GPIO("usb2_drvvbus1", 1, 1, BIT(1), "drvbus"),
PIN_GRP_GPIO("sdio_sb", 24, 6, BIT(2), "sdio"),
PIN_GRP_GPIO("rgmii", 6, 12, BIT(3), "mii"),
PIN_GRP_GPIO("pcie1", 3, 2, BIT(4), "pcie"),
PIN_GRP_GPIO("ptp", 20, 3, BIT(5), "ptp"),
PIN_GRP_GPIO("smi", 18, 2, BIT(4), "smi"),
PIN_GRP_GPIO("pcie1", 3, 1, BIT(5), "pcie"),
PIN_GRP_GPIO("pcie1_clkreq", 4, 1, BIT(9), "pcie"),
PIN_GRP_GPIO("pcie1_wakeup", 5, 1, BIT(10), "pcie"),
PIN_GRP_GPIO("ptp", 20, 3, BIT(11) | BIT(12) | BIT(13), "ptp"),
PIN_GRP("ptp_clk", 21, 1, BIT(6), "ptp", "mii"),
PIN_GRP("ptp_trig", 22, 1, BIT(7), "ptp", "mii"),
PIN_GRP_GPIO_3("mii_col", 23, 1, BIT(8) | BIT(14), 0, BIT(8), BIT(14),

@@ -1180,15 +1180,6 @@ static int tpacpi_rfk_update_swstate(const struct tpacpi_rfk *tp_rfk)
return status;
}

/* Query FW and update rfkill sw state for all rfkill switches */
static void tpacpi_rfk_update_swstate_all(void)
{
unsigned int i;

for (i = 0; i < TPACPI_RFK_SW_MAX; i++)
tpacpi_rfk_update_swstate(tpacpi_rfkill_switches[i]);
}

/*
* Sync the HW-blocking state of all rfkill switches,
* do notice it causes the rfkill core to schedule uevents
@@ -3025,9 +3016,6 @@ static void tpacpi_send_radiosw_update(void)
if (wlsw == TPACPI_RFK_RADIO_OFF)
tpacpi_rfk_update_hwblock_state(true);

/* Sync sw blocking state */
tpacpi_rfk_update_swstate_all();

/* Sync hw blocking state last if it is hw-unblocked */
if (wlsw == TPACPI_RFK_RADIO_ON)
tpacpi_rfk_update_hwblock_state(false);

@@ -2955,7 +2955,7 @@ _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)

shost_for_each_device(sdev, ioc->shost) {
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data)
if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
continue;
if (sas_device_priv_data->sas_target->sas_address
!= sas_address)

@@ -1892,12 +1892,12 @@ static void session_recovery_timedout(struct work_struct *work)
}
spin_unlock_irqrestore(&session->lock, flags);

if (session->transport->session_recovery_timedout)
session->transport->session_recovery_timedout(session);

ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");

if (session->transport->session_recovery_timedout)
session->transport->session_recovery_timedout(session);
}

static void __iscsi_unblock_session(struct work_struct *work)

@@ -2582,13 +2582,14 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
free_irq(dev->irq, dev);
priv->irq = 0;
}
free_rtllib(dev);

if (dev->mem_start != 0) {
iounmap((void __iomem *)dev->mem_start);
release_mem_region(pci_resource_start(pdev, 1),
pci_resource_len(pdev, 1));
}

free_rtllib(dev);
} else {
priv = rtllib_priv(dev);
}

@@ -2386,11 +2386,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_try_src(port) ? SRC_TRY
: SNK_ATTACHED,
0);
else
/* Wait for VBUS, but not forever */
tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
break;

case SRC_TRY:
port->try_src_count++;
tcpm_set_cc(port, tcpm_rp_cc(port));

@@ -457,6 +457,8 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz)
{
struct thermal_instance *pos;
tz->temperature = THERMAL_TEMP_INVALID;
tz->prev_low_trip = -INT_MAX;
tz->prev_high_trip = INT_MAX;
list_for_each_entry(pos, &tz->thermal_instances, tz_node)
pos->initialized = false;
}

@@ -99,7 +99,11 @@ static int __write_console(struct xencons_info *xencons,
cons = intf->out_cons;
prod = intf->out_prod;
mb(); /* update queue values before going on */
BUG_ON((prod - cons) > sizeof(intf->out));

if ((prod - cons) > sizeof(intf->out)) {
pr_err_once("xencons: Illegal ring page indices");
return -EINVAL;
}

while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
@@ -127,7 +131,10 @@ static int domU_write_console(uint32_t vtermno, const char *data, int len)
*/
while (len) {
int sent = __write_console(cons, data, len);

if (sent < 0)
return sent;

data += sent;
len -= sent;

@@ -151,7 +158,11 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
cons = intf->in_cons;
prod = intf->in_prod;
mb(); /* get pointers before reading ring */
BUG_ON((prod - cons) > sizeof(intf->in));

if ((prod - cons) > sizeof(intf->in)) {
pr_err_once("xencons: Illegal ring page indices");
return -EINVAL;
}

while (cons != prod && recv < len)
buf[recv++] = intf->in[MASK_XENCONS_IDX(cons++, intf->in)];
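The hvc_xen hunks above replace BUG_ON() with a graceful error when the ring indices, which live in a page shared with the other end, are inconsistent. A minimal model of a masked-index ring plus that validation; OUT_SIZE, struct ring and ring_write() are illustrative stand-ins, not the xencons layout:

#include <stdint.h>
#include <stdio.h>

#define OUT_SIZE 16u	/* power of two, like the xencons ring */
#define MASK(idx) ((idx) & (OUT_SIZE - 1))

struct ring { uint32_t cons, prod; char out[OUT_SIZE]; };

static int ring_write(struct ring *r, const char *data, unsigned int len)
{
	uint32_t cons = r->cons, prod = r->prod;
	unsigned int sent = 0;

	/* Indices are free-running; prod - cons (mod 2^32) is the fill
	 * level. Anything above the ring size means the other side
	 * corrupted them: fail the call instead of crashing, which is
	 * what the switch away from BUG_ON() buys. */
	if (prod - cons > OUT_SIZE)
		return -1;

	while (sent < len && prod - cons < OUT_SIZE)
		r->out[MASK(prod++)] = data[sent++];
	r->prod = prod;
	return (int)sent;
}

int main(void)
{
	struct ring r = { 0 };

	printf("sent=%d\n", ring_write(&r, "hello", 5));	/* sent=5 */
	r.cons = 1000;						/* corrupt */
	printf("sent=%d\n", ring_write(&r, "x", 1));		/* sent=-1 */
	return 0;
}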

@@ -2790,6 +2790,7 @@ MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);

static const struct acpi_device_id sbsa_uart_acpi_match[] = {
{ "ARMH0011", 0 },
{ "ARMHB000", 0 },
{},
};
MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);

@@ -611,6 +611,9 @@ static void msm_start_rx_dma(struct msm_port *msm_port)
u32 val;
int ret;

if (IS_ENABLED(CONFIG_CONSOLE_POLL))
return;

if (!dma->chan)
return;

@@ -1541,6 +1541,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport = uart_port_check(state);
char *buf;

/*
* At this point, we stop accepting input. To do this, we
@@ -1562,8 +1563,18 @@
*/
tty_port_set_suspended(port, 0);

uart_change_pm(state, UART_PM_STATE_OFF);
/*
* Free the transmit buffer.
*/
spin_lock_irq(&uport->lock);
buf = state->xmit.buf;
state->xmit.buf = NULL;
spin_unlock_irq(&uport->lock);

if (buf)
free_page((unsigned long)buf);

uart_change_pm(state, UART_PM_STATE_OFF);
}

static void uart_wait_until_sent(struct tty_struct *tty, int timeout)

@@ -4473,8 +4473,6 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
if (oldspeed == USB_SPEED_LOW)
delay = HUB_LONG_RESET_TIME;

mutex_lock(hcd->address0_mutex);

/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
retval = hub_port_reset(hub, port1, udev, delay, false);
@@ -4773,7 +4771,6 @@ fail:
hub_port_disable(hub, port1, 0);
update_devnum(udev, devnum); /* for disconnect processing */
}
mutex_unlock(hcd->address0_mutex);
return retval;
}

@@ -4863,6 +4860,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
struct usb_port *port_dev = hub->ports[port1 - 1];
struct usb_device *udev = port_dev->child;
static int unreliable_port = -1;
bool retry_locked;

/* Disconnect any existing devices under this port */
if (udev) {
@@ -4918,7 +4916,11 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
unit_load = 100;

status = 0;

for (i = 0; i < SET_CONFIG_TRIES; i++) {
usb_lock_port(port_dev);
mutex_lock(hcd->address0_mutex);
retry_locked = true;

/* reallocate for each attempt, since references
* to the previous one can escape in various ways
@@ -4927,6 +4929,8 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
if (!udev) {
dev_err(&port_dev->dev,
"couldn't allocate usb_device\n");
mutex_unlock(hcd->address0_mutex);
usb_unlock_port(port_dev);
goto done;
}

@@ -4948,12 +4952,14 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
}

/* reset (non-USB 3.0 devices) and get descriptor */
usb_lock_port(port_dev);
status = hub_port_init(hub, udev, port1, i);
usb_unlock_port(port_dev);
if (status < 0)
goto loop;

mutex_unlock(hcd->address0_mutex);
usb_unlock_port(port_dev);
retry_locked = false;

if (udev->quirks & USB_QUIRK_DELAY_INIT)
msleep(2000);

@@ -5046,6 +5052,10 @@ loop:
usb_ep0_reinit(udev);
release_devnum(udev);
hub_free_dev(udev);
if (retry_locked) {
mutex_unlock(hcd->address0_mutex);
usb_unlock_port(port_dev);
}
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
@@ -5613,6 +5623,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
bos = udev->bos;
udev->bos = NULL;

mutex_lock(hcd->address0_mutex);

for (i = 0; i < SET_CONFIG_TRIES; ++i) {

/* ep0 maxpacket size may change; let the HCD know about it.
@@ -5622,6 +5634,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
if (ret >= 0 || ret == -ENOTCONN || ret == -ENODEV)
break;
}
mutex_unlock(hcd->address0_mutex);

if (ret < 0)
goto re_enumerate;

@@ -350,7 +350,9 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci,
/* Must be called with xhci->lock held, releases and aquires lock back */
static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
{
u32 temp_32;
struct xhci_segment *new_seg = xhci->cmd_ring->deq_seg;
union xhci_trb *new_deq = xhci->cmd_ring->dequeue;
u64 crcr;
int ret;

xhci_dbg(xhci, "Abort command ring\n");
@@ -359,13 +361,18 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)

/*
* The control bits like command stop, abort are located in lower
* dword of the command ring control register. Limit the write
* to the lower dword to avoid corrupting the command ring pointer
* in case if the command ring is stopped by the time upper dword
* is written.
* dword of the command ring control register.
* Some controllers require all 64 bits to be written to abort the ring.
* Make sure the upper dword is valid, pointing to the next command,
* avoiding corrupting the command ring pointer in case the command ring
* is stopped by the time the upper dword is written.
*/
temp_32 = readl(&xhci->op_regs->cmd_ring);
writel(temp_32 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
next_trb(xhci, NULL, &new_seg, &new_deq);
if (trb_is_link(new_deq))
next_trb(xhci, NULL, &new_seg, &new_deq);

crcr = xhci_trb_virt_to_dma(new_seg, new_deq);
xhci_write_64(xhci, crcr | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);

/* Section 4.6.1.2 of xHCI 1.0 spec says software should also time the
* completion of the Command Abort operation. If CRR is not negated in 5

@@ -1270,6 +1270,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */
.driver_info = NCTRL(0) | ZLP },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -2096,6 +2098,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */

@@ -490,7 +490,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
virtio_transport_free_pkt(pkt);

len += sizeof(pkt->hdr);
vhost_add_used(vq, head, len);
vhost_add_used(vq, head, 0);
total_len += len;
added = true;
} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));

@@ -365,11 +365,17 @@ static void vgacon_init(struct vc_data *c, int init)
struct uni_pagedir *p;

/*
* We cannot be loaded as a module, therefore init is always 1,
* but vgacon_init can be called more than once, and init will
* not be 1.
* We cannot be loaded as a module, therefore init will be 1
* if we are the default console, however if we are a fallback
* console, for example if fbcon has failed registration, then
* init will be 0, so we need to make sure our boot parameters
* have been copied to the console structure for vgacon_resize
* ultimately called by vc_resize. Any subsequent calls to
* vgacon_init init will have init set to 0 too.
*/
c->vc_can_do_color = vga_can_do_color;
c->vc_scan_lines = vga_scan_lines;
c->vc_font.height = c->vc_cell_height = vga_video_font_height;

/* set dimensions manually if init != 0 since vc_resize() will fail */
if (init) {
@@ -378,8 +384,6 @@ static void vgacon_init(struct vc_data *c, int init)
} else
vc_resize(c, vga_video_num_columns, vga_video_num_lines);

c->vc_scan_lines = vga_scan_lines;
c->vc_font.height = c->vc_cell_height = vga_video_font_height;
c->vc_complement_mask = 0x7700;
if (vga_512_chars)
c->vc_hi_font_mask = 0x0800;

@@ -838,7 +838,7 @@ static struct notifier_block xenbus_resume_nb = {

static int __init xenbus_init(void)
{
int err = 0;
int err;
uint64_t v = 0;
xen_store_domain_type = XS_UNKNOWN;

@@ -878,6 +878,29 @@ static int __init xenbus_init(void)
err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
if (err)
goto out_error;
/*
* Uninitialized hvm_params are zero and return no error.
* Although it is theoretically possible to have
* HVM_PARAM_STORE_PFN set to zero on purpose, in reality it is
* not zero when valid. If zero, it means that Xenstore hasn't
* been properly initialized. Instead of attempting to map a
* wrong guest physical address return error.
*
* Also recognize all bits set as an invalid value.
*/
if (!v || !~v) {
err = -ENOENT;
goto out_error;
}
/* Avoid truncation on 32-bit. */
#if BITS_PER_LONG == 32
if (v > ULONG_MAX) {
pr_err("%s: cannot handle HVM_PARAM_STORE_PFN=%llx > ULONG_MAX\n",
__func__, v);
err = -EINVAL;
goto out_error;
}
#endif
xen_store_gfn = (unsigned long)v;
xen_store_interface =
xen_remap(xen_store_gfn << XEN_PAGE_SHIFT,
@@ -912,8 +935,10 @@ static int __init xenbus_init(void)
*/
proc_create_mount_point("xen");
#endif
return 0;

out_error:
xen_store_domain_type = XS_UNKNOWN;
return err;
}
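The xenbus_init() hunk treats both zero and all-bits-set as invalid HVM_PARAM_STORE_PFN values, per the comment in the diff. A small sketch of that check in isolation; validate_store_pfn() is an illustrative name, not a kernel function:

#include <stdint.h>
#include <stdio.h>

/* 0 means the hypervisor never initialized the param; ~0 is the usual
 * "invalid/error" pattern. Neither must be used as a frame number. */
static int validate_store_pfn(uint64_t v)
{
	if (!v || !~v)
		return -1;	/* the patch returns -ENOENT here */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       validate_store_pfn(0),		/* -1: uninitialized */
	       validate_store_pfn(UINT64_MAX),	/* -1: all bits set */
	       validate_store_pfn(0xfeffc));	/*  0: plausible PFN */
	return 0;
}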
@@ -3339,11 +3339,23 @@ static void btrfs_end_empty_barrier(struct bio *bio)
*/
static void write_dev_flush(struct btrfs_device *device)
{
struct request_queue *q = bdev_get_queue(device->bdev);
struct bio *bio = device->flush_bio;

#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
/*
* When a disk has write caching disabled, we skip submission of a bio
* with flush and sync requests before writing the superblock, since
* it's not needed. However when the integrity checker is enabled, this
* results in reports that there are metadata blocks referred by a
* superblock that were not properly flushed. So don't skip the bio
* submission only when the integrity checker is enabled for the sake
* of simplicity, since this is a debug tool and not meant for use in
* non-debug builds.
*/
struct request_queue *q = bdev_get_queue(device->bdev);
if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
return;
#endif

bio_reset(bio);
bio->bi_end_io = btrfs_end_empty_barrier;
fs/file.c
@@ -679,7 +679,7 @@ void do_close_on_exec(struct files_struct *files)
spin_unlock(&files->file_lock);
}

static struct file *__fget(unsigned int fd, fmode_t mask)
static struct file *__fget(unsigned int fd, fmode_t mask, unsigned int refs)
{
struct files_struct *files = current->files;
struct file *file;
@@ -694,23 +694,32 @@ loop:
*/
if (file->f_mode & mask)
file = NULL;
else if (!get_file_rcu(file))
else if (!get_file_rcu_many(file, refs))
goto loop;
else if (__fcheck_files(files, fd) != file) {
fput_many(file, refs);
goto loop;
}
}
rcu_read_unlock();

return file;
}

struct file *fget_many(unsigned int fd, unsigned int refs)
{
return __fget(fd, FMODE_PATH, refs);
}

struct file *fget(unsigned int fd)
{
return __fget(fd, FMODE_PATH);
return __fget(fd, FMODE_PATH, 1);
}
EXPORT_SYMBOL(fget);

struct file *fget_raw(unsigned int fd)
{
return __fget(fd, 0);
return __fget(fd, 0, 1);
}
EXPORT_SYMBOL(fget_raw);

@@ -741,7 +750,7 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask)
return 0;
return (unsigned long)file;
} else {
file = __fget(fd, mask);
file = __fget(fd, mask, 1);
if (!file)
return 0;
return FDPUT_FPUT | (unsigned long)file;

@@ -261,9 +261,9 @@ void flush_delayed_fput(void)

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
void fput_many(struct file *file, unsigned int refs)
{
if (atomic_long_dec_and_test(&file->f_count)) {
if (atomic_long_sub_and_test(refs, &file->f_count)) {
struct task_struct *task = current;

if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
@@ -282,6 +282,11 @@ void fput(struct file *file)
}
}

void fput(struct file *file)
{
fput_many(file, 1);
}

/*
* synchronous analog of fput(); for kernel threads that might be needed
* in some umount() (and thus can't use flush_delayed_fput() without
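The fs/file.c and fs/file_table.c hunks let one call take or drop several struct file references at once. The key primitive is add-unless-zero: grabbing refs must fail once the count has reached zero, otherwise a file already on its way to being freed could be resurrected. A C11-atomics model of the get_file_rcu_many()/fput_many() semantics; the names, the bare counter and the printf standing in for the free path are all illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long f_count = 1;	/* one reference held by the fd table */

/* Like atomic_long_add_unless(&f_count, refs, 0): add refs unless zero. */
static bool get_refs(long refs)
{
	long c = atomic_load(&f_count);

	do {
		if (c == 0)
			return false;	/* already being torn down */
	} while (!atomic_compare_exchange_weak(&f_count, &c, c + refs));
	return true;
}

/* Like fput_many(): drop refs, free on the transition to zero. */
static void put_refs(long refs)
{
	if (atomic_fetch_sub(&f_count, refs) == refs)
		printf("last reference dropped, file would be freed\n");
}

int main(void)
{
	if (get_refs(3))	/* one call taking 3 references at once */
		put_refs(3);
	put_refs(1);		/* drop the table's reference: frees */
	printf("get after zero: %d\n", get_refs(1));	/* 0: must fail */
	return 0;
}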
@@ -906,6 +906,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
if (!(buf->flags & PIPE_BUF_FLAG_LRU))
lru_cache_add_file(newpage);

/*
* Release while we have extra ref on stolen page. Otherwise
* anon_pipe_buf_release() might think the page can be reused.
*/
pipe_buf_release(cs->pipe, buf);

err = 0;
spin_lock(&cs->req->waitq.lock);
if (test_bit(FR_ABORTED, &cs->req->flags))
@@ -2056,8 +2062,12 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,

pipe_lock(pipe);
out_free:
for (idx = 0; idx < nbuf; idx++)
pipe_buf_release(pipe, &bufs[idx]);
for (idx = 0; idx < nbuf; idx++) {
struct pipe_buffer *buf = &bufs[idx];

if (buf->ops)
pipe_buf_release(pipe, buf);
}
pipe_unlock(pipe);

kfree(bufs);

@@ -186,8 +186,9 @@ static ssize_t _nfs42_proc_copy(struct file *src,
goto out;
}

truncate_pagecache_range(dst_inode, pos_dst,
pos_dst + res->write_res.count);
WARN_ON_ONCE(invalidate_inode_pages2_range(dst_inode->i_mapping,
pos_dst >> PAGE_SHIFT,
(pos_dst + res->write_res.count - 1) >> PAGE_SHIFT));

status = res->write_res.count;
out:

@@ -625,8 +625,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
status = decode_clone(xdr);
if (status)
goto out;
status = decode_getfattr(xdr, res->dst_fattr, res->server);
decode_getfattr(xdr, res->dst_fattr, res->server);
out:
res->rpc_status = status;
return status;

@@ -105,14 +105,19 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
nr_bytes = count;

/* If pfn is not ram, return zeros for sparse dump files */
if (pfn_is_ram(pfn) == 0)
memset(buf, 0, nr_bytes);
else {
if (pfn_is_ram(pfn) == 0) {
tmp = 0;
if (!userbuf)
memset(buf, 0, nr_bytes);
else if (clear_user(buf, nr_bytes))
tmp = -EFAULT;
} else {
tmp = copy_oldmem_page(pfn, buf, nr_bytes,
offset, userbuf);
if (tmp < 0)
return tmp;
}
if (tmp < 0)
return tmp;

*ppos += nr_bytes;
count -= nr_bytes;
buf += nr_bytes;
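read_from_oldmem() now zeroes non-RAM ranges with memset() only for kernel buffers, and goes through clear_user() for user buffers, since a user pointer cannot be written directly. A sketch of that dispatch with a stubbed clear_user(); the stub only models the contract (it returns the number of bytes not cleared, so nonzero means fault), the real one performs the uaccess checks:

#include <stddef.h>
#include <string.h>

/* Stub standing in for the kernel's clear_user(); always succeeds here. */
static unsigned long clear_user_stub(void *ubuf, unsigned long n)
{
	memset(ubuf, 0, n);	/* a real implementation uses uaccess */
	return 0;
}

/* Zero one chunk of the dump, as in the patch: kernel buffers may be
 * memset() directly, user buffers must go through clear_user(). */
static long zero_chunk(char *buf, size_t n, int userbuf)
{
	if (!userbuf)
		memset(buf, 0, n);
	else if (clear_user_stub(buf, n))
		return -14;	/* -EFAULT */
	return 0;
}

int main(void)
{
	char kbuf[8] = "xxxxxxx";

	return (int)zero_chunk(kbuf, sizeof(kbuf), 0);
}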
@ -117,6 +117,8 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb,
|
||||
void tlb_flush_mmu(struct mmu_gather *tlb);
|
||||
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
|
||||
unsigned long start, unsigned long end, bool force);
|
||||
void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
|
||||
unsigned long size);
|
||||
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
|
||||
int page_size);
|
||||
|
||||
|
@ -13,6 +13,7 @@
|
||||
struct file;
|
||||
|
||||
extern void fput(struct file *);
|
||||
extern void fput_many(struct file *, unsigned int);
|
||||
|
||||
struct file_operations;
|
||||
struct vfsmount;
|
||||
@ -41,6 +42,7 @@ static inline void fdput(struct fd fd)
|
||||
}
|
||||
|
||||
extern struct file *fget(unsigned int fd);
|
||||
extern struct file *fget_many(unsigned int fd, unsigned int refs);
|
||||
extern struct file *fget_raw(unsigned int fd);
|
||||
extern unsigned long __fdget(unsigned int fd);
|
||||
extern unsigned long __fdget_raw(unsigned int fd);
|
||||
|
@ -914,7 +914,9 @@ static inline struct file *get_file(struct file *f)
|
||||
atomic_long_inc(&f->f_count);
|
||||
return f;
|
||||
}
|
||||
#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
|
||||
#define get_file_rcu_many(x, cnt) \
|
||||
atomic_long_add_unless(&(x)->f_count, (cnt), 0)
|
||||
#define get_file_rcu(x) get_file_rcu_many((x), 1)
|
||||
#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
|
||||
#define file_count(x) atomic_long_read(&(x)->f_count)
|
||||
|
||||
|
@ -127,6 +127,16 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
|
||||
return ns;
|
||||
}
|
||||
|
||||
static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
|
||||
{
|
||||
if (ns) {
|
||||
if (refcount_inc_not_zero(&ns->count))
|
||||
return ns;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
extern void put_ipc_ns(struct ipc_namespace *ns);
|
||||
#else
|
||||
static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
|
||||
@ -143,6 +153,11 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
|
||||
return ns;
|
||||
}
|
||||
|
||||
static inline struct ipc_namespace *get_ipc_ns_not_zero(struct ipc_namespace *ns)
|
||||
{
|
||||
return ns;
|
||||
}
|
||||
|
||||
static inline void put_ipc_ns(struct ipc_namespace *ns)
|
||||
{
|
||||
}
|
||||
|
@ -193,6 +193,8 @@ struct kretprobe {
|
||||
raw_spinlock_t lock;
|
||||
};
|
||||
|
||||
#define KRETPROBE_MAX_DATA_SIZE 4096
|
||||
|
||||
struct kretprobe_instance {
|
||||
struct hlist_node hlist;
|
||||
struct kretprobe *rp;
|
||||
|
include/linux/sched/task.h

@@ -122,7 +122,7 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4(). Also used in procfs. Also
 * pins the final release of task.io_context. Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 * ->cgroup.subsys[]. And ->vfork_done. And ->sysvshm.shm_clist.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
include/linux/shm.h

@@ -20,9 +20,18 @@ struct shmid_kernel /* private to the kernel */
	pid_t shm_lprid;
	struct user_struct *mlock_user;

	/* The task created the shm object. NULL if the task is dead. */
	/*
	 * The task created the shm object, for
	 * task_lock(shp->shm_creator)
	 */
	struct task_struct *shm_creator;
	struct list_head shm_clist; /* list by creator */

	/*
	 * List by creator. task_lock(->shm_creator) required for read/write.
	 * If list_empty(), then the creator is dead already.
	 */
	struct list_head shm_clist;
	struct ipc_namespace *ns;
} __randomize_layout;

/* shm_mode upper byte flags */
include/linux/siphash.h

@@ -27,9 +27,7 @@ static inline bool siphash_key_is_zero(const siphash_key_t *key)
}

u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
#endif

u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);

@@ -82,10 +80,9 @@ static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
static inline u64 siphash(const void *data, size_t len,
			  const siphash_key_t *key)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
	    !IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
		return __siphash_unaligned(data, len, key);
#endif
	return ___siphash_aligned(data, len, key);
}

@@ -96,10 +93,8 @@ typedef struct {

u32 __hsiphash_aligned(const void *data, size_t len,
		       const hsiphash_key_t *key);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
			 const hsiphash_key_t *key);
#endif

u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);

@@ -135,10 +130,9 @@ static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
static inline u32 hsiphash(const void *data, size_t len,
			   const hsiphash_key_t *key)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
	    !IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
		return __hsiphash_unaligned(data, len, key);
#endif
	return ___hsiphash_aligned(data, len, key);
}
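The siphash()/hsiphash() rewrite above folds the two cases into one expression: when IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) is compile-time true the unaligned implementation is always used (so the aligned one can be compiled out), otherwise the pointer is tested at runtime. A stand-alone sketch of that dispatch shape, assuming stub hash functions and a local config macro in place of the kernel's:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS (1 or 0). */
#define HAVE_EFFICIENT_UNALIGNED_ACCESS 0
#define HASH_ALIGNMENT sizeof(uint64_t)

static uint64_t hash_aligned(const void *data, size_t len)   { (void)data; return len * 2u; }
static uint64_t hash_unaligned(const void *data, size_t len) { (void)data; return len * 2u + 1u; }

static uint64_t hash(const void *data, size_t len)
{
	/* On targets with fast unaligned loads the first operand is a
	 * compile-time 1 and the whole test folds to "always unaligned". */
	if (HAVE_EFFICIENT_UNALIGNED_ACCESS ||
	    ((uintptr_t)data & (HASH_ALIGNMENT - 1)))
		return hash_unaligned(data, len);
	return hash_aligned(data, len);
}

int main(void)
{
	_Alignas(8) char buf[16];

	printf("aligned:   %llu\n", (unsigned long long)hash(buf, 8));
	printf("unaligned: %llu\n", (unsigned long long)hash(buf + 1, 8));
	return 0;
}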
include/net/nfc/nci_core.h

@@ -42,6 +42,7 @@ enum nci_flag {
	NCI_UP,
	NCI_DATA_EXCHANGE,
	NCI_DATA_EXCHANGE_TO,
	NCI_UNREG,
};

/* NCI device states */
include/net/nl802154.h

@@ -19,6 +19,8 @@
 *
 */

#include <linux/types.h>

#define NL802154_GENL_NAME "nl802154"

enum nl802154_commands {

@@ -150,10 +152,9 @@ enum nl802154_attrs {
};

enum nl802154_iftype {
	/* for backwards compatibility TODO */
	NL802154_IFTYPE_UNSPEC = -1,
	NL802154_IFTYPE_UNSPEC = (~(__u32)0),

	NL802154_IFTYPE_NODE,
	NL802154_IFTYPE_NODE = 0,
	NL802154_IFTYPE_MONITOR,
	NL802154_IFTYPE_COORD,
include/uapi/linux/pci_regs.h

@@ -654,6 +654,11 @@
#define PCI_EXP_LNKCAP2_SLS_8_0GB	0x00000008 /* Supported Speed 8.0GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK	0x00000100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2			48	/* Link Control 2 */
#define PCI_EXP_LNKCTL2_TLS		0x000f
#define PCI_EXP_LNKCTL2_TLS_2_5GT	0x0001 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCTL2_TLS_5_0GT	0x0002 /* Supported Speed 5GT/s */
#define PCI_EXP_LNKCTL2_TLS_8_0GT	0x0003 /* Supported Speed 8GT/s */
#define PCI_EXP_LNKCTL2_TLS_16_0GT	0x0004 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKSTA2			50	/* Link Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2	52	/* v2 endpoints with link end here */
#define PCI_EXP_SLTCAP2			52	/* Slot Capabilities 2 */
include/xen/interface/io/ring.h

@@ -1,21 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
 * ring.h
 *
 * Shared producer-consumer ring macros.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Tim Deegan and Andrew Warfield November 2004.
 */

#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

/*
 * When #include'ing this header, you need to provide the following
 * declaration upfront:
 * - standard integer types (uint8_t, uint16_t, etc)
 * They are provided by stdint.h of the standard headers.
 *
 * In addition, if you intend to use the FLEX macros, you also need to
 * provide the following, before invoking the FLEX macros:
 * - size_t
 * - memcpy
 * - grant_ref_t
 * These declarations are provided by string.h of the standard headers,
 * and grant_table.h from the Xen public headers.
 */

#include <xen/interface/grant_table.h>

typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x) (((_x) & 0x00000002) ? 0x2 : ((_x) & 0x1))
#define __RD4(_x) (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2 : __RD2(_x))
#define __RD8(_x) (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4 : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8 : __RD8(_x))

@@ -27,82 +59,79 @@ typedef unsigned int RING_IDX;
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz) \
	(__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
		sizeof(((struct _s##_sring *)0)->ring[0])))
/*
 * The same for passing in an actual pointer instead of a name tag.
 */
#define __RING_SIZE(_s, _sz) \
	(__RD32(((_sz) - (long)&(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
#define __RING_SIZE(_s, _sz) \
	(__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))

/*
 * Macros to make the correct C datatypes for a new kind of ring.
 *
 * To make a new ring datatype, you need to have two message structures,
 * let's say struct request, and struct response already defined.
 * let's say request_t, and response_t already defined.
 *
 * In a header where you want the ring datatype declared, you then do:
 *
 *     DEFINE_RING_TYPES(mytag, struct request, struct response);
 *     DEFINE_RING_TYPES(mytag, request_t, response_t);
 *
 * These expand out to give you a set of types, as you can see below.
 * The most important of these are:
 *
 *     struct mytag_sring      - The shared ring.
 *     struct mytag_front_ring - The 'front' half of the ring.
 *     struct mytag_back_ring  - The 'back' half of the ring.
 *     mytag_sring_t      - The shared ring.
 *     mytag_front_ring_t - The 'front' half of the ring.
 *     mytag_back_ring_t  - The 'back' half of the ring.
 *
 * To initialize a ring in your code you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialise
 * the front half:
 *
 *     struct mytag_front_ring front_ring;
 *     SHARED_RING_INIT((struct mytag_sring *)shared_page);
 *     FRONT_RING_INIT(&front_ring, (struct mytag_sring *)shared_page,
 *                     PAGE_SIZE);
 *     mytag_front_ring_t front_ring;
 *     SHARED_RING_INIT((mytag_sring_t *)shared_page);
 *     FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 *
 * Initializing the back follows similarly (note that only the front
 * initializes the shared ring):
 *
 *     struct mytag_back_ring back_ring;
 *     BACK_RING_INIT(&back_ring, (struct mytag_sring *)shared_page,
 *                    PAGE_SIZE);
 *     mytag_back_ring_t back_ring;
 *     BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 */
#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
\
/* Shared ring entry */ \
union __name##_sring_entry { \
	__req_t req; \
	__rsp_t rsp; \
}; \
\
/* Shared ring page */ \
struct __name##_sring { \
	RING_IDX req_prod, req_event; \
	RING_IDX rsp_prod, rsp_event; \
	uint8_t pad[48]; \
	union __name##_sring_entry ring[1]; /* variable-length */ \
}; \
\
/* "Front" end's private variables */ \
struct __name##_front_ring { \
	RING_IDX req_prod_pvt; \
	RING_IDX rsp_cons; \
	unsigned int nr_ents; \
	struct __name##_sring *sring; \
}; \
\
/* "Back" end's private variables */ \
struct __name##_back_ring { \
	RING_IDX rsp_prod_pvt; \
	RING_IDX req_cons; \
	unsigned int nr_ents; \
	struct __name##_sring *sring; \
};

#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t) \
\
/* Shared ring entry */ \
union __name##_sring_entry { \
	__req_t req; \
	__rsp_t rsp; \
}; \
\
/* Shared ring page */ \
struct __name##_sring { \
	RING_IDX req_prod, req_event; \
	RING_IDX rsp_prod, rsp_event; \
	uint8_t __pad[48]; \
	union __name##_sring_entry ring[1]; /* variable-length */ \
}; \
\
/* "Front" end's private variables */ \
struct __name##_front_ring { \
	RING_IDX req_prod_pvt; \
	RING_IDX rsp_cons; \
	unsigned int nr_ents; \
	struct __name##_sring *sring; \
}; \
\
/* "Back" end's private variables */ \
struct __name##_back_ring { \
	RING_IDX rsp_prod_pvt; \
	RING_IDX req_cons; \
	unsigned int nr_ents; \
	struct __name##_sring *sring; \
}; \
\
/*
 * Macros for manipulating rings.
 *
@@ -119,105 +148,99 @@ struct __name##_back_ring { \
 */

/* Initialising empty rings */
#define SHARED_RING_INIT(_s) do { \
	(_s)->req_prod = (_s)->rsp_prod = 0; \
	(_s)->req_event = (_s)->rsp_event = 1; \
	memset((_s)->pad, 0, sizeof((_s)->pad)); \
#define SHARED_RING_INIT(_s) do { \
	(_s)->req_prod = (_s)->rsp_prod = 0; \
	(_s)->req_event = (_s)->rsp_event = 1; \
	(void)memset((_s)->__pad, 0, sizeof((_s)->__pad)); \
} while(0)

#define FRONT_RING_INIT(_r, _s, __size) do { \
	(_r)->req_prod_pvt = 0; \
	(_r)->rsp_cons = 0; \
	(_r)->nr_ents = __RING_SIZE(_s, __size); \
	(_r)->sring = (_s); \
#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
	(_r)->req_prod_pvt = (_i); \
	(_r)->rsp_cons = (_i); \
	(_r)->nr_ents = __RING_SIZE(_s, __size); \
	(_r)->sring = (_s); \
} while (0)

#define BACK_RING_INIT(_r, _s, __size) do { \
	(_r)->rsp_prod_pvt = 0; \
	(_r)->req_cons = 0; \
	(_r)->nr_ents = __RING_SIZE(_s, __size); \
	(_r)->sring = (_s); \
#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)

#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
	(_r)->rsp_prod_pvt = (_i); \
	(_r)->req_cons = (_i); \
	(_r)->nr_ents = __RING_SIZE(_s, __size); \
	(_r)->sring = (_s); \
} while (0)

/* Initialize to existing shared indexes -- for recovery */
#define FRONT_RING_ATTACH(_r, _s, __size) do { \
	(_r)->sring = (_s); \
	(_r)->req_prod_pvt = (_s)->req_prod; \
	(_r)->rsp_cons = (_s)->rsp_prod; \
	(_r)->nr_ents = __RING_SIZE(_s, __size); \
} while (0)

#define BACK_RING_ATTACH(_r, _s, __size) do { \
	(_r)->sring = (_s); \
	(_r)->rsp_prod_pvt = (_s)->rsp_prod; \
	(_r)->req_cons = (_s)->req_prod; \
	(_r)->nr_ents = __RING_SIZE(_s, __size); \
} while (0)
#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)

/* How big is this ring? */
#define RING_SIZE(_r) \
	((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r) \
	(RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/* Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front.)
 */
#define RING_FULL(_r) \
	(RING_FREE_REQUESTS(_r) == 0)

/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r) \
	((_r)->sring->rsp_prod - (_r)->rsp_cons)

#define RING_HAS_UNCONSUMED_REQUESTS(_r) \
	({ \
		unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
		unsigned int rsp = RING_SIZE(_r) - \
			((_r)->req_cons - (_r)->rsp_prod_pvt); \
		req < rsp ? req : rsp; \
	})
#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({ \
	unsigned int req = (_r)->sring->req_prod - (_r)->req_cons; \
	unsigned int rsp = RING_SIZE(_r) - \
		((_r)->req_cons - (_r)->rsp_prod_pvt); \
	req < rsp ? req : rsp; \
})

/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx) \
	(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

#define RING_GET_RESPONSE(_r, _idx) \
	(&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/*
 * Get a local copy of a request.
 * Get a local copy of a request/response.
 *
 * Use this in preference to RING_GET_REQUEST() so all processing is
 * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
 * done on a local copy that cannot be modified by the other end.
 *
 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
 * to be ineffective where _req is a struct which consists of only bitfields.
 * to be ineffective where dest is a struct which consists of only bitfields.
 */
#define RING_COPY_REQUEST(_r, _idx, _req) do { \
	/* Use volatile to force the copy into _req. */ \
	*(_req) = *(volatile typeof(_req))RING_GET_REQUEST(_r, _idx); \
#define RING_COPY_(type, r, idx, dest) do { \
	/* Use volatile to force the copy into dest. */ \
	*(dest) = *(volatile typeof(dest))RING_GET_##type(r, idx); \
} while (0)

#define RING_COPY_REQUEST(r, idx, req) RING_COPY_(REQUEST, r, idx, req)
#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)
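RING_COPY_() above snapshots a shared-ring slot through a volatile-qualified pointer, forcing the compiler to emit one real copy into private memory; every later check then operates on data the peer can no longer change underneath the reader. A user-space sketch of that snapshot-then-validate idiom (struct and names are illustrative):

#include <stdio.h>

struct request {
	unsigned int op;
	unsigned int len;
};

/* Pretend this slot lives in memory shared with an untrusted peer. */
static volatile struct request shared_slot = { 1, 64 };

static int valid(const struct request *req)
{
	/* Both fields come from the same snapshot, so the peer flipping
	 * shared_slot between these two tests cannot desynchronize them. */
	return req->op == 1 && req->len <= 4096;
}

int main(void)
{
	struct request local;

	local = shared_slot; /* one forced copy, like RING_COPY_REQUEST() */
	printf("valid: %d\n", valid(&local));
	return 0;
}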
/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons) \
	(((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))

/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod) \
	(((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))

/* Ill-behaved backend determination: Can there be this many responses? */
#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod) \
	(((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))

#define RING_PUSH_REQUESTS(_r) do { \
	virt_wmb(); /* back sees requests /before/ updated producer index */ \
	(_r)->sring->req_prod = (_r)->req_prod_pvt; \
} while (0)

#define RING_PUSH_RESPONSES(_r) do { \
	virt_wmb(); /* front sees responses /before/ updated producer index */ \
	(_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
#define RING_PUSH_RESPONSES(_r) do { \
	virt_wmb(); /* front sees resps /before/ updated producer index */ \
	(_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \
} while (0)

/*

@@ -250,40 +273,40 @@ struct __name##_back_ring { \
 * field appropriately.
 */

#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \
	RING_IDX __old = (_r)->sring->req_prod; \
	RING_IDX __new = (_r)->req_prod_pvt; \
	virt_wmb(); /* back sees requests /before/ updated producer index */ \
	(_r)->sring->req_prod = __new; \
	virt_mb(); /* back sees new requests /before/ we check req_event */ \
	(_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \
		     (RING_IDX)(__new - __old)); \
} while (0)

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
	RING_IDX __old = (_r)->sring->rsp_prod; \
	RING_IDX __new = (_r)->rsp_prod_pvt; \
	virt_wmb(); /* front sees responses /before/ updated producer index */ \
	(_r)->sring->rsp_prod = __new; \
	virt_mb(); /* front sees new responses /before/ we check rsp_event */ \
	(_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
		     (RING_IDX)(__new - __old)); \
#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \
	RING_IDX __old = (_r)->sring->rsp_prod; \
	RING_IDX __new = (_r)->rsp_prod_pvt; \
	virt_wmb(); /* front sees resps /before/ updated producer index */ \
	(_r)->sring->rsp_prod = __new; \
	virt_mb(); /* front sees new resps /before/ we check rsp_event */ \
	(_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \
		     (RING_IDX)(__new - __old)); \
} while (0)

#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do { \
	(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
	if (_work_to_do) break; \
	(_r)->sring->req_event = (_r)->req_cons + 1; \
	virt_mb(); \
	(_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do { \
	(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
	if (_work_to_do) break; \
	(_r)->sring->rsp_event = (_r)->rsp_cons + 1; \
	virt_mb(); \
	(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
} while (0)
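The _notify computation in the *_CHECK_NOTIFY macros relies on unsigned RING_IDX wraparound: (RING_IDX)(new - event) < (RING_IDX)(new - old) is true exactly when the consumer's event index lies in the half-open window (old, new], i.e. when this push published an entry the other end asked to be woken for. A small demonstration of the comparison (plain C, illustrative values):

#include <stdio.h>

typedef unsigned int ring_idx;

/* Notify iff event is in the window (old, new], wraparound-safe. */
static int should_notify(ring_idx old, ring_idx new_prod, ring_idx event)
{
	return (ring_idx)(new_prod - event) < (ring_idx)(new_prod - old);
}

int main(void)
{
	printf("%d\n", should_notify(10, 12, 11)); /* 1: event inside window */
	printf("%d\n", should_notify(10, 12, 13)); /* 0: peer not waiting yet */
	printf("%d\n", should_notify(10, 12, 9));  /* 0: already notified */
	/* Still correct when the 32-bit indices wrap around. */
	printf("%d\n", should_notify(0xfffffffeu, 1u, 0xffffffffu)); /* 1 */
	return 0;
}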
ipc/shm.c
@@ -92,6 +92,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	WARN_ON(ns != shp->ns);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;

@@ -185,10 +186,43 @@ static void shm_rcu_free(struct rcu_head *head)
	kvfree(shp);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
/*
 * It has to be called with shp locked.
 * It must be called before ipc_rmid()
 */
static inline void shm_clist_rm(struct shmid_kernel *shp)
{
	list_del(&s->shm_clist);
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
	struct task_struct *creator;

	/* ensure that shm_creator does not disappear */
	rcu_read_lock();

	/*
	 * A concurrent exit_shm may do a list_del_init() as well.
	 * Just do nothing if exit_shm already did the work
	 */
	if (!list_empty(&shp->shm_clist)) {
		/*
		 * shp->shm_creator is guaranteed to be valid *only*
		 * if shp->shm_clist is not empty.
		 */
		creator = shp->shm_creator;

		task_lock(creator);
		/*
		 * list_del_init() is a nop if the entry was already removed
		 * from the list.
		 */
		list_del_init(&shp->shm_clist);
		task_unlock(creator);
	}
	rcu_read_unlock();
}

static inline void shm_rmid(struct shmid_kernel *s)
{
	shm_clist_rm(s);
	ipc_rmid(&shm_ids(s->ns), &s->shm_perm);
}
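The new shm_clist_rm() works because the kernel's list_del_init() leaves the node pointing at itself, so a later list_empty() on the node answers "was this already unlinked?" and repeating the deletion is harmless. That is what lets shm_clist_rm() and exit_shm() race with no coordination beyond task_lock(). A self-contained sketch of the idiom with a minimal intrusive list (not the kernel headers):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

/* Unlink and re-point the node at itself, like the kernel's list_del_init(). */
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head clist, node;

	list_init(&clist);
	list_init(&node);
	list_add(&node, &clist);
	printf("linked: %d\n", !list_empty(&node)); /* 1 */

	list_del_init(&node);
	printf("linked: %d\n", !list_empty(&node)); /* 0: the "creator is dead" check */

	list_del_init(&node); /* second delete is a harmless no-op */
	printf("linked: %d\n", !list_empty(&node)); /* still 0 */
	return 0;
}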
@@ -243,7 +277,7 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_rmid(shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);

@@ -264,10 +298,10 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
static bool shm_may_destroy(struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
	       (shp->ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

@@ -298,7 +332,7 @@ static void shm_close(struct vm_area_struct *vma)
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = ktime_get_real_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
	if (shm_may_destroy(shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);

@@ -319,10 +353,10 @@ static int shm_try_destroy_orphaned(int id, void *p, void *data)
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
	if (!list_empty(&shp->shm_clist))
		return 0;

	if (shm_may_destroy(ns, shp)) {
	if (shm_may_destroy(shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}

@@ -340,48 +374,97 @@ void shm_destroy_orphaned(struct ipc_namespace *ns)
/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
	struct shmid_kernel *shp, *n;
	for (;;) {
		struct shmid_kernel *shp;
		struct ipc_namespace *ns;

	if (list_empty(&task->sysvshm.shm_clist))
		return;
		task_lock(task);

	/*
	 * If kernel.shm_rmid_forced is not set then only keep track of
	 * which shmids are orphaned, so that a later set of the sysctl
	 * can clean them up.
	 */
	if (!ns->shm_rmid_forced) {
		down_read(&shm_ids(ns).rwsem);
		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
			shp->shm_creator = NULL;
		/*
		 * Only under read lock but we are only called on current
		 * so no entry on the list will be shared.
		 */
		list_del(&task->sysvshm.shm_clist);
		up_read(&shm_ids(ns).rwsem);
		return;
	}

	/*
	 * Destroy all already created segments, that were not yet mapped,
	 * and mark any mapped as orphan to cover the sysctl toggling.
	 * Destroy is skipped if shm_may_destroy() returns false.
	 */
	down_write(&shm_ids(ns).rwsem);
	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
		shp->shm_creator = NULL;

		if (shm_may_destroy(ns, shp)) {
			shm_lock_by_ptr(shp);
			shm_destroy(ns, shp);
		if (list_empty(&task->sysvshm.shm_clist)) {
			task_unlock(task);
			break;
		}
	}

	/* Remove the list head from any segments still attached. */
	list_del(&task->sysvshm.shm_clist);
	up_write(&shm_ids(ns).rwsem);
		shp = list_first_entry(&task->sysvshm.shm_clist, struct shmid_kernel,
				       shm_clist);

		/*
		 * 1) Get pointer to the ipc namespace. It is worth to say
		 * that this pointer is guaranteed to be valid because
		 * shp lifetime is always shorter than namespace lifetime
		 * in which shp lives.
		 * We have taken task_lock(), which means that shp won't be freed.
		 */
		ns = shp->ns;

		/*
		 * 2) If kernel.shm_rmid_forced is not set then only keep track of
		 * which shmids are orphaned, so that a later set of the sysctl
		 * can clean them up.
		 */
		if (!ns->shm_rmid_forced)
			goto unlink_continue;

		/*
		 * 3) get a reference to the namespace.
		 * The refcount could be already 0. If it is 0, then
		 * the shm objects will be freed by free_ipc_work().
		 */
		ns = get_ipc_ns_not_zero(ns);
		if (!ns) {
unlink_continue:
			list_del_init(&shp->shm_clist);
			task_unlock(task);
			continue;
		}

		/*
		 * 4) get a reference to shp.
		 * This cannot fail: shm_clist_rm() is called before
		 * ipc_rmid(), thus the refcount cannot be 0.
		 */
		WARN_ON(!ipc_rcu_getref(&shp->shm_perm));

		/*
		 * 5) unlink the shm segment from the list of segments
		 * created by current.
		 * This must be done last. After unlinking,
		 * only the refcounts obtained above prevent IPC_RMID
		 * from destroying the segment or the namespace.
		 */
		list_del_init(&shp->shm_clist);

		task_unlock(task);

		/*
		 * 6) we have all references
		 * Thus lock & if needed destroy shp.
		 */
		down_write(&shm_ids(ns).rwsem);
		shm_lock_by_ptr(shp);
		/*
		 * rcu_read_lock was implicitly taken in shm_lock_by_ptr, it's
		 * safe to call ipc_rcu_putref here
		 */
		ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);

		if (ipc_valid_object(&shp->shm_perm)) {
			if (shm_may_destroy(shp))
				shm_destroy(ns, shp);
			else
				shm_unlock(shp);
		} else {
			/*
			 * Someone else deleted the shp from namespace
			 * idr/kht while we have waited.
			 * Just unlock and continue.
			 */
			shm_unlock(shp);
		}

		up_write(&shm_ids(ns).rwsem);
		put_ipc_ns(ns); /* paired with get_ipc_ns_not_zero */
	}
}

static int shm_fault(struct vm_fault *vmf)

@@ -625,7 +708,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
	if (error < 0)
		goto no_id;

	shp->ns = ns;

	task_lock(current);
	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
	task_unlock(current);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.

@@ -1449,7 +1536,8 @@ out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	shp->shm_nattch--;

	if (shm_may_destroy(ns, shp))
	if (shm_may_destroy(shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
ipc/util.c

@@ -409,8 +409,8 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
static void ipc_kht_remove(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
	if (ipcp->key != IPC_PRIVATE)
		rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode,
				       ipc_kht_params);
		WARN_ON_ONCE(rhashtable_remove_fast(&ids->key_ht, &ipcp->khtnode,
						    ipc_kht_params));
}

/**

@@ -425,7 +425,7 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
	int lid = ipcid_to_idx(ipcp->id);

	idr_remove(&ids->ipcs_idr, lid);
	WARN_ON_ONCE(idr_remove(&ids->ipcs_idr, lid) != ipcp);
	ipc_kht_remove(ids, ipcp);
	ids->in_use--;
	ipcp->deleted = true;
kernel/kprobes.c

@@ -2004,6 +2004,9 @@ int register_kretprobe(struct kretprobe *rp)
		}
	}

	if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
		return -E2BIG;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
kernel/power/hibernate.c

@@ -668,7 +668,7 @@ static int load_image_and_restore(void)
		goto Unlock;

	error = swsusp_read(&flags);
	swsusp_close(FMODE_READ);
	swsusp_close(FMODE_READ | FMODE_EXCL);
	if (!error)
		hibernation_restore(flags & SF_PLATFORM_MODE);

@@ -865,7 +865,7 @@ static int software_resume(void)
	/* The snapshot device should not be opened while we're running */
	if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
		error = -EBUSY;
		swsusp_close(FMODE_READ);
		swsusp_close(FMODE_READ | FMODE_EXCL);
		goto Unlock;
	}

@@ -901,7 +901,7 @@ static int software_resume(void)
	pm_pr_dbg("Hibernation image not present or could not be loaded.\n");
	return error;
 Close_Finish:
	swsusp_close(FMODE_READ);
	swsusp_close(FMODE_READ | FMODE_EXCL);
	goto Finish;
}
kernel/trace/trace.h

@@ -1363,14 +1363,26 @@ __event_trigger_test_discard(struct trace_event_file *file,
	if (eflags & EVENT_FILE_FL_TRIGGER_COND)
		*tt = event_triggers_call(file, entry);

	if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
	    (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	     !filter_match_preds(file->filter, entry))) {
		__trace_event_discard_commit(buffer, event);
		return true;
	}
	if (likely(!(file->flags & (EVENT_FILE_FL_SOFT_DISABLED |
				    EVENT_FILE_FL_FILTERED |
				    EVENT_FILE_FL_PID_FILTER))))
		return false;

	if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
		goto discard;

	if (file->flags & EVENT_FILE_FL_FILTERED &&
	    !filter_match_preds(file->filter, entry))
		goto discard;

	if ((file->flags & EVENT_FILE_FL_PID_FILTER) &&
	    trace_event_ignore_this_pid(file))
		goto discard;

	return false;
 discard:
	__trace_event_discard_commit(buffer, event);
	return true;
}

/**
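The rewritten __event_trigger_test_discard() tests all three discard-related flags with one combined mask first, so the common case (no flag set) costs a single branch; only then does it walk the individual conditions. A hedged sketch of that fast-path shape (flag names and the check functions are illustrative stand-ins, not the tracing API):

#include <stdio.h>

#define FL_SOFT_DISABLED (1u << 0)
#define FL_FILTERED      (1u << 1)
#define FL_PID_FILTER    (1u << 2)

static int filter_matches(void) { return 0; } /* pretend the filter rejects */
static int pid_ignored(void)    { return 1; }

/* Return 1 when the event should be discarded. */
static int test_discard(unsigned int flags)
{
	/* Fast path: one masked test covers every slow-path condition. */
	if (!(flags & (FL_SOFT_DISABLED | FL_FILTERED | FL_PID_FILTER)))
		return 0;

	if (flags & FL_SOFT_DISABLED)
		return 1;
	if ((flags & FL_FILTERED) && !filter_matches())
		return 1;
	if ((flags & FL_PID_FILTER) && pid_ignored())
		return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", test_discard(0));                /* 0: fast path */
	printf("%d\n", test_discard(FL_SOFT_DISABLED)); /* 1 */
	printf("%d\n", test_discard(FL_PID_FILTER));    /* 1: pid filtered out */
	return 0;
}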
kernel/trace/trace_events.c

@@ -2254,12 +2254,19 @@ static struct trace_event_file *
trace_create_new_event(struct trace_event_call *call,
		       struct trace_array *tr)
{
	struct trace_pid_list *pid_list;
	struct trace_event_file *file;

	file = kmem_cache_alloc(file_cachep, GFP_TRACE);
	if (!file)
		return NULL;

	pid_list = rcu_dereference_protected(tr->filtered_pids,
					     lockdep_is_held(&event_mutex));

	if (pid_list)
		file->flags |= EVENT_FILE_FL_PID_FILTER;

	file->event_call = call;
	file->tr = tr;
	atomic_set(&file->sm_ref, 0);
lib/siphash.c

@@ -49,6 +49,7 @@
	SIPROUND; \
	return (v0 ^ v1) ^ (v2 ^ v3);

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
{
	const u8 *end = data + len - (len % sizeof(u64));

@@ -80,8 +81,8 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
	POSTAMBLE
}
EXPORT_SYMBOL(__siphash_aligned);
#endif

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
{
	const u8 *end = data + len - (len % sizeof(u64));

@@ -113,7 +114,6 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
	POSTAMBLE
}
EXPORT_SYMBOL(__siphash_unaligned);
#endif

/**
 * siphash_1u64 - compute 64-bit siphash PRF value of a u64

@@ -250,6 +250,7 @@ EXPORT_SYMBOL(siphash_3u32);
	HSIPROUND; \
	return (v0 ^ v1) ^ (v2 ^ v3);

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
	const u8 *end = data + len - (len % sizeof(u64));

@@ -280,8 +281,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
	HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_aligned);
#endif

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
			 const hsiphash_key_t *key)
{

@@ -313,7 +314,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
	HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_unaligned);
#endif

/**
 * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32

@@ -418,6 +418,7 @@ EXPORT_SYMBOL(hsiphash_4u32);
	HSIPROUND; \
	return v1 ^ v3;

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
	const u8 *end = data + len - (len % sizeof(u32));

@@ -438,8 +439,8 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
	HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_aligned);
#endif

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
			 const hsiphash_key_t *key)
{

@@ -461,7 +462,6 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
	HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_unaligned);
#endif

/**
 * hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
mm/hugetlb.c
@@ -3384,8 +3384,9 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
	struct page *page;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	const unsigned long mmun_start = start;	/* For mmu_notifiers */
	const unsigned long mmun_end = end;	/* For mmu_notifiers */
	unsigned long mmun_start = start;	/* For mmu_notifiers */
	unsigned long mmun_end = end;		/* For mmu_notifiers */
	bool force_flush = false;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));

@@ -3397,6 +3398,11 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
	 */
	tlb_remove_check_page_size_change(tlb, sz);
	tlb_start_vma(tlb, vma);

	/*
	 * If sharing possible, alert mmu notifiers of worst case.
	 */
	adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	address = start;
	for (; address < end; address += sz) {

@@ -3407,6 +3413,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, &address, ptep)) {
			spin_unlock(ptl);
			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
			force_flush = true;
			continue;
		}

@@ -3463,6 +3471,22 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
	}
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	tlb_end_vma(tlb, vma);

	/*
	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
	 * could defer the flush until now, since by holding i_mmap_rwsem we
	 * guaranteed that the last reference would not be dropped. But we must
	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
	 * dropped and the last reference to the shared PMDs page might be
	 * dropped as well.
	 *
	 * In theory we could defer the freeing of the PMD pages as well, but
	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
	 * detect sharing, so we cannot defer the release of the page either.
	 * Instead, do flush now.
	 */
	if (force_flush)
		tlb_flush_mmu(tlb);
}

void __unmap_hugepage_range_final(struct mmu_gather *tlb,

@@ -3489,12 +3513,23 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
{
	struct mm_struct *mm;
	struct mmu_gather tlb;
	unsigned long tlb_start = start;
	unsigned long tlb_end = end;

	/*
	 * If shared PMDs were possibly used within this vma range, adjust
	 * start/end for worst case tlb flushing.
	 * Note that we can not be sure if PMDs are shared until we try to
	 * unmap pages. However, we want to make sure TLB flushing covers
	 * the largest possible range.
	 */
	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);

	mm = vma->vm_mm;

	tlb_gather_mmu(&tlb, mm, start, end);
	tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
	tlb_finish_mmu(&tlb, start, end);
	tlb_finish_mmu(&tlb, tlb_start, tlb_end);
}

/*

@@ -4389,11 +4424,21 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	unsigned long pages = 0;
	unsigned long f_start = start;
	unsigned long f_end = end;
	bool shared_pmd = false;

	/*
	 * In the case of shared PMDs, the area to flush could be beyond
	 * start/end. Set f_start/f_end to cover the maximum possible
	 * range if PMD sharing is possible.
	 */
	adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);
	flush_cache_range(vma, f_start, f_end);

	mmu_notifier_invalidate_range_start(mm, start, end);
	mmu_notifier_invalidate_range_start(mm, f_start, f_end);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (; address < end; address += huge_page_size(h)) {
		spinlock_t *ptl;

@@ -4404,6 +4449,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		if (huge_pmd_unshare(mm, &address, ptep)) {
			pages++;
			spin_unlock(ptl);
			shared_pmd = true;
			continue;
		}
		pte = huge_ptep_get(ptep);

@@ -4439,12 +4485,18 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
	 * may have cleared our pud entry and done put_page on the page table:
	 * once we release i_mmap_rwsem, another task can do the final put_page
	 * and that page table be reused and filled with junk.
	 * and that page table be reused and filled with junk. If we actually
	 * did unshare a page of pmds, flush the range corresponding to the pud.
	 */
	flush_hugetlb_tlb_range(vma, start, end);
	mmu_notifier_invalidate_range(mm, start, end);
	if (shared_pmd) {
		flush_hugetlb_tlb_range(vma, f_start, f_end);
		mmu_notifier_invalidate_range(mm, f_start, f_end);
	} else {
		flush_hugetlb_tlb_range(vma, start, end);
		mmu_notifier_invalidate_range(mm, start, end);
	}
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	mmu_notifier_invalidate_range_end(mm, start, end);
	mmu_notifier_invalidate_range_end(mm, f_start, f_end);

	return pages << h->order;
}
mm/memory.c
@@ -335,6 +335,16 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
	return false;
}

void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
			 unsigned long size)
{
	if (tlb->page_size != 0 && tlb->page_size != PMD_SIZE)
		tlb_flush_mmu(tlb);

	tlb->page_size = PMD_SIZE;
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + size);
}
#endif /* HAVE_GENERIC_MMU_GATHER */

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
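tlb_flush_pmd_range() grows the pending flush window with min()/max() so that a single later flush covers every PMD-sized range recorded since the last tlb_flush_mmu(). A small sketch of that accumulate-then-flush idiom (plain C, simplified types):

#include <stdio.h>

struct gather {
	unsigned long start, end; /* pending flush window */
};

static void record_range(struct gather *g, unsigned long addr,
			 unsigned long size)
{
	/* Widen the window; one flush later covers all recorded ranges. */
	if (addr < g->start)
		g->start = addr;
	if (addr + size > g->end)
		g->end = addr + size;
}

int main(void)
{
	struct gather g = { ~0ul, 0 };

	record_range(&g, 0x200000, 0x200000);
	record_range(&g, 0x800000, 0x200000);
	printf("flush [%#lx, %#lx)\n", g.start, g.end); /* 0x200000..0xa00000 */
	return 0;
}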
net/ipv4/devinet.c

@@ -2324,7 +2324,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
free:
	kfree(t);
out:
	return -ENOBUFS;
	return -ENOMEM;
}

static void __devinet_sysctl_unregister(struct net *net,
net/ipv4/tcp_cubic.c

@@ -340,8 +340,6 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
		return;

	if (tcp_in_slow_start(tp)) {
		if (hystart && after(ack, ca->end_seq))
			bictcp_hystart_reset(sk);
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;

@@ -383,6 +381,9 @@ static void hystart_update(struct sock *sk, u32 delay)
	if (ca->found & hystart_detect)
		return;

	if (after(tp->snd_una, ca->end_seq))
		bictcp_hystart_reset(sk);

	if (hystart_detect & HYSTART_ACK_TRAIN) {
		u32 now = bictcp_clock();
net/ipv6/ip6_output.c

@@ -175,7 +175,7 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		IP6CB(skb)->flags |= IP6SKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
net/mpls/af_mpls.c

@@ -1407,22 +1407,52 @@ static void mpls_dev_destroy_rcu(struct rcu_head *head)
	kfree(mdev);
}

static void mpls_ifdown(struct net_device *dev, int event)
static int mpls_ifdown(struct net_device *dev, int event)
{
	struct mpls_route __rcu **platform_label;
	struct net *net = dev_net(dev);
	u8 alive, deleted;
	unsigned index;

	platform_label = rtnl_dereference(net->mpls.platform_label);
	for (index = 0; index < net->mpls.platform_labels; index++) {
		struct mpls_route *rt = rtnl_dereference(platform_label[index]);
		bool nh_del = false;
		u8 alive = 0;

		if (!rt)
			continue;

		alive = 0;
		deleted = 0;
		if (event == NETDEV_UNREGISTER) {
			u8 deleted = 0;

			for_nexthops(rt) {
				struct net_device *nh_dev =
					rtnl_dereference(nh->nh_dev);

				if (!nh_dev || nh_dev == dev)
					deleted++;
				if (nh_dev == dev)
					nh_del = true;
			} endfor_nexthops(rt);

			/* if there are no more nexthops, delete the route */
			if (deleted == rt->rt_nhn) {
				mpls_route_update(net, index, NULL, NULL);
				continue;
			}

			if (nh_del) {
				size_t size = sizeof(*rt) + rt->rt_nhn *
					rt->rt_nh_size;
				struct mpls_route *orig = rt;

				rt = kmalloc(size, GFP_KERNEL);
				if (!rt)
					return -ENOMEM;
				memcpy(rt, orig, size);
			}
		}

		change_nexthops(rt) {
			unsigned int nh_flags = nh->nh_flags;

@@ -1446,16 +1476,15 @@ static void mpls_ifdown(struct net_device *dev, int event)
next:
			if (!(nh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)))
				alive++;
			if (!rtnl_dereference(nh->nh_dev))
				deleted++;
		} endfor_nexthops(rt);

		WRITE_ONCE(rt->rt_nhn_alive, alive);

		/* if there are no more nexthops, delete the route */
		if (event == NETDEV_UNREGISTER && deleted == rt->rt_nhn)
			mpls_route_update(net, index, NULL, NULL);
		if (nh_del)
			mpls_route_update(net, index, rt, NULL);
	}

	return 0;
}

static void mpls_ifup(struct net_device *dev, unsigned int flags)

@@ -1519,8 +1548,12 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
		return NOTIFY_OK;

	switch (event) {
		int err;

	case NETDEV_DOWN:
		mpls_ifdown(dev, event);
		err = mpls_ifdown(dev, event);
		if (err)
			return notifier_from_errno(err);
		break;
	case NETDEV_UP:
		flags = dev_get_flags(dev);

@@ -1531,13 +1564,18 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
		break;
	case NETDEV_CHANGE:
		flags = dev_get_flags(dev);
		if (flags & (IFF_RUNNING | IFF_LOWER_UP))
		if (flags & (IFF_RUNNING | IFF_LOWER_UP)) {
			mpls_ifup(dev, RTNH_F_DEAD | RTNH_F_LINKDOWN);
		else
			mpls_ifdown(dev, event);
		} else {
			err = mpls_ifdown(dev, event);
			if (err)
				return notifier_from_errno(err);
		}
		break;
	case NETDEV_UNREGISTER:
		mpls_ifdown(dev, event);
		err = mpls_ifdown(dev, event);
		if (err)
			return notifier_from_errno(err);
		mdev = mpls_dev_get(dev);
		if (mdev) {
			mpls_dev_sysctl_unregister(dev, mdev);

@@ -1548,8 +1586,6 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
	case NETDEV_CHANGENAME:
		mdev = mpls_dev_get(dev);
		if (mdev) {
			int err;

			mpls_dev_sysctl_unregister(dev, mdev);
			err = mpls_dev_sysctl_register(dev, mdev);
			if (err)
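With the return type change, mpls_ifdown() can no longer shrink a route's nexthop set in place; on NETDEV_UNREGISTER it clones the route, edits the clone, and publishes it through mpls_route_update(), reporting -ENOMEM so the notifier can veto the event. A toy sketch of that copy-edit-publish shape (a simplified single-threaded stand-in, not the MPLS data structures):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct route {
	int nh_flags[4]; /* per-nexthop state */
};

static struct route *published; /* what readers currently see */

/* Never edit the published copy; clone, modify, then swap it in. */
static int route_update(int nh, int flags)
{
	struct route *copy = malloc(sizeof(*copy));
	struct route *old;

	if (!copy)
		return -1; /* caller propagates the failure upward */
	memcpy(copy, published, sizeof(*copy));
	copy->nh_flags[nh] = flags;

	old = published;
	published = copy; /* a real kernel would RCU-defer the free */
	free(old);
	return 0;
}

int main(void)
{
	published = calloc(1, sizeof(*published));
	if (!published)
		return 1;
	printf("update: %d, flags: %d\n", route_update(1, 3),
	       published->nh_flags[1]);
	return 0;
}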
net/netfilter/ipvs/ip_vs_core.c

@@ -1838,7 +1838,6 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
	struct ip_vs_proto_data *pd;
	struct ip_vs_conn *cp;
	int ret, pkts;
	int conn_reuse_mode;
	struct sock *sk;

	/* Already marked as IPVS request or reply? */

@@ -1914,15 +1913,16 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
	 */
	cp = pp->conn_in_get(ipvs, af, skb, &iph);

	conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
	if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
	if (!iph.fragoffs && is_new_conn(skb, &iph) && cp) {
		int conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
		bool old_ct = false, resched = false;

		if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
		    unlikely(!atomic_read(&cp->dest->weight))) {
			resched = true;
			old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
		} else if (is_new_conn_expected(cp, conn_reuse_mode)) {
		} else if (conn_reuse_mode &&
			   is_new_conn_expected(cp, conn_reuse_mode)) {
			old_ct = ip_vs_conn_uses_old_conntrack(cp, skb);
			if (!atomic_read(&cp->n_control)) {
				resched = true;
net/nfc/nci/core.c

@@ -485,6 +485,11 @@ static int nci_open_device(struct nci_dev *ndev)

	mutex_lock(&ndev->req_lock);

	if (test_bit(NCI_UNREG, &ndev->flags)) {
		rc = -ENODEV;
		goto done;
	}

	if (test_bit(NCI_UP, &ndev->flags)) {
		rc = -EALREADY;
		goto done;

@@ -548,6 +553,10 @@ done:
static int nci_close_device(struct nci_dev *ndev)
{
	nci_req_cancel(ndev, ENODEV);

	/* This mutex needs to be held as a barrier for
	 * caller nci_unregister_device
	 */
	mutex_lock(&ndev->req_lock);

	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {

@@ -585,8 +594,8 @@ static int nci_close_device(struct nci_dev *ndev)
	/* Flush cmd wq */
	flush_workqueue(ndev->cmd_wq);

	/* Clear flags */
	ndev->flags = 0;
	/* Clear flags except NCI_UNREG */
	ndev->flags &= BIT(NCI_UNREG);

	mutex_unlock(&ndev->req_lock);

@@ -1270,6 +1279,12 @@ void nci_unregister_device(struct nci_dev *ndev)
{
	struct nci_conn_info *conn_info, *n;

	/* This set_bit is not protected with specialized barrier,
	 * However, it is fine because the mutex_lock(&ndev->req_lock);
	 * in nci_close_device() will help to emit one.
	 */
	set_bit(NCI_UNREG, &ndev->flags);

	nci_close_device(ndev);

	destroy_workqueue(ndev->cmd_wq);
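The NCI_UNREG handling is a small ordering pattern: nci_unregister_device() sets the flag before taking req_lock, and nci_open_device() checks it under the same lock, so once unregistration has begun no new open can slip past; clearing flags with "&= BIT(NCI_UNREG)" keeps the marker sticky across close. A user-space sketch with a pthread mutex standing in for req_lock (illustrative, not the NCI API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static int unregistering; /* NCI_UNREG stand-in */

static int open_device(void)
{
	int rc = 0;

	pthread_mutex_lock(&req_lock);
	if (unregistering)
		rc = -1; /* -ENODEV in the kernel code */
	/* ... otherwise bring the device up ... */
	pthread_mutex_unlock(&req_lock);
	return rc;
}

static void unregister_device(void)
{
	unregistering = 1;             /* set first ...                 */
	pthread_mutex_lock(&req_lock); /* ... the lock acts as barrier  */
	/* close the device, flush pending work */
	pthread_mutex_unlock(&req_lock);
}

int main(void)
{
	unregister_device();
	printf("open after unregister: %d\n", open_device()); /* -1 */
	return 0;
}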
net/rds/tcp.c

@@ -392,7 +392,7 @@ void rds_tcp_tune(struct socket *sock)
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rtn->rcvbuf_size > 0) {
		sk->sk_sndbuf = rtn->rcvbuf_size;
		sk->sk_rcvbuf = rtn->rcvbuf_size;
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
	release_sock(sk);
net/smc/af_smc.c

@@ -1178,8 +1178,10 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	bool do_shutdown = true;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int old_state;
	int rc1 = 0;

	smc = smc_sk(sk);

@@ -1206,7 +1208,11 @@ static int smc_shutdown(struct socket *sock, int how)
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		old_state = sk->sk_state;
		rc = smc_close_active(smc);
		if (old_state == SMC_ACTIVE &&
		    sk->sk_state == SMC_PEERCLOSEWAIT1)
			do_shutdown = false;
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);

@@ -1216,7 +1222,7 @@ static int smc_shutdown(struct socket *sock, int how)
		/* nothing more to do because peer is not involved */
		break;
	}
	if (smc->clcsock)
	if (do_shutdown && smc->clcsock)
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;
net/smc/smc_close.c

@@ -180,6 +180,7 @@ int smc_close_active(struct smc_sock *smc)
	int old_state;
	long timeout;
	int rc = 0;
	int rc1 = 0;

	timeout = current->flags & PF_EXITING ?
		  0 : sock_flag(sk, SOCK_LINGER) ?

@@ -215,6 +216,15 @@ again:
		/* send close request */
		rc = smc_close_final(conn);
		sk->sk_state = SMC_PEERCLOSEWAIT1;

		/* actively shutdown clcsock before peer close it,
		 * prevent peer from entering TIME_WAIT state.
		 */
		if (smc->clcsock && smc->clcsock->sk) {
			rc1 = kernel_sock_shutdown(smc->clcsock,
						   SHUT_RDWR);
			rc = rc ? rc : rc1;
		}
	} else {
		/* peer event has changed the state */
		goto again;
sound/pci/ctxfi/ctamixer.c

@@ -27,16 +27,15 @@

#define BLANK_SLOT	4094

static int amixer_master(struct rsc *rsc)
static void amixer_master(struct rsc *rsc)
{
	rsc->conj = 0;
	return rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
	rsc->idx = container_of(rsc, struct amixer, rsc)->idx[0];
}

static int amixer_next_conj(struct rsc *rsc)
static void amixer_next_conj(struct rsc *rsc)
{
	rsc->conj++;
	return container_of(rsc, struct amixer, rsc)->idx[rsc->conj];
}

static int amixer_index(const struct rsc *rsc)

@@ -335,16 +334,15 @@ int amixer_mgr_destroy(struct amixer_mgr *amixer_mgr)

/* SUM resource management */

static int sum_master(struct rsc *rsc)
static void sum_master(struct rsc *rsc)
{
	rsc->conj = 0;
	return rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
	rsc->idx = container_of(rsc, struct sum, rsc)->idx[0];
}

static int sum_next_conj(struct rsc *rsc)
static void sum_next_conj(struct rsc *rsc)
{
	rsc->conj++;
	return container_of(rsc, struct sum, rsc)->idx[rsc->conj];
}

static int sum_index(const struct rsc *rsc)
sound/pci/ctxfi/ctdaio.c

@@ -55,12 +55,12 @@
	[SPDIFIO] = {.left = 0x05, .right = 0x85},
};

static int daio_master(struct rsc *rsc)
static void daio_master(struct rsc *rsc)
{
	/* Actually, this is not the resource index of DAIO.
	 * For DAO, it is the input mapper index. And, for DAI,
	 * it is the output time-slot index. */
	return rsc->conj = rsc->idx;
	rsc->conj = rsc->idx;
}

static int daio_index(const struct rsc *rsc)

@@ -68,19 +68,19 @@ static int daio_index(const struct rsc *rsc)
	return rsc->conj;
}

static int daio_out_next_conj(struct rsc *rsc)
static void daio_out_next_conj(struct rsc *rsc)
{
	return rsc->conj += 2;
	rsc->conj += 2;
}

static int daio_in_next_conj_20k1(struct rsc *rsc)
static void daio_in_next_conj_20k1(struct rsc *rsc)
{
	return rsc->conj += 0x200;
	rsc->conj += 0x200;
}

static int daio_in_next_conj_20k2(struct rsc *rsc)
static void daio_in_next_conj_20k2(struct rsc *rsc)
{
	return rsc->conj += 0x100;
	rsc->conj += 0x100;
}

static const struct rsc_ops daio_out_rsc_ops = {
sound/pci/ctxfi/ctresource.c

@@ -113,18 +113,17 @@ static int audio_ring_slot(const struct rsc *rsc)
	return (rsc->conj << 4) + offset_in_audio_slot_block[rsc->type];
}

static int rsc_next_conj(struct rsc *rsc)
static void rsc_next_conj(struct rsc *rsc)
{
	unsigned int i;
	for (i = 0; (i < 8) && (!(rsc->msr & (0x1 << i))); )
		i++;
	rsc->conj += (AUDIO_SLOT_BLOCK_NUM >> i);
	return rsc->conj;
}

static int rsc_master(struct rsc *rsc)
static void rsc_master(struct rsc *rsc)
{
	return rsc->conj = rsc->idx;
	rsc->conj = rsc->idx;
}

static const struct rsc_ops rsc_generic_ops = {
@@ -43,8 +43,8 @@ struct rsc {
 };
 
 struct rsc_ops {
-	int (*master)(struct rsc *rsc); /* Move to master resource */
-	int (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
+	void (*master)(struct rsc *rsc); /* Move to master resource */
+	void (*next_conj)(struct rsc *rsc); /* Move to next conjugate resource */
 	int (*index)(const struct rsc *rsc); /* Return the index of resource */
 	/* Return the output slot number */
 	int (*output_slot)(const struct rsc *rsc);
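This header hunk is the crux of the "ALSA: ctxfi: Fix out-of-range access" change: the old int-returning next_conj implementations read idx[rsc->conj] after the increment purely to build a return value that no caller consumed, so on the final step the read landed one element past the index array. A standalone before/after sketch (toy_rsc and its 4-entry array are illustrative stand-ins, not the driver's types):

/* Standalone sketch, not driver code. */
#include <stdio.h>

struct toy_rsc {
	unsigned int conj;
	unsigned int idx[4];
};

/* Old shape: advance, then read idx[conj] just to produce a return
 * value nobody uses.  Once conj reaches 4 this reads past the array. */
static unsigned int next_conj_old(struct toy_rsc *r)
{
	r->conj++;
	return r->idx[r->conj];
}

/* New shape: advance only; callers query the index separately. */
static void next_conj_new(struct toy_rsc *r)
{
	r->conj++;
}

int main(void)
{
	struct toy_rsc r = { .conj = 0, .idx = {10, 11, 12, 13} };

	printf("old in-range return: %u\n", next_conj_old(&r)); /* conj=1 */
	r.conj = 3;
	next_conj_new(&r); /* conj=4: the old variant would read idx[4] here */
	printf("conj=%u\n", r.conj);
	return 0;
}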
@@ -594,16 +594,15 @@ int src_mgr_destroy(struct src_mgr *src_mgr)
 
 /* SRCIMP resource manager operations */
 
-static int srcimp_master(struct rsc *rsc)
+static void srcimp_master(struct rsc *rsc)
 {
 	rsc->conj = 0;
-	return rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
+	rsc->idx = container_of(rsc, struct srcimp, rsc)->idx[0];
 }
 
-static int srcimp_next_conj(struct rsc *rsc)
+static void srcimp_next_conj(struct rsc *rsc)
 {
 	rsc->conj++;
-	return container_of(rsc, struct srcimp, rsc)->idx[rsc->conj];
 }
 
 static int srcimp_index(const struct rsc *rsc)
@@ -2585,6 +2585,7 @@ EXPORT_SYMBOL_GPL(snd_soc_tplg_widget_remove_all);
 /* remove dynamic controls from the component driver */
 int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
 {
+	struct snd_card *card = comp->card->snd_card;
 	struct snd_soc_dobj *dobj, *next_dobj;
 	int pass = SOC_TPLG_PASS_END;
 
@@ -2592,6 +2593,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
 	while (pass >= SOC_TPLG_PASS_START) {
 
 		/* remove mixer controls */
+		down_write(&card->controls_rwsem);
 		list_for_each_entry_safe(dobj, next_dobj, &comp->dobj_list,
 			list) {
 
@@ -2625,6 +2627,7 @@ int snd_soc_tplg_component_remove(struct snd_soc_component *comp, u32 index)
 				break;
 			}
 		}
+		up_write(&card->controls_rwsem);
 		pass--;
 	}
 
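The three soc-topology hunks above take card->controls_rwsem for writing around the entire removal walk, so the snd_ctl_remove() calls inside the loop never race with readers of the control list. The key point is the lock's scope: it covers the whole pass, not each individual removal. As a rough user-space analogy of that discipline (a pthread rwlock and a toy singly linked list stand in for the kernel rwsem and dobj_list; none of this is the kernel API):

/* User-space analogy, not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static struct node *head;
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/* Unlink and free every entry while holding the writer side for the
 * whole walk, mirroring the down_write()/up_write() placement above. */
static void remove_all(void)
{
	struct node *n, *next;

	pthread_rwlock_wrlock(&lock);	/* like down_write() */
	for (n = head; n; n = next) {
		next = n->next;		/* save next before freeing */
		free(n);
	}
	head = NULL;
	pthread_rwlock_unlock(&lock);	/* like up_write() */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}
	remove_all();
	printf("list emptied under the write lock\n");
	return 0;
}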
@@ -468,6 +468,18 @@ struct perf_hpp_list perf_hpp_list = {
 #undef __HPP_SORT_ACC_FN
 #undef __HPP_SORT_RAW_FN
 
+static void fmt_free(struct perf_hpp_fmt *fmt)
+{
+	/*
+	 * At this point fmt should be completely
+	 * unhooked, if not it's a bug.
+	 */
+	BUG_ON(!list_empty(&fmt->list));
+	BUG_ON(!list_empty(&fmt->sort_list));
+
+	if (fmt->free)
+		fmt->free(fmt);
+}
+
 void perf_hpp__init(void)
 {
@@ -531,9 +543,10 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
 	list_add(&format->sort_list, &list->sorts);
 }
 
-void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
+static void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
 {
 	list_del_init(&format->list);
+	fmt_free(format);
 }
 
 void perf_hpp__cancel_cumulate(void)
@@ -605,19 +618,6 @@ next:
 	}
 }
 
-static void fmt_free(struct perf_hpp_fmt *fmt)
-{
-	/*
-	 * At this point fmt should be completely
-	 * unhooked, if not it's a bug.
-	 */
-	BUG_ON(!list_empty(&fmt->list));
-	BUG_ON(!list_empty(&fmt->sort_list));
-
-	if (fmt->free)
-		fmt->free(fmt);
-}
-
 void perf_hpp__reset_output_field(struct perf_hpp_list *list)
 {
 	struct perf_hpp_fmt *fmt, *tmp;
@@ -339,7 +339,6 @@ enum {
 };
 
 void perf_hpp__init(void);
-void perf_hpp__column_unregister(struct perf_hpp_fmt *format);
 void perf_hpp__cancel_cumulate(void);
 void perf_hpp__setup_output_field(struct perf_hpp_list *list);
 void perf_hpp__reset_output_field(struct perf_hpp_list *list);
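The perf hunks above plug a leak by making perf_hpp__column_unregister() both unlink and free the format; fmt_free() moves earlier in the file so the now-static helper can call it without a forward declaration, and the external declaration is dropped from hists.h. The general pattern, sketched standalone (struct fmt and unregister_fmt() are illustrative stand-ins, not the perf types):

/* Standalone sketch, not perf code: once an entry is unhooked from
 * every list, free it in the same helper so no caller can forget. */
#include <assert.h>
#include <stdlib.h>

struct fmt {
	struct fmt *next;		/* stand-in for a kernel-style list link */
	void (*free_priv)(struct fmt *);
};

static struct fmt *columns;

static void unregister_fmt(struct fmt *f)
{
	struct fmt **p;

	/* Unlink from the column list... */
	for (p = &columns; *p; p = &(*p)->next) {
		if (*p == f) {
			*p = f->next;
			f->next = NULL;
			break;
		}
	}
	/* ...then free in the same place, so unregistering an entry
	 * can no longer leak it. */
	assert(f->next == NULL);
	if (f->free_priv)
		f->free_priv(f);
	free(f);
}

int main(void)
{
	struct fmt *f = calloc(1, sizeof(*f));

	if (!f)
		return 1;
	f->next = columns;
	columns = f;
	unregister_fmt(f);	/* unlink + free in one step */
	return columns == NULL ? 0 : 1;
}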