Mirror of https://github.com/rd-stuffs/msm-4.14.git, synced 2025-02-20 11:45:48 +08:00
Merge android-4.14.125 (dc5b587) into msm-4.14
* refs/heads/tmp-dc5b587:
  Linux 4.14.125
  ethtool: check the return value of get_regs_len
  ipv4: Define __ipv4_neigh_lookup_noref when CONFIG_INET is disabled
  fuse: Add FOPEN_STREAM to use stream_open()
  fs: stream_open - opener for stream-like files so that read and write can run simultaneously without deadlock
  qmi_wwan: Add quirk for Quectel dynamic config
  TTY: serial_core, add ->install
  drm/i915/fbc: disable framebuffer compression on GeminiLake
  drm/i915: Fix I915_EXEC_RING_MASK
  drm/radeon: prefer lower reference dividers
  drm/amdgpu/psp: move psp version specific function pointers to early_init
  drm/nouveau: add kconfig option to turn off nouveau legacy contexts. (v3)
  drm/gma500/cdv: Check vbt config bits when detecting lvds panels
  test_firmware: Use correct snprintf() limit
  genwqe: Prevent an integer overflow in the ioctl
  Revert "MIPS: perf: ath79: Fix perfcount IRQ assignment"
  MIPS: pistachio: Build uImage.gz by default
  MIPS: Bounds check virt_addr_valid
  i2c: xiic: Add max_read_len quirk
  x86/power: Fix 'nosmt' vs hibernation triple fault during resume
  pstore/ram: Run without kernel crash dump region
  pstore: Convert buf_lock to semaphore
  pstore: Remove needless lock during console writes
  fuse: fallocate: fix return with locked inode
  parisc: Use implicit space register selection for loading the coherence index of I/O pdirs
  rcu: locking and unlocking need to always be at least barriers
  Revert "fib_rules: return 0 directly if an exactly same rule exists when NLM_F_EXCL not supplied"
  Revert "fib_rules: fix error in backport of e9919a24d302 ("fib_rules: return 0...")"
  ipv6: fix the check before getting the cookie in rt6_get_cookie
  net: sfp: read eeprom in maximum 16 byte increments
  ipv6: use READ_ONCE() for inet->hdrincl as in ipv4
  ipv6: fix EFAULT on sendto with icmpv6 and hdrincl
  pktgen: do not sleep with the thread lock held.
  net: rds: fix memory leak in rds_ib_flush_mr_pool
  net/mlx4_en: ethtool, Remove unsupported SFP EEPROM high pages query
  neighbor: Call __ipv4_neigh_lookup_noref in neigh_xmit
  Fix memory leak in sctp_process_init
  ethtool: fix potential userspace buffer overflow

Change-Id: Ice7fba2663c02167db026bf7b9c8f466a158f6d5
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
This commit is contained in:
commit 7c63330d05
Makefile: 2 changed lines
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 124
+SUBLEVEL = 125
 EXTRAVERSION =
 NAME = Petit Gorille
 
@ -183,6 +183,12 @@ const char *get_system_type(void)
|
||||
return ath79_sys_type;
|
||||
}
|
||||
|
||||
int get_c0_perfcount_int(void)
|
||||
{
|
||||
return ATH79_MISC_IRQ(5);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
|
||||
|
||||
unsigned int get_c0_compare_int(void)
|
||||
{
|
||||
return CP0_LEGACY_COMPARE_IRQ;
|
||||
|
@ -203,6 +203,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
|
||||
|
||||
int __virt_addr_valid(const volatile void *kaddr)
|
||||
{
|
||||
unsigned long vaddr = (unsigned long)kaddr;
|
||||
|
||||
if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
|
||||
return 0;
|
||||
|
||||
return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__virt_addr_valid);
|
||||
|
@ -6,3 +6,4 @@ cflags-$(CONFIG_MACH_PISTACHIO) += \
|
||||
-I$(srctree)/arch/mips/include/asm/mach-pistachio
|
||||
load-$(CONFIG_MACH_PISTACHIO) += 0xffffffff80400000
|
||||
zload-$(CONFIG_MACH_PISTACHIO) += 0xffffffff81000000
|
||||
all-$(CONFIG_MACH_PISTACHIO) := uImage.gz
|
||||
|
@ -566,8 +566,6 @@ static int nvram_pstore_init(void)
|
||||
nvram_pstore_info.buf = oops_data;
|
||||
nvram_pstore_info.bufsize = oops_data_sz;
|
||||
|
||||
spin_lock_init(&nvram_pstore_info.buf_lock);
|
||||
|
||||
rc = pstore_register(&nvram_pstore_info);
|
||||
if (rc && (rc != -EPERM))
|
||||
/* Print error only when pstore.backend == nvram */
|
||||
|
@ -299,7 +299,17 @@ int hibernate_resume_nonboot_cpu_disable(void)
|
||||
* address in its instruction pointer may not be possible to resolve
|
||||
* any more at that point (the page tables used by it previously may
|
||||
* have been overwritten by hibernate image data).
|
||||
*
|
||||
* First, make sure that we wake up all the potentially disabled SMT
|
||||
* threads which have been initially brought up and then put into
|
||||
* mwait/cpuidle sleep.
|
||||
* Those will be put to proper (not interfering with hibernation
|
||||
* resume) sleep afterwards, and the resumed kernel will decide itself
|
||||
* what to do with them.
|
||||
*/
|
||||
ret = cpuhp_smt_enable();
|
||||
if (ret)
|
||||
return ret;
|
||||
smp_ops.play_dead = resume_play_dead;
|
||||
ret = disable_nonboot_cpus();
|
||||
smp_ops.play_dead = play_dead;
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/kdebug.h>
|
||||
#include <linux/cpu.h>
|
||||
|
||||
#include <crypto/hash.h>
|
||||
|
||||
@ -347,3 +348,35 @@ int arch_hibernation_header_restore(void *addr)
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int arch_resume_nosmt(void)
|
||||
{
|
||||
int ret = 0;
|
||||
/*
|
||||
* We reached this while coming out of hibernation. This means
|
||||
* that SMT siblings are sleeping in hlt, as mwait is not safe
|
||||
* against control transition during resume (see comment in
|
||||
* hibernate_resume_nonboot_cpu_disable()).
|
||||
*
|
||||
* If the resumed kernel has SMT disabled, we have to take all the
|
||||
* SMT siblings out of hlt, and offline them again so that they
|
||||
* end up in mwait proper.
|
||||
*
|
||||
* Called with hotplug disabled.
|
||||
*/
|
||||
cpu_hotplug_enable();
|
||||
if (cpu_smt_control == CPU_SMT_DISABLED ||
|
||||
cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
|
||||
enum cpuhp_smt_control old = cpu_smt_control;
|
||||
|
||||
ret = cpuhp_smt_enable();
|
||||
if (ret)
|
||||
goto out;
|
||||
ret = cpuhp_smt_disable(old);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
out:
|
||||
cpu_hotplug_disable();
|
||||
return ret;
|
||||
}
|
||||
|
@ -1175,7 +1175,6 @@ static int __init erst_init(void)
|
||||
"Error Record Serialization Table (ERST) support is initialized.\n");
|
||||
|
||||
buf = kmalloc(erst_erange.size, GFP_KERNEL);
|
||||
spin_lock_init(&erst_info.buf_lock);
|
||||
if (buf) {
|
||||
erst_info.buf = buf + sizeof(struct cper_pstore_record);
|
||||
erst_info.bufsize = erst_erange.size -
|
||||
|
@ -258,8 +258,7 @@ static int efi_pstore_write(struct pstore_record *record)
|
||||
efi_name[i] = name[i];
|
||||
|
||||
ret = efivar_entry_set_safe(efi_name, vendor, PSTORE_EFI_ATTRIBUTES,
|
||||
!pstore_cannot_block_path(record->reason),
|
||||
record->size, record->psi->buf);
|
||||
preemptible(), record->size, record->psi->buf);
|
||||
|
||||
if (record->reason == KMSG_DUMP_OOPS)
|
||||
efivar_run_worker();
|
||||
@ -368,7 +367,6 @@ static __init int efivars_pstore_init(void)
|
||||
return -ENOMEM;
|
||||
|
||||
efi_pstore_info.bufsize = 1024;
|
||||
spin_lock_init(&efi_pstore_info.buf_lock);
|
||||
|
||||
if (pstore_register(&efi_pstore_info)) {
|
||||
kfree(efi_pstore_info.buf);
|
||||
|
@ -37,18 +37,10 @@ static void psp_set_funcs(struct amdgpu_device *adev);
|
||||
static int psp_early_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct psp_context *psp = &adev->psp;
|
||||
|
||||
psp_set_funcs(adev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int psp_sw_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct psp_context *psp = &adev->psp;
|
||||
int ret;
|
||||
|
||||
switch (adev->asic_type) {
|
||||
case CHIP_VEGA10:
|
||||
psp->init_microcode = psp_v3_1_init_microcode;
|
||||
@ -79,6 +71,15 @@ static int psp_sw_init(void *handle)
|
||||
|
||||
psp->adev = adev;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int psp_sw_init(void *handle)
|
||||
{
|
||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||
struct psp_context *psp = &adev->psp;
|
||||
int ret;
|
||||
|
||||
ret = psp_init_microcode(psp);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to load psp firmware!\n");
|
||||
|
@ -594,6 +594,9 @@ void cdv_intel_lvds_init(struct drm_device *dev,
|
||||
int pipe;
|
||||
u8 pin;
|
||||
|
||||
if (!dev_priv->lvds_enabled_in_vbt)
|
||||
return;
|
||||
|
||||
pin = GMBUS_PORT_PANEL;
|
||||
if (!lvds_is_present_in_vbt(dev, &pin)) {
|
||||
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
|
||||
|
@ -436,6 +436,9 @@ parse_driver_features(struct drm_psb_private *dev_priv,
|
||||
if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
|
||||
dev_priv->edp.support = 1;
|
||||
|
||||
dev_priv->lvds_enabled_in_vbt = driver->lvds_config != 0;
|
||||
DRM_DEBUG_KMS("LVDS VBT config bits: 0x%x\n", driver->lvds_config);
|
||||
|
||||
/* This bit means to use 96Mhz for DPLL_A or not */
|
||||
if (driver->primary_lfp_id)
|
||||
dev_priv->dplla_96mhz = true;
|
||||
|
@ -538,6 +538,7 @@ struct drm_psb_private {
|
||||
int lvds_ssc_freq;
|
||||
bool is_lvds_on;
|
||||
bool is_mipi_on;
|
||||
bool lvds_enabled_in_vbt;
|
||||
u32 mipi_ctrl_display;
|
||||
|
||||
unsigned int core_freq;
|
||||
|
@ -1299,6 +1299,10 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
|
||||
if (!HAS_FBC(dev_priv))
|
||||
return 0;
|
||||
|
||||
/* https://bugs.freedesktop.org/show_bug.cgi?id=108085 */
|
||||
if (IS_GEMINILAKE(dev_priv))
|
||||
return 0;
|
||||
|
||||
if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9)
|
||||
return 1;
|
||||
|
||||
|
@ -16,10 +16,21 @@ config DRM_NOUVEAU
|
||||
select INPUT if ACPI && X86
|
||||
select THERMAL if ACPI && X86
|
||||
select ACPI_VIDEO if ACPI && X86
|
||||
select DRM_VM
|
||||
help
|
||||
Choose this option for open-source NVIDIA support.
|
||||
|
||||
config NOUVEAU_LEGACY_CTX_SUPPORT
|
||||
bool "Nouveau legacy context support"
|
||||
depends on DRM_NOUVEAU
|
||||
select DRM_VM
|
||||
default y
|
||||
help
|
||||
There was a version of the nouveau DDX that relied on legacy
|
||||
ctx ioctls not erroring out. But that was back in time a long
|
||||
ways, so offer a way to disable it now. For uapi compat with
|
||||
old nouveau ddx this should be on by default, but modern distros
|
||||
should consider turning it off.
|
||||
|
||||
config NOUVEAU_PLATFORM_DRIVER
|
||||
bool "Nouveau (NVIDIA) SoC GPUs"
|
||||
depends on DRM_NOUVEAU && ARCH_TEGRA
|
||||
|
@ -967,8 +967,11 @@ nouveau_driver_fops = {
|
||||
static struct drm_driver
|
||||
driver_stub = {
|
||||
.driver_features =
|
||||
DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
|
||||
DRIVER_KMS_LEGACY_CONTEXT,
|
||||
DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
|
||||
#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
|
||||
| DRIVER_KMS_LEGACY_CONTEXT
|
||||
#endif
|
||||
,
|
||||
|
||||
.load = nouveau_drm_load,
|
||||
.unload = nouveau_drm_unload,
|
||||
|
@ -923,12 +923,12 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
|
||||
ref_div_max = max(min(100 / post_div, ref_div_max), 1u);
|
||||
|
||||
/* get matching reference and feedback divider */
|
||||
*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
|
||||
*ref_div = min(max(den/post_div, 1u), ref_div_max);
|
||||
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
|
||||
|
||||
/* limit fb divider to its maximum */
|
||||
if (*fb_div > fb_div_max) {
|
||||
*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
|
||||
*ref_div = (*ref_div * fb_div_max)/(*fb_div);
|
||||
*fb_div = fb_div_max;
|
||||
}
|
||||
}
|
||||
|
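A brief worked example of the change in the two divider computations above (values are illustrative, not taken from a real mode): with den = 1006 and post_div = 10, DIV_ROUND_CLOSEST(den, post_div) yields 101 while den / post_div truncates to 100, so the replacement settles on the lower reference divider, which is what the "prefer lower reference dividers" patch in this merge is after; the same substitution in the fb_div clamp avoids rounding *ref_div upward when the feedback divider has to be limited to fb_div_max.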
@ -725,11 +725,16 @@ static const struct i2c_algorithm xiic_algorithm = {
|
||||
.functionality = xiic_func,
|
||||
};
|
||||
|
||||
static const struct i2c_adapter_quirks xiic_quirks = {
|
||||
.max_read_len = 255,
|
||||
};
|
||||
|
||||
static const struct i2c_adapter xiic_adapter = {
|
||||
.owner = THIS_MODULE,
|
||||
.name = DRIVER_NAME,
|
||||
.class = I2C_CLASS_DEPRECATED,
|
||||
.algo = &xiic_algorithm,
|
||||
.quirks = &xiic_quirks,
|
||||
};
|
||||
|
||||
|
||||
|
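The xiic hunk above caps reads by attaching a struct i2c_adapter_quirks with max_read_len to the adapter. A minimal sketch of the same pattern for a hypothetical controller driver follows; the foo_* names are invented for illustration, and the .algo/probe plumbing a real driver needs is omitted.

    #include <linux/i2c.h>
    #include <linux/module.h>

    /* Hypothetical adapter that, like xiic, cannot receive more than
     * 255 bytes in a single read message.
     */
    static const struct i2c_adapter_quirks foo_quirks = {
    	.max_read_len = 255,
    };

    static struct i2c_adapter foo_adapter = {
    	.owner  = THIS_MODULE,
    	.name   = "foo-i2c",
    	.quirks = &foo_quirks,	/* checked by the i2c core before each transfer */
    	/* .algo = &foo_algorithm, etc. omitted from this sketch */
    };

With the quirks attached, oversized read requests are rejected by the core instead of being passed to hardware that would truncate or corrupt them.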
@ -22,15 +22,6 @@
|
||||
#define AR71XX_RESET_REG_MISC_INT_ENABLE 4
|
||||
|
||||
#define ATH79_MISC_IRQ_COUNT 32
|
||||
#define ATH79_MISC_PERF_IRQ 5
|
||||
|
||||
static int ath79_perfcount_irq;
|
||||
|
||||
int get_c0_perfcount_int(void)
|
||||
{
|
||||
return ath79_perfcount_irq;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
|
||||
|
||||
static void ath79_misc_irq_handler(struct irq_desc *desc)
|
||||
{
|
||||
@ -122,8 +113,6 @@ static void __init ath79_misc_intc_domain_init(
|
||||
{
|
||||
void __iomem *base = domain->host_data;
|
||||
|
||||
ath79_perfcount_irq = irq_create_mapping(domain, ATH79_MISC_PERF_IRQ);
|
||||
|
||||
/* Disable and clear all interrupts */
|
||||
__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
|
||||
__raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
|
||||
|
@ -782,6 +782,8 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
|
||||
|
||||
if ((m->addr == 0x0) || (m->size == 0))
|
||||
return -EINVAL;
|
||||
if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
|
||||
return -EINVAL;
|
||||
|
||||
map_addr = (m->addr & PAGE_MASK);
|
||||
map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
|
||||
|
@ -582,6 +582,10 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
|
||||
/* determine space needed for page_list. */
|
||||
data = (unsigned long)uaddr;
|
||||
offs = offset_in_page(data);
|
||||
if (size > ULONG_MAX - PAGE_SIZE - offs) {
|
||||
m->size = 0; /* mark unused and not added */
|
||||
return -EINVAL;
|
||||
}
|
||||
m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
|
||||
|
||||
m->page_list = kcalloc(m->nr_pages,
|
||||
|
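A short worked illustration of the overflow the new check rejects (numbers are illustrative, on a 64-bit build with 4 KiB pages): for offs = 16 and a user-supplied size = ULONG_MAX - 8, offs + size wraps around to 7, so DIV_ROUND_UP(offs + size, PAGE_SIZE) would compute nr_pages = 1 even though the request was effectively unbounded; because ULONG_MAX - 8 is greater than ULONG_MAX - PAGE_SIZE - 16, the added test returns -EINVAL before any allocation is sized from the wrapped value.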
@ -1982,6 +1982,8 @@ static int mlx4_en_set_tunable(struct net_device *dev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define MLX4_EEPROM_PAGE_LEN 256
|
||||
|
||||
static int mlx4_en_get_module_info(struct net_device *dev,
|
||||
struct ethtool_modinfo *modinfo)
|
||||
{
|
||||
@ -2016,7 +2018,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
|
||||
break;
|
||||
case MLX4_MODULE_ID_SFP:
|
||||
modinfo->type = ETH_MODULE_SFF_8472;
|
||||
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
|
||||
modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
|
@ -2077,11 +2077,6 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
|
||||
size -= offset + size - I2C_PAGE_SIZE;
|
||||
|
||||
i2c_addr = I2C_ADDR_LOW;
|
||||
if (offset >= I2C_PAGE_SIZE) {
|
||||
/* Reset offset to high page */
|
||||
i2c_addr = I2C_ADDR_HIGH;
|
||||
offset -= I2C_PAGE_SIZE;
|
||||
}
|
||||
|
||||
cable_info = (struct mlx4_cable_info *)inmad->data;
|
||||
cable_info->dev_mem_address = cpu_to_be16(offset);
|
||||
|
@ -168,6 +168,7 @@ static int sfp__i2c_read(struct i2c_adapter *i2c, u8 bus_addr, u8 dev_addr,
|
||||
void *buf, size_t len)
|
||||
{
|
||||
struct i2c_msg msgs[2];
|
||||
size_t this_len;
|
||||
int ret;
|
||||
|
||||
msgs[0].addr = bus_addr;
|
||||
@ -179,11 +180,26 @@ static int sfp__i2c_read(struct i2c_adapter *i2c, u8 bus_addr, u8 dev_addr,
|
||||
msgs[1].len = len;
|
||||
msgs[1].buf = buf;
|
||||
|
||||
ret = i2c_transfer(i2c, msgs, ARRAY_SIZE(msgs));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
while (len) {
|
||||
this_len = len;
|
||||
if (this_len > 16)
|
||||
this_len = 16;
|
||||
|
||||
return ret == ARRAY_SIZE(msgs) ? len : 0;
|
||||
msgs[1].len = this_len;
|
||||
|
||||
ret = i2c_transfer(i2c, msgs, ARRAY_SIZE(msgs));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (ret != ARRAY_SIZE(msgs))
|
||||
break;
|
||||
|
||||
msgs[1].buf += this_len;
|
||||
dev_addr += this_len;
|
||||
len -= this_len;
|
||||
}
|
||||
|
||||
return msgs[1].buf - (u8 *)buf;
|
||||
}
|
||||
|
||||
static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 addr, void *buf,
|
||||
|
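The sfp hunk above replaces one large EEPROM read with a loop of at most 16 bytes per i2c_transfer(), because some SFP modules NAK longer reads. A self-contained sketch of the same chunking pattern follows; foo_read_chunked and its caller are hypothetical, while i2c_transfer(), struct i2c_msg and min_t() are the real kernel APIs.

    #include <linux/i2c.h>
    #include <linux/kernel.h>

    /* Sketch: read "len" bytes from an EEPROM-style device in chunks of at
     * most 16 bytes, mirroring the pattern the sfp patch introduces.
     */
    static int foo_read_chunked(struct i2c_adapter *adap, u8 bus_addr,
    			    u8 dev_addr, void *buf, size_t len)
    {
    	struct i2c_msg msgs[2];
    	u8 *p = buf;
    	int ret;

    	while (len) {
    		size_t this_len = min_t(size_t, len, 16);

    		msgs[0].addr  = bus_addr;
    		msgs[0].flags = 0;
    		msgs[0].len   = 1;
    		msgs[0].buf   = &dev_addr;	/* offset to start reading at */

    		msgs[1].addr  = bus_addr;
    		msgs[1].flags = I2C_M_RD;
    		msgs[1].len   = this_len;
    		msgs[1].buf   = p;

    		ret = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
    		if (ret < 0)
    			return ret;
    		if (ret != ARRAY_SIZE(msgs))
    			break;		/* short transfer: report what we have */

    		p += this_len;
    		dev_addr += this_len;
    		len -= this_len;
    	}

    	return p - (u8 *)buf;
    }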
@ -63,6 +63,7 @@ enum qmi_wwan_flags {
|
||||
|
||||
enum qmi_wwan_quirks {
|
||||
QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
|
||||
QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
|
||||
};
|
||||
|
||||
struct qmimux_hdr {
|
||||
@ -845,6 +846,16 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
|
||||
.data = QMI_WWAN_QUIRK_DTR,
|
||||
};
|
||||
|
||||
static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
|
||||
.description = "WWAN/QMI device",
|
||||
.flags = FLAG_WWAN | FLAG_SEND_ZLP,
|
||||
.bind = qmi_wwan_bind,
|
||||
.unbind = qmi_wwan_unbind,
|
||||
.manage_power = qmi_wwan_manage_power,
|
||||
.rx_fixup = qmi_wwan_rx_fixup,
|
||||
.data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
|
||||
};
|
||||
|
||||
#define HUAWEI_VENDOR_ID 0x12D1
|
||||
|
||||
/* map QMI/wwan function by a fixed interface number */
|
||||
@ -865,6 +876,15 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
|
||||
#define QMI_GOBI_DEVICE(vend, prod) \
|
||||
QMI_FIXED_INTF(vend, prod, 0)
|
||||
|
||||
/* Quectel does not use fixed interface numbers on at least some of their
|
||||
* devices. We need to check the number of endpoints to ensure that we bind to
|
||||
* the correct interface.
|
||||
*/
|
||||
#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
|
||||
USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
|
||||
USB_SUBCLASS_VENDOR_SPEC, 0xff), \
|
||||
.driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
|
||||
|
||||
static const struct usb_device_id products[] = {
|
||||
/* 1. CDC ECM like devices match on the control interface */
|
||||
{ /* Huawei E392, E398 and possibly others sharing both device id and more... */
|
||||
@ -969,6 +989,9 @@ static const struct usb_device_id products[] = {
|
||||
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
|
||||
.driver_info = (unsigned long)&qmi_wwan_info,
|
||||
},
|
||||
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
|
||||
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
|
||||
{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
|
||||
|
||||
/* 3. Combined interface devices matching on interface number */
|
||||
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
|
||||
@ -1258,11 +1281,9 @@ static const struct usb_device_id products[] = {
|
||||
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
|
||||
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
|
||||
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
|
||||
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
|
||||
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
|
||||
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
|
||||
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
|
||||
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
|
||||
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
|
||||
|
||||
/* 4. Gobi 1000 devices */
|
||||
@ -1344,6 +1365,7 @@ static int qmi_wwan_probe(struct usb_interface *intf,
|
||||
{
|
||||
struct usb_device_id *id = (struct usb_device_id *)prod;
|
||||
struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
|
||||
const struct driver_info *info;
|
||||
|
||||
/* Workaround to enable dynamic IDs. This disables usbnet
|
||||
* blacklisting functionality. Which, if required, can be
|
||||
@ -1373,6 +1395,19 @@ static int qmi_wwan_probe(struct usb_interface *intf,
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
info = (void *)&id->driver_info;
|
||||
|
||||
/* Several Quectel modems supports dynamic interface configuration, so
|
||||
* we need to match on class/subclass/protocol. These values are
|
||||
* identical for the diagnostic- and QMI-interface, but bNumEndpoints is
|
||||
* different. Ignore the current interface if the number of endpoints
|
||||
* equals the number for the diag interface (two).
|
||||
*/
|
||||
if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
|
||||
if (desc->bNumEndpoints == 2)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return usbnet_probe(intf, id);
|
||||
}
|
||||
|
||||
|
@ -565,8 +565,6 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
|
||||
/* We currently only support kernel addresses */
|
||||
BUG_ON(sid != KERNEL_SPACE);
|
||||
|
||||
mtsp(sid,1);
|
||||
|
||||
/*
|
||||
** WORD 1 - low order word
|
||||
** "hints" parm includes the VALID bit!
|
||||
@ -597,7 +595,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
|
||||
** Grab virtual index [0:11]
|
||||
** Deposit virt_idx bits into I/O PDIR word
|
||||
*/
|
||||
asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
|
||||
asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
|
||||
asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
|
||||
asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
|
||||
|
||||
|
@ -575,8 +575,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
|
||||
pa = virt_to_phys(vba);
|
||||
pa &= IOVP_MASK;
|
||||
|
||||
mtsp(sid,1);
|
||||
asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
|
||||
asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
|
||||
pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
|
||||
|
||||
pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
|
||||
|
@ -146,9 +146,6 @@ static void uart_start(struct tty_struct *tty)
|
||||
struct uart_port *port;
|
||||
unsigned long flags;
|
||||
|
||||
if (!state)
|
||||
return;
|
||||
|
||||
port = uart_port_lock(state, flags);
|
||||
__uart_start(tty);
|
||||
uart_port_unlock(port, flags);
|
||||
@ -1734,11 +1731,8 @@ static void uart_dtr_rts(struct tty_port *port, int raise)
|
||||
*/
|
||||
static int uart_open(struct tty_struct *tty, struct file *filp)
|
||||
{
|
||||
struct uart_driver *drv = tty->driver->driver_state;
|
||||
int retval, line = tty->index;
|
||||
struct uart_state *state = drv->state + line;
|
||||
|
||||
tty->driver_data = state;
|
||||
struct uart_state *state = tty->driver_data;
|
||||
int retval;
|
||||
|
||||
retval = tty_port_open(&state->port, tty, filp);
|
||||
if (retval > 0)
|
||||
@ -2421,9 +2415,6 @@ static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
|
||||
struct uart_state *state = drv->state + line;
|
||||
struct uart_port *port;
|
||||
|
||||
if (!state)
|
||||
return;
|
||||
|
||||
port = uart_port_ref(state);
|
||||
if (!port)
|
||||
return;
|
||||
@ -2435,7 +2426,18 @@ static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
|
||||
}
|
||||
#endif
|
||||
|
||||
static int uart_install(struct tty_driver *driver, struct tty_struct *tty)
|
||||
{
|
||||
struct uart_driver *drv = driver->driver_state;
|
||||
struct uart_state *state = drv->state + tty->index;
|
||||
|
||||
tty->driver_data = state;
|
||||
|
||||
return tty_standard_install(driver, tty);
|
||||
}
|
||||
|
||||
static const struct tty_operations uart_ops = {
|
||||
.install = uart_install,
|
||||
.open = uart_open,
|
||||
.close = uart_close,
|
||||
.write = uart_write,
|
||||
|
@ -614,9 +614,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
|
||||
if (xen_store_evtchn == 0)
|
||||
return -ENOENT;
|
||||
|
||||
nonseekable_open(inode, filp);
|
||||
|
||||
filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
|
||||
stream_open(inode, filp);
|
||||
|
||||
u = kzalloc(sizeof(*u), GFP_KERNEL);
|
||||
if (u == NULL)
|
||||
|
@ -178,7 +178,9 @@ void fuse_finish_open(struct inode *inode, struct file *file)
|
||||
file->f_op = &fuse_direct_io_file_operations;
|
||||
if (!(ff->open_flags & FOPEN_KEEP_CACHE))
|
||||
invalidate_inode_pages2(inode->i_mapping);
|
||||
if (ff->open_flags & FOPEN_NONSEEKABLE)
|
||||
if (ff->open_flags & FOPEN_STREAM)
|
||||
stream_open(inode, file);
|
||||
else if (ff->open_flags & FOPEN_NONSEEKABLE)
|
||||
nonseekable_open(inode, file);
|
||||
if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
|
||||
struct fuse_inode *fi = get_fuse_inode(inode);
|
||||
@ -2978,7 +2980,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
|
||||
offset + length > i_size_read(inode)) {
|
||||
err = inode_newsize_ok(inode, offset + length);
|
||||
if (err)
|
||||
return err;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!(mode & FALLOC_FL_KEEP_SIZE))
|
||||
|
fs/open.c: 18 added lines
@@ -1226,3 +1226,21 @@ int nonseekable_open(struct inode *inode, struct file *filp)
 }
 
 EXPORT_SYMBOL(nonseekable_open);
+
+/*
+ * stream_open is used by subsystems that want stream-like file descriptors.
+ * Such file descriptors are not seekable and don't have notion of position
+ * (file.f_pos is always 0). Contrary to file descriptors of other regular
+ * files, .read() and .write() can run simultaneously.
+ *
+ * stream_open never fails and is marked to return int so that it could be
+ * directly used as file_operations.open .
+ */
+int stream_open(struct inode *inode, struct file *filp)
+{
+	filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE | FMODE_ATOMIC_POS);
+	filp->f_mode |= FMODE_STREAM;
+	return 0;
+}
+
+EXPORT_SYMBOL(stream_open);
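stream_open() is meant to be used from (or directly as) a driver's .open method whenever neither .read nor .write looks at *ppos. A minimal sketch of such a stream-like character device follows; the foo_* handlers are hypothetical, while stream_open, no_llseek and the file_operations fields are the stock 4.14 APIs.

    #include <linux/fs.h>
    #include <linux/module.h>

    /* Sketch of a stream-like char device: neither handler touches *ppos,
     * so the fd has no meaningful position and read/write may run
     * concurrently without serializing on f_pos.
     */
    static ssize_t foo_read(struct file *file, char __user *buf,
    			size_t len, loff_t *ppos)
    {
    	/* ... copy queued data to "buf", possibly blocking ... */
    	return 0;
    }

    static ssize_t foo_write(struct file *file, const char __user *buf,
    			 size_t len, loff_t *ppos)
    {
    	/* ... consume "buf", possibly blocking ... */
    	return len;
    }

    static const struct file_operations foo_fops = {
    	.owner	= THIS_MODULE,
    	.open	= stream_open,	/* replaces nonseekable_open for stream files */
    	.read	= foo_read,
    	.write	= foo_write,
    	.llseek	= no_llseek,
    };

The scripts/coccinelle/api/stream_open.cocci rule added at the end of this commit exists precisely to find fops like this one and convert nonseekable_open to stream_open.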
@ -129,26 +129,27 @@ static const char *get_reason_str(enum kmsg_dump_reason reason)
|
||||
}
|
||||
}
|
||||
|
||||
bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
|
||||
/*
|
||||
* Should pstore_dump() wait for a concurrent pstore_dump()? If
|
||||
* not, the current pstore_dump() will report a failure to dump
|
||||
* and return.
|
||||
*/
|
||||
static bool pstore_cannot_wait(enum kmsg_dump_reason reason)
|
||||
{
|
||||
/*
|
||||
* In case of NMI path, pstore shouldn't be blocked
|
||||
* regardless of reason.
|
||||
*/
|
||||
/* In NMI path, pstore shouldn't block regardless of reason. */
|
||||
if (in_nmi())
|
||||
return true;
|
||||
|
||||
switch (reason) {
|
||||
/* In panic case, other cpus are stopped by smp_send_stop(). */
|
||||
case KMSG_DUMP_PANIC:
|
||||
/* Emergency restart shouldn't be blocked by spin lock. */
|
||||
/* Emergency restart shouldn't be blocked. */
|
||||
case KMSG_DUMP_EMERG:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
|
||||
|
||||
#ifdef CONFIG_PSTORE_ZLIB_COMPRESS
|
||||
/* Derived from logfs_compress() */
|
||||
@ -499,23 +500,23 @@ static void pstore_dump(struct kmsg_dumper *dumper,
|
||||
unsigned long total = 0;
|
||||
const char *why;
|
||||
unsigned int part = 1;
|
||||
unsigned long flags = 0;
|
||||
int is_locked;
|
||||
int ret;
|
||||
|
||||
why = get_reason_str(reason);
|
||||
|
||||
if (pstore_cannot_block_path(reason)) {
|
||||
is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
|
||||
if (!is_locked) {
|
||||
pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
|
||||
, in_nmi() ? "NMI" : why);
|
||||
if (down_trylock(&psinfo->buf_lock)) {
|
||||
/* Failed to acquire lock: give up if we cannot wait. */
|
||||
if (pstore_cannot_wait(reason)) {
|
||||
pr_err("dump skipped in %s path: may corrupt error record\n",
|
||||
in_nmi() ? "NMI" : why);
|
||||
return;
|
||||
}
|
||||
if (down_interruptible(&psinfo->buf_lock)) {
|
||||
pr_err("could not grab semaphore?!\n");
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
spin_lock_irqsave(&psinfo->buf_lock, flags);
|
||||
is_locked = 1;
|
||||
}
|
||||
|
||||
oopscount++;
|
||||
while (total < kmsg_bytes) {
|
||||
char *dst;
|
||||
@ -532,7 +533,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
|
||||
record.part = part;
|
||||
record.buf = psinfo->buf;
|
||||
|
||||
if (big_oops_buf && is_locked) {
|
||||
if (big_oops_buf) {
|
||||
dst = big_oops_buf;
|
||||
dst_size = big_oops_buf_sz;
|
||||
} else {
|
||||
@ -550,7 +551,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
|
||||
dst_size, &dump_size))
|
||||
break;
|
||||
|
||||
if (big_oops_buf && is_locked) {
|
||||
if (big_oops_buf) {
|
||||
zipped_len = pstore_compress(dst, psinfo->buf,
|
||||
header_size + dump_size,
|
||||
psinfo->bufsize);
|
||||
@ -573,8 +574,8 @@ static void pstore_dump(struct kmsg_dumper *dumper,
|
||||
total += record.size;
|
||||
part++;
|
||||
}
|
||||
if (is_locked)
|
||||
spin_unlock_irqrestore(&psinfo->buf_lock, flags);
|
||||
|
||||
up(&psinfo->buf_lock);
|
||||
}
|
||||
|
||||
static struct kmsg_dumper pstore_dumper = {
|
||||
@ -597,31 +598,14 @@ static void pstore_unregister_kmsg(void)
|
||||
#ifdef CONFIG_PSTORE_CONSOLE
|
||||
static void pstore_console_write(struct console *con, const char *s, unsigned c)
|
||||
{
|
||||
const char *e = s + c;
|
||||
struct pstore_record record;
|
||||
|
||||
while (s < e) {
|
||||
struct pstore_record record;
|
||||
unsigned long flags;
|
||||
pstore_record_init(&record, psinfo);
|
||||
record.type = PSTORE_TYPE_CONSOLE;
|
||||
|
||||
pstore_record_init(&record, psinfo);
|
||||
record.type = PSTORE_TYPE_CONSOLE;
|
||||
|
||||
if (c > psinfo->bufsize)
|
||||
c = psinfo->bufsize;
|
||||
|
||||
if (oops_in_progress) {
|
||||
if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
|
||||
break;
|
||||
} else {
|
||||
spin_lock_irqsave(&psinfo->buf_lock, flags);
|
||||
}
|
||||
record.buf = (char *)s;
|
||||
record.size = c;
|
||||
psinfo->write(&record);
|
||||
spin_unlock_irqrestore(&psinfo->buf_lock, flags);
|
||||
s += c;
|
||||
c = e - s;
|
||||
}
|
||||
record.buf = (char *)s;
|
||||
record.size = c;
|
||||
psinfo->write(&record);
|
||||
}
|
||||
|
||||
static struct console pstore_console = {
|
||||
@ -710,6 +694,7 @@ int pstore_register(struct pstore_info *psi)
|
||||
psi->write_user = pstore_write_user_compat;
|
||||
psinfo = psi;
|
||||
mutex_init(&psinfo->read_mutex);
|
||||
sema_init(&psinfo->buf_lock, 1);
|
||||
spin_unlock(&pstore_lock);
|
||||
|
||||
if (owner && !try_module_get(owner)) {
|
||||
@ -717,7 +702,8 @@ int pstore_register(struct pstore_info *psi)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
allocate_buf_for_compression();
|
||||
if (psi->flags & PSTORE_FLAGS_DMESG)
|
||||
allocate_buf_for_compression();
|
||||
|
||||
if (pstore_is_mounted())
|
||||
pstore_get_records(0);
|
||||
|
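The platform.c changes above replace psinfo->buf_lock with a semaphore so that pstore_dump() may sleep for the buffer when the dump reason allows it, and only fails fast in NMI/panic-style paths. A small sketch of that trylock-then-wait pattern, with invented foo_* names and only the standard semaphore API assumed:

    #include <linux/semaphore.h>
    #include <linux/errno.h>

    static DEFINE_SEMAPHORE(foo_buf_lock);	/* binary semaphore guarding a shared buffer */

    /* Sketch of the pattern pstore_dump() now uses. */
    static int foo_log_event(bool atomic_path)
    {
    	if (down_trylock(&foo_buf_lock)) {
    		/* Contended: in NMI/panic-style paths we must not sleep. */
    		if (atomic_path)
    			return -EBUSY;
    		/* Otherwise it is safe to sleep until the holder calls up(). */
    		if (down_interruptible(&foo_buf_lock))
    			return -ERESTARTSYS;
    	}

    	/* ... fill the shared buffer ... */

    	up(&foo_buf_lock);
    	return 0;
    }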
@ -806,27 +806,36 @@ static int ramoops_probe(struct platform_device *pdev)
|
||||
goto fail_init_mprz;
|
||||
|
||||
cxt->pstore.data = cxt;
|
||||
/*
|
||||
* Prepare frontend flags based on which areas are initialized.
|
||||
* For ramoops_init_przs() cases, the "max count" variable tells
|
||||
* if there are regions present. For ramoops_init_prz() cases,
|
||||
* the single region size is how to check.
|
||||
*/
|
||||
cxt->pstore.flags = 0;
|
||||
if (cxt->max_dump_cnt)
|
||||
cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
|
||||
if (cxt->console_size)
|
||||
cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
|
||||
if (cxt->max_ftrace_cnt)
|
||||
cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
|
||||
if (cxt->pmsg_size)
|
||||
cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
|
||||
|
||||
/*
|
||||
* Since bufsize is only used for dmesg crash dumps, it
|
||||
* must match the size of the dprz record (after PRZ header
|
||||
* and ECC bytes have been accounted for).
|
||||
*/
|
||||
cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
|
||||
cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
|
||||
if (!cxt->pstore.buf) {
|
||||
pr_err("cannot allocate pstore crash dump buffer\n");
|
||||
err = -ENOMEM;
|
||||
goto fail_clear;
|
||||
if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
|
||||
cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
|
||||
cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
|
||||
if (!cxt->pstore.buf) {
|
||||
pr_err("cannot allocate pstore crash dump buffer\n");
|
||||
err = -ENOMEM;
|
||||
goto fail_clear;
|
||||
}
|
||||
}
|
||||
spin_lock_init(&cxt->pstore.buf_lock);
|
||||
|
||||
cxt->pstore.flags = PSTORE_FLAGS_DMESG;
|
||||
if (cxt->console_size)
|
||||
cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
|
||||
if (cxt->ftrace_size)
|
||||
cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
|
||||
if (cxt->pmsg_size)
|
||||
cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
|
||||
|
||||
err = pstore_register(&cxt->pstore);
|
||||
if (err) {
|
||||
|
@@ -559,12 +559,13 @@ EXPORT_SYMBOL(vfs_write);
 
 static inline loff_t file_pos_read(struct file *file)
 {
-	return file->f_pos;
+	return file->f_mode & FMODE_STREAM ? 0 : file->f_pos;
 }
 
 static inline void file_pos_write(struct file *file, loff_t pos)
 {
-	file->f_pos = pos;
+	if ((file->f_mode & FMODE_STREAM) == 0)
+		file->f_pos = pos;
 }
 
 SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
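For context, this is roughly how the read(2) entry point right after the hunk uses the two helpers (reconstructed from the surrounding 4.14 fs/read_write.c, lightly simplified): for an FMODE_STREAM file the position read back is always 0 and the write-back is skipped, so concurrent readers and writers no longer serialize on, or corrupt, f_pos.

    SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
    {
    	struct fd f = fdget_pos(fd);
    	ssize_t ret = -EBADF;

    	if (f.file) {
    		loff_t pos = file_pos_read(f.file);	/* 0 for stream files */
    		ret = vfs_read(f.file, buf, count, &pos);
    		if (ret >= 0)
    			file_pos_write(f.file, pos);	/* no-op for stream files */
    		fdput_pos(f);
    	}
    	return ret;
    }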
@ -198,10 +198,14 @@ enum cpuhp_smt_control {
|
||||
extern enum cpuhp_smt_control cpu_smt_control;
|
||||
extern void cpu_smt_disable(bool force);
|
||||
extern void cpu_smt_check_topology(void);
|
||||
extern int cpuhp_smt_enable(void);
|
||||
extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
|
||||
#else
|
||||
# define cpu_smt_control (CPU_SMT_ENABLED)
|
||||
static inline void cpu_smt_disable(bool force) { }
|
||||
static inline void cpu_smt_check_topology(void) { }
|
||||
static inline int cpuhp_smt_enable(void) { return 0; }
|
||||
static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -148,6 +148,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
|
||||
/* Has write method(s) */
|
||||
#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
|
||||
|
||||
/* File is stream-like */
|
||||
#define FMODE_STREAM ((__force fmode_t)0x200000)
|
||||
|
||||
/* File was opened by fanotify and shouldn't generate fanotify events */
|
||||
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
|
||||
|
||||
@ -2975,6 +2978,7 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
|
||||
extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
|
||||
extern int generic_file_open(struct inode * inode, struct file * filp);
|
||||
extern int nonseekable_open(struct inode * inode, struct file * filp);
|
||||
extern int stream_open(struct inode * inode, struct file * filp);
|
||||
|
||||
#ifdef CONFIG_BLOCK
|
||||
typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
|
||||
|
@ -26,7 +26,7 @@
|
||||
#include <linux/errno.h>
|
||||
#include <linux/kmsg_dump.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/semaphore.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
@ -88,7 +88,7 @@ struct pstore_record {
|
||||
* @owner: module which is repsonsible for this backend driver
|
||||
* @name: name of the backend driver
|
||||
*
|
||||
* @buf_lock: spinlock to serialize access to @buf
|
||||
* @buf_lock: semaphore to serialize access to @buf
|
||||
* @buf: preallocated crash dump buffer
|
||||
* @bufsize: size of @buf available for crash dump bytes (must match
|
||||
* smallest number of bytes available for writing to a
|
||||
@ -173,7 +173,7 @@ struct pstore_info {
|
||||
struct module *owner;
|
||||
char *name;
|
||||
|
||||
spinlock_t buf_lock;
|
||||
struct semaphore buf_lock;
|
||||
char *buf;
|
||||
size_t bufsize;
|
||||
|
||||
@ -199,7 +199,6 @@ struct pstore_info {
|
||||
|
||||
extern int pstore_register(struct pstore_info *);
|
||||
extern void pstore_unregister(struct pstore_info *);
|
||||
extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
|
||||
|
||||
struct pstore_ftrace_record {
|
||||
unsigned long ip;
|
||||
|
@ -79,14 +79,12 @@ void synchronize_rcu(void);
|
||||
|
||||
static inline void __rcu_read_lock(void)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
|
||||
preempt_disable();
|
||||
preempt_disable();
|
||||
}
|
||||
|
||||
static inline void __rcu_read_unlock(void)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
|
||||
preempt_enable();
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
static inline void synchronize_rcu(void)
|
||||
|
@ -18,6 +18,7 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
|
||||
return val * hash_rnd[0];
|
||||
}
|
||||
|
||||
#ifdef CONFIG_INET
|
||||
static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
|
||||
{
|
||||
if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
|
||||
@ -25,6 +26,13 @@ static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev
|
||||
|
||||
return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
|
||||
}
|
||||
#else
|
||||
static inline
|
||||
struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
|
||||
{
|
||||
|
@ -199,8 +199,7 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
|
||||
{
|
||||
u32 cookie = 0;
|
||||
|
||||
if (rt->rt6i_flags & RTF_PCPU ||
|
||||
(unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
|
||||
if (rt->dst.from)
|
||||
rt = (struct rt6_info *)(rt->dst.from);
|
||||
|
||||
rt6_get_cookie_safe(rt, &cookie);
|
||||
|
@@ -853,7 +853,7 @@ struct drm_i915_gem_execbuffer2 {
 	 * struct drm_i915_gem_exec_fence *fences.
 	 */
 	__u64 cliprects_ptr;
-#define I915_EXEC_RING_MASK (7<<0)
+#define I915_EXEC_RING_MASK (0x3f)
 #define I915_EXEC_DEFAULT (0<<0)
 #define I915_EXEC_RENDER (1<<0)
 #define I915_EXEC_BSD (2<<0)
@@ -216,10 +216,12 @@ struct fuse_file_lock {
  * FOPEN_DIRECT_IO: bypass page cache for this open file
  * FOPEN_KEEP_CACHE: don't invalidate the data cache on open
  * FOPEN_NONSEEKABLE: the file is not seekable
+ * FOPEN_STREAM: the file is stream-like (no file position at all)
  */
 #define FOPEN_DIRECT_IO (1 << 0)
 #define FOPEN_KEEP_CACHE (1 << 1)
 #define FOPEN_NONSEEKABLE (1 << 2)
+#define FOPEN_STREAM (1 << 4)
 
 /**
  * INIT request/reply flags
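A userspace FUSE server opts into the new semantics by setting this bit in its FUSE_OPEN reply, which makes fuse_finish_open() above call stream_open() instead of nonseekable_open(). A minimal sketch against the raw wire protocol follows; foo_fill_open_reply is hypothetical, while struct fuse_open_out and the FOPEN_* flags come from this header.

    #include <linux/fuse.h>	/* userspace copy of the FUSE wire protocol */

    /* Sketch: server-side reply for a FIFO-like node.  FOPEN_STREAM may be
     * combined with FOPEN_DIRECT_IO when the page cache should be bypassed.
     */
    static void foo_fill_open_reply(struct fuse_open_out *out, uint64_t fh)
    {
    	out->fh = fh;
    	out->open_flags = FOPEN_STREAM;
    }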
@ -2144,7 +2144,7 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
|
||||
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
|
||||
}
|
||||
|
||||
static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
|
||||
int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
|
||||
{
|
||||
int cpu, ret = 0;
|
||||
|
||||
@ -2178,7 +2178,7 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int cpuhp_smt_enable(void)
|
||||
int cpuhp_smt_enable(void)
|
||||
{
|
||||
int cpu, ret = 0;
|
||||
|
||||
|
@ -259,6 +259,11 @@ void swsusp_show_speed(ktime_t start, ktime_t stop,
|
||||
(kps % 1000) / 10);
|
||||
}
|
||||
|
||||
__weak int arch_resume_nosmt(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* create_image - Create a hibernation image.
|
||||
* @platform_mode: Whether or not to use the platform driver.
|
||||
@ -323,6 +328,10 @@ static int create_image(int platform_mode)
|
||||
Enable_cpus:
|
||||
enable_nonboot_cpus();
|
||||
|
||||
/* Allow architectures to do nosmt-specific post-resume dances */
|
||||
if (!in_suspend)
|
||||
error = arch_resume_nosmt();
|
||||
|
||||
Platform_finish:
|
||||
platform_finish(platform_mode);
|
||||
|
||||
|
@ -222,30 +222,30 @@ static ssize_t config_show(struct device *dev,
|
||||
|
||||
mutex_lock(&test_fw_mutex);
|
||||
|
||||
len += snprintf(buf, PAGE_SIZE,
|
||||
len += scnprintf(buf, PAGE_SIZE - len,
|
||||
"Custom trigger configuration for: %s\n",
|
||||
dev_name(dev));
|
||||
|
||||
if (test_fw_config->name)
|
||||
len += snprintf(buf+len, PAGE_SIZE,
|
||||
len += scnprintf(buf+len, PAGE_SIZE - len,
|
||||
"name:\t%s\n",
|
||||
test_fw_config->name);
|
||||
else
|
||||
len += snprintf(buf+len, PAGE_SIZE,
|
||||
len += scnprintf(buf+len, PAGE_SIZE - len,
|
||||
"name:\tEMTPY\n");
|
||||
|
||||
len += snprintf(buf+len, PAGE_SIZE,
|
||||
len += scnprintf(buf+len, PAGE_SIZE - len,
|
||||
"num_requests:\t%u\n", test_fw_config->num_requests);
|
||||
|
||||
len += snprintf(buf+len, PAGE_SIZE,
|
||||
len += scnprintf(buf+len, PAGE_SIZE - len,
|
||||
"send_uevent:\t\t%s\n",
|
||||
test_fw_config->send_uevent ?
|
||||
"FW_ACTION_HOTPLUG" :
|
||||
"FW_ACTION_NOHOTPLUG");
|
||||
len += snprintf(buf+len, PAGE_SIZE,
|
||||
len += scnprintf(buf+len, PAGE_SIZE - len,
|
||||
"sync_direct:\t\t%s\n",
|
||||
test_fw_config->sync_direct ? "true" : "false");
|
||||
len += snprintf(buf+len, PAGE_SIZE,
|
||||
len += scnprintf(buf+len, PAGE_SIZE - len,
|
||||
"read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
|
||||
|
||||
mutex_unlock(&test_fw_mutex);
|
||||
|
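The test_firmware fix above matters because snprintf() returns the length the output would have had, not what was actually stored, so accumulating its return value can push buf+len past the end of the buffer on a later call. A small sketch of the safe pattern, with a hypothetical foo_show() and the usual sysfs assumption that buf is PAGE_SIZE bytes:

    #include <linux/kernel.h>

    static size_t foo_show(char *buf)	/* buf assumed to be PAGE_SIZE bytes */
    {
    	size_t len = 0;

    	len += scnprintf(buf + len, PAGE_SIZE - len, "name:\t%s\n", "example");
    	len += scnprintf(buf + len, PAGE_SIZE - len, "num:\t%u\n", 3u);
    	/* scnprintf() returns only the bytes actually stored, so "len" can
    	 * never pass PAGE_SIZE and "buf + len" never points outside the
    	 * buffer.  snprintf() would instead return the untruncated length,
    	 * letting "len" grow past PAGE_SIZE exactly as the old config_show()
    	 * above could.
    	 */
    	return len;
    }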
@ -892,8 +892,13 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
|
||||
if (rc >= 0)
|
||||
info.n_priv_flags = rc;
|
||||
}
|
||||
if (ops->get_regs_len)
|
||||
info.regdump_len = ops->get_regs_len(dev);
|
||||
if (ops->get_regs_len) {
|
||||
int ret = ops->get_regs_len(dev);
|
||||
|
||||
if (ret > 0)
|
||||
info.regdump_len = ret;
|
||||
}
|
||||
|
||||
if (ops->get_eeprom_len)
|
||||
info.eedump_len = ops->get_eeprom_len(dev);
|
||||
|
||||
@ -1394,6 +1399,9 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
|
||||
return -EFAULT;
|
||||
|
||||
reglen = ops->get_regs_len(dev);
|
||||
if (reglen <= 0)
|
||||
return reglen;
|
||||
|
||||
if (regs.len > reglen)
|
||||
regs.len = reglen;
|
||||
|
||||
@ -1404,13 +1412,16 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (regs.len < reglen)
|
||||
reglen = regs.len;
|
||||
|
||||
ops->get_regs(dev, &regs, regbuf);
|
||||
|
||||
ret = -EFAULT;
|
||||
if (copy_to_user(useraddr, &regs, sizeof(regs)))
|
||||
goto out;
|
||||
useraddr += offsetof(struct ethtool_regs, data);
|
||||
if (regbuf && copy_to_user(useraddr, regbuf, regs.len))
|
||||
if (copy_to_user(useraddr, regbuf, reglen))
|
||||
goto out;
|
||||
ret = 0;
|
||||
|
||||
|
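The ethtool core changes above stop trusting .get_regs_len unconditionally: a driver may legitimately return 0 or a negative errno, and the old code copied that value straight into info.regdump_len and used it to size the register dump. A driver-side sketch showing such a fallible callback; foo_query_fw_reg_count() is an assumed helper, everything else is the stock ethtool_ops interface.

    #include <linux/ethtool.h>

    /* Hypothetical driver: the length probe itself can fail, e.g. when the
     * firmware query behind it errors out, so it may return 0 or a negative
     * errno, which the patched core above now tolerates.
     */
    static int foo_get_regs_len(struct net_device *dev)
    {
    	int fw_words = foo_query_fw_reg_count(dev);	/* assumed helper */

    	if (fw_words < 0)
    		return fw_words;	/* no longer leaks into regdump_len */

    	return fw_words * sizeof(u32);
    }

    static const struct ethtool_ops foo_ethtool_ops = {
    	.get_regs_len	= foo_get_regs_len,
    	/* .get_regs = foo_get_regs, ... */
    };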
@ -30,6 +30,7 @@
|
||||
#include <linux/times.h>
|
||||
#include <net/net_namespace.h>
|
||||
#include <net/neighbour.h>
|
||||
#include <net/arp.h>
|
||||
#include <net/dst.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/netevent.h>
|
||||
@ -2549,7 +2550,13 @@ int neigh_xmit(int index, struct net_device *dev,
|
||||
if (!tbl)
|
||||
goto out;
|
||||
rcu_read_lock_bh();
|
||||
neigh = __neigh_lookup_noref(tbl, addr, dev);
|
||||
if (index == NEIGH_ARP_TABLE) {
|
||||
u32 key = *((u32 *)addr);
|
||||
|
||||
neigh = __ipv4_neigh_lookup_noref(dev, key);
|
||||
} else {
|
||||
neigh = __neigh_lookup_noref(tbl, addr, dev);
|
||||
}
|
||||
if (!neigh)
|
||||
neigh = __neigh_create(tbl, addr, dev, false);
|
||||
err = PTR_ERR(neigh);
|
||||
|
@ -3149,7 +3149,13 @@ static int pktgen_wait_thread_run(struct pktgen_thread *t)
|
||||
{
|
||||
while (thread_is_running(t)) {
|
||||
|
||||
/* note: 't' will still be around even after the unlock/lock
|
||||
* cycle because pktgen_thread threads are only cleared at
|
||||
* net exit
|
||||
*/
|
||||
mutex_unlock(&pktgen_thread_lock);
|
||||
msleep_interruptible(100);
|
||||
mutex_lock(&pktgen_thread_lock);
|
||||
|
||||
if (signal_pending(current))
|
||||
goto signal;
|
||||
@ -3164,6 +3170,10 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
|
||||
struct pktgen_thread *t;
|
||||
int sig = 1;
|
||||
|
||||
/* prevent from racing with rmmod */
|
||||
if (!try_module_get(THIS_MODULE))
|
||||
return sig;
|
||||
|
||||
mutex_lock(&pktgen_thread_lock);
|
||||
|
||||
list_for_each_entry(t, &pn->pktgen_threads, th_list) {
|
||||
@ -3177,6 +3187,7 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
|
||||
t->control |= (T_STOP);
|
||||
|
||||
mutex_unlock(&pktgen_thread_lock);
|
||||
module_put(THIS_MODULE);
|
||||
return sig;
|
||||
}
|
||||
|
||||
|
@ -782,6 +782,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
|
||||
struct sockcm_cookie sockc;
|
||||
struct ipcm6_cookie ipc6;
|
||||
int addr_len = msg->msg_namelen;
|
||||
int hdrincl;
|
||||
u16 proto;
|
||||
int err;
|
||||
|
||||
@ -795,6 +796,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
|
||||
if (msg->msg_flags & MSG_OOB)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* hdrincl should be READ_ONCE(inet->hdrincl)
|
||||
* but READ_ONCE() doesn't work with bit fields.
|
||||
* Doing this indirectly yields the same result.
|
||||
*/
|
||||
hdrincl = inet->hdrincl;
|
||||
hdrincl = READ_ONCE(hdrincl);
|
||||
|
||||
/*
|
||||
* Get and verify the address.
|
||||
*/
|
||||
@ -889,11 +897,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
|
||||
opt = ipv6_fixup_options(&opt_space, opt);
|
||||
|
||||
fl6.flowi6_proto = proto;
|
||||
rfv.msg = msg;
|
||||
rfv.hlen = 0;
|
||||
err = rawv6_probe_proto_opt(&rfv, &fl6);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
if (!hdrincl) {
|
||||
rfv.msg = msg;
|
||||
rfv.hlen = 0;
|
||||
err = rawv6_probe_proto_opt(&rfv, &fl6);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!ipv6_addr_any(daddr))
|
||||
fl6.daddr = *daddr;
|
||||
@ -910,7 +921,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
|
||||
fl6.flowi6_oif = np->ucast_oif;
|
||||
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
|
||||
|
||||
if (inet->hdrincl)
|
||||
if (hdrincl)
|
||||
fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
|
||||
|
||||
if (ipc6.tclass < 0)
|
||||
@ -933,7 +944,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
|
||||
goto do_confirm;
|
||||
|
||||
back_from_confirm:
|
||||
if (inet->hdrincl)
|
||||
if (hdrincl)
|
||||
err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, msg->msg_flags);
|
||||
else {
|
||||
ipc6.opt = opt;
|
||||
|
@ -416,12 +416,14 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
|
||||
wait_clean_list_grace();
|
||||
|
||||
list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
|
||||
if (ibmr_ret)
|
||||
if (ibmr_ret) {
|
||||
*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
|
||||
|
||||
clean_nodes = clean_nodes->next;
|
||||
}
|
||||
/* more than one entry in llist nodes */
|
||||
if (clean_nodes->next)
|
||||
llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
|
||||
if (clean_nodes)
|
||||
llist_add_batch(clean_nodes, clean_tail,
|
||||
&pool->clean_list);
|
||||
|
||||
}
|
||||
|
||||
|
@ -2318,7 +2318,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
|
||||
union sctp_addr addr;
|
||||
struct sctp_af *af;
|
||||
int src_match = 0;
|
||||
char *cookie;
|
||||
|
||||
/* We must include the address that the INIT packet came from.
|
||||
* This is the only address that matters for an INIT packet.
|
||||
@ -2422,14 +2421,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
|
||||
/* Peer Rwnd : Current calculated value of the peer's rwnd. */
|
||||
asoc->peer.rwnd = asoc->peer.i.a_rwnd;
|
||||
|
||||
/* Copy cookie in case we need to resend COOKIE-ECHO. */
|
||||
cookie = asoc->peer.cookie;
|
||||
if (cookie) {
|
||||
asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp);
|
||||
if (!asoc->peer.cookie)
|
||||
goto clean_up;
|
||||
}
|
||||
|
||||
/* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
|
||||
* high (for example, implementations MAY use the size of the receiver
|
||||
* advertised window).
|
||||
@ -2595,7 +2586,9 @@ do_addr_param:
|
||||
case SCTP_PARAM_STATE_COOKIE:
|
||||
asoc->peer.cookie_len =
|
||||
ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
|
||||
asoc->peer.cookie = param.cookie->body;
|
||||
asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
|
||||
if (!asoc->peer.cookie)
|
||||
retval = 0;
|
||||
break;
|
||||
|
||||
case SCTP_PARAM_HEARTBEAT_INFO:
|
||||
|
@ -878,6 +878,11 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
|
||||
asoc->rto_initial;
|
||||
}
|
||||
|
||||
if (sctp_state(asoc, ESTABLISHED)) {
|
||||
kfree(asoc->peer.cookie);
|
||||
asoc->peer.cookie = NULL;
|
||||
}
|
||||
|
||||
if (sctp_state(asoc, ESTABLISHED) ||
|
||||
sctp_state(asoc, CLOSED) ||
|
||||
sctp_state(asoc, SHUTDOWN_RECEIVED)) {
|
||||
|
scripts/coccinelle/api/stream_open.cocci: new file, 363 lines
@ -0,0 +1,363 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Author: Kirill Smelkov (kirr@nexedi.com)
|
||||
//
|
||||
// Search for stream-like files that are using nonseekable_open and convert
|
||||
// them to stream_open. A stream-like file is a file that does not use ppos in
|
||||
// its read and write. Rationale for the conversion is to avoid deadlock in
|
||||
// between read and write.
|
||||
|
||||
virtual report
|
||||
virtual patch
|
||||
virtual explain // explain decisions in the patch (SPFLAGS="-D explain")
|
||||
|
||||
// stream-like reader & writer - ones that do not depend on f_pos.
|
||||
@ stream_reader @
|
||||
identifier readstream, ppos;
|
||||
identifier f, buf, len;
|
||||
type loff_t;
|
||||
@@
|
||||
ssize_t readstream(struct file *f, char *buf, size_t len, loff_t *ppos)
|
||||
{
|
||||
... when != ppos
|
||||
}
|
||||
|
||||
@ stream_writer @
|
||||
identifier writestream, ppos;
|
||||
identifier f, buf, len;
|
||||
type loff_t;
|
||||
@@
|
||||
ssize_t writestream(struct file *f, const char *buf, size_t len, loff_t *ppos)
|
||||
{
|
||||
... when != ppos
|
||||
}
|
||||
|
||||
|
||||
// a function that blocks
|
||||
@ blocks @
|
||||
identifier block_f;
|
||||
identifier wait_event =~ "^wait_event_.*";
|
||||
@@
|
||||
block_f(...) {
|
||||
... when exists
|
||||
wait_event(...)
|
||||
... when exists
|
||||
}
|
||||
|
||||
// stream_reader that can block inside.
|
||||
//
|
||||
// XXX wait_* can be called not directly from current function (e.g. func -> f -> g -> wait())
|
||||
// XXX currently reader_blocks supports only direct and 1-level indirect cases.
|
||||
@ reader_blocks_direct @
|
||||
identifier stream_reader.readstream;
|
||||
identifier wait_event =~ "^wait_event_.*";
|
||||
@@
|
||||
readstream(...)
|
||||
{
|
||||
... when exists
|
||||
wait_event(...)
|
||||
... when exists
|
||||
}
|
||||
|
||||
@ reader_blocks_1 @
|
||||
identifier stream_reader.readstream;
|
||||
identifier blocks.block_f;
|
||||
@@
|
||||
readstream(...)
|
||||
{
|
||||
... when exists
|
||||
block_f(...)
|
||||
... when exists
|
||||
}
|
||||
|
||||
@ reader_blocks depends on reader_blocks_direct || reader_blocks_1 @
|
||||
identifier stream_reader.readstream;
|
||||
@@
|
||||
readstream(...) {
|
||||
...
|
||||
}
|
||||
|
||||
|
||||
// file_operations + whether they have _any_ .read, .write, .llseek ... at all.
|
||||
//
|
||||
// XXX add support for file_operations xxx[N] = ... (sound/core/pcm_native.c)
|
||||
@ fops0 @
|
||||
identifier fops;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
...
|
||||
};
|
||||
|
||||
@ has_read @
|
||||
identifier fops0.fops;
|
||||
identifier read_f;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.read = read_f,
|
||||
};
|
||||
|
||||
@ has_read_iter @
|
||||
identifier fops0.fops;
|
||||
identifier read_iter_f;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.read_iter = read_iter_f,
|
||||
};
|
||||
|
||||
@ has_write @
|
||||
identifier fops0.fops;
|
||||
identifier write_f;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.write = write_f,
|
||||
};
|
||||
|
||||
@ has_write_iter @
|
||||
identifier fops0.fops;
|
||||
identifier write_iter_f;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.write_iter = write_iter_f,
|
||||
};
|
||||
|
||||
@ has_llseek @
|
||||
identifier fops0.fops;
|
||||
identifier llseek_f;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.llseek = llseek_f,
|
||||
};
|
||||
|
||||
@ has_no_llseek @
|
||||
identifier fops0.fops;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.llseek = no_llseek,
|
||||
};
|
||||
|
||||
@ has_mmap @
|
||||
identifier fops0.fops;
|
||||
identifier mmap_f;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.mmap = mmap_f,
|
||||
};
|
||||
|
||||
@ has_copy_file_range @
|
||||
identifier fops0.fops;
|
||||
identifier copy_file_range_f;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.copy_file_range = copy_file_range_f,
|
||||
};
|
||||
|
||||
@ has_remap_file_range @
|
||||
identifier fops0.fops;
|
||||
identifier remap_file_range_f;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.remap_file_range = remap_file_range_f,
|
||||
};
|
||||
|
||||
@ has_splice_read @
|
||||
identifier fops0.fops;
|
||||
identifier splice_read_f;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.splice_read = splice_read_f,
|
||||
};
|
||||
|
||||
@ has_splice_write @
|
||||
identifier fops0.fops;
|
||||
identifier splice_write_f;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.splice_write = splice_write_f,
|
||||
};
|
||||
|
||||
|
||||
// file_operations that is candidate for stream_open conversion - it does not
|
||||
// use mmap and other methods that assume @offset access to file.
|
||||
//
|
||||
// XXX for simplicity require no .{read/write}_iter and no .splice_{read/write} for now.
|
||||
// XXX maybe_steam.fops cannot be used in other rules - it gives "bad rule maybe_stream or bad variable fops".
|
||||
@ maybe_stream depends on (!has_llseek || has_no_llseek) && !has_mmap && !has_copy_file_range && !has_remap_file_range && !has_read_iter && !has_write_iter && !has_splice_read && !has_splice_write @
|
||||
identifier fops0.fops;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
};
|
||||
|
||||
|
||||
// ---- conversions ----
|
||||
|
||||
// XXX .open = nonseekable_open -> .open = stream_open
|
||||
// XXX .open = func -> openfunc -> nonseekable_open
|
||||
|
||||
// read & write
|
||||
//
|
||||
// if both are used in the same file_operations together with an opener -
|
||||
// under that conditions we can use stream_open instead of nonseekable_open.
|
||||
@ fops_rw depends on maybe_stream @
|
||||
identifier fops0.fops, openfunc;
|
||||
identifier stream_reader.readstream;
|
||||
identifier stream_writer.writestream;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.open = openfunc,
|
||||
.read = readstream,
|
||||
.write = writestream,
|
||||
};
|
||||
|
||||
@ report_rw depends on report @
|
||||
identifier fops_rw.openfunc;
|
||||
position p1;
|
||||
@@
|
||||
openfunc(...) {
|
||||
<...
|
||||
nonseekable_open@p1
|
||||
...>
|
||||
}
|
||||
|
||||
@ script:python depends on report && reader_blocks @
|
||||
fops << fops0.fops;
|
||||
p << report_rw.p1;
|
||||
@@
|
||||
coccilib.report.print_report(p[0],
|
||||
"ERROR: %s: .read() can deadlock .write(); change nonseekable_open -> stream_open to fix." % (fops,))
|
||||
|
||||
@ script:python depends on report && !reader_blocks @
|
||||
fops << fops0.fops;
|
||||
p << report_rw.p1;
|
||||
@@
|
||||
coccilib.report.print_report(p[0],
|
||||
"WARNING: %s: .read() and .write() have stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
|
||||
|
||||
|
||||
@ explain_rw_deadlocked depends on explain && reader_blocks @
|
||||
identifier fops_rw.openfunc;
|
||||
@@
|
||||
openfunc(...) {
|
||||
<...
|
||||
- nonseekable_open
|
||||
+ nonseekable_open /* read & write (was deadlock) */
|
||||
...>
|
||||
}
|
||||
|
||||
|
||||
@ explain_rw_nodeadlock depends on explain && !reader_blocks @
|
||||
identifier fops_rw.openfunc;
|
||||
@@
|
||||
openfunc(...) {
|
||||
<...
|
||||
- nonseekable_open
|
||||
+ nonseekable_open /* read & write (no direct deadlock) */
|
||||
...>
|
||||
}
|
||||
|
||||
@ patch_rw depends on patch @
|
||||
identifier fops_rw.openfunc;
|
||||
@@
|
||||
openfunc(...) {
|
||||
<...
|
||||
- nonseekable_open
|
||||
+ stream_open
|
||||
...>
|
||||
}
|
||||
|
||||
|
||||
// read, but not write
|
||||
@ fops_r depends on maybe_stream && !has_write @
|
||||
identifier fops0.fops, openfunc;
|
||||
identifier stream_reader.readstream;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.open = openfunc,
|
||||
.read = readstream,
|
||||
};
|
||||
|
||||
@ report_r depends on report @
|
||||
identifier fops_r.openfunc;
|
||||
position p1;
|
||||
@@
|
||||
openfunc(...) {
|
||||
<...
|
||||
nonseekable_open@p1
|
||||
...>
|
||||
}
|
||||
|
||||
@ script:python depends on report @
|
||||
fops << fops0.fops;
|
||||
p << report_r.p1;
|
||||
@@
|
||||
coccilib.report.print_report(p[0],
|
||||
"WARNING: %s: .read() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
|
||||
|
||||
@ explain_r depends on explain @
|
||||
identifier fops_r.openfunc;
|
||||
@@
|
||||
openfunc(...) {
|
||||
<...
|
||||
- nonseekable_open
|
||||
+ nonseekable_open /* read only */
|
||||
...>
|
||||
}
|
||||
|
||||
@ patch_r depends on patch @
|
||||
identifier fops_r.openfunc;
|
||||
@@
|
||||
openfunc(...) {
|
||||
<...
|
||||
- nonseekable_open
|
||||
+ stream_open
|
||||
...>
|
||||
}
|
||||
|
||||
|
||||
// write, but not read
|
||||
@ fops_w depends on maybe_stream && !has_read @
|
||||
identifier fops0.fops, openfunc;
|
||||
identifier stream_writer.writestream;
|
||||
@@
|
||||
struct file_operations fops = {
|
||||
.open = openfunc,
|
||||
.write = writestream,
|
||||
};
|
||||
|
||||
@ report_w depends on report @
|
||||
identifier fops_w.openfunc;
|
||||
position p1;
|
||||
@@
|
||||
openfunc(...) {
|
||||
<...
|
||||
nonseekable_open@p1
|
||||
...>
|
||||
}
|
||||
|
||||
@ script:python depends on report @
|
||||
fops << fops0.fops;
|
||||
p << report_w.p1;
|
||||
@@
|
||||
coccilib.report.print_report(p[0],
|
||||
"WARNING: %s: .write() has stream semantic; safe to change nonseekable_open -> stream_open." % (fops,))
|
||||
|
||||
@ explain_w depends on explain @
|
||||
identifier fops_w.openfunc;
|
||||
@@
|
||||
openfunc(...) {
|
||||
<...
|
||||
- nonseekable_open
|
||||
+ nonseekable_open /* write only */
|
||||
...>
|
||||
}
|
||||
|
||||
@ patch_w depends on patch @
|
||||
identifier fops_w.openfunc;
|
||||
@@
|
||||
openfunc(...) {
|
||||
<...
|
||||
- nonseekable_open
|
||||
+ stream_open
|
||||
...>
|
||||
}
|
||||
|
||||
|
||||
// no read, no write - don't change anything
|