Mirror of https://github.com/rd-stuffs/msm-4.14.git (synced 2025-02-20 11:45:48 +08:00)
This is the 4.14.300 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmOA76gACgkQONu9yGCS
aT6Otg//dHBmQrTUsn9PalX7N8EfYPYzS46gb3VP5Zz2FWeXUs8uw6SIG6s/ChWt
9u7T6VI6e8IwQRCzhsQBlPYKzWMl80bZXft8VpahdhtSxZ0bnWoVK+KH/x+0rono
3Xe/yZHmkEEe2R2pn+kIBG6Bn3IIcTdlRM9EOy6Hy1yiiklq2T+z9y8yaBuYi0fs
QG1lbU/hDc6WoYAjt4xYDUAfkPvPT7NfCHLyFAr7q0G64E8QkgQr3BqHCDeEtOlw
dewGYUVHAluWWOUohPHyW+22gXep/eWUFxdathiamrYACR9dOIMYOczKAvorWMCF
qSRADK6NB/tIC6kc7pR0foj0yUSowh5AYDMPxu+lF/W13bEV/3m9MBSJMztzGIys
4dW/RG4IjtgR3LBL8fffeXCnmy2VgFOeUqbhKchSQtrJq2DDqdeOcA1RJZpej4v+
leSLitRshASAXu+vBeSzyQC8Y1m4vH/uDzBlEA+tHFpavhx3nP4+JoAoXfUDcWAo
rNiUq2+/X7iqpm8nNa3UGKqTefW/ztXU2BtF+n1H5tfFPIb/L+j9LEnScSUlbtRN
i478lX6pfkx/hfF30lAb0vySyfz0ed6+neEykW4n1mrlE22rxrlH1SIb4o1M6Njr
cns7/0aQ6wNxkkShKUttlhpmqPth+ANTMUW2gYfvtkYrDYeiD30=
=Cb2k
-----END PGP SIGNATURE-----

Merge 4.14.300 into android-4.14-stable

Changes in 4.14.300
HID: hyperv: fix possible memory leak in mousevsc_probe()
net: gso: fix panic on frag_list with mixed head alloc types
bnxt_en: fix potentially incorrect return value for ndo_rx_flow_steer
net: fman: Unregister ethernet device on removal
capabilities: fix undefined behavior in bit shift for CAP_TO_MASK
net: lapbether: fix issue of dev reference count leakage in lapbeth_device_event()
hamradio: fix issue of dev reference count leakage in bpq_device_event()
drm/vc4: Fix missing platform_unregister_drivers() call in vc4_drm_register()
ipv6: addrlabel: fix infoleak when sending struct ifaddrlblmsg to network
tipc: fix the msg->req tlv len check in tipc_nl_compat_name_table_dump_header
dmaengine: mv_xor_v2: Fix a resource leak in mv_xor_v2_remove()
drivers: net: xgene: disable napi when register irq failed in xgene_enet_open()
net: cxgb3_main: disable napi when bind qsets failed in cxgb_up()
ethernet: s2io: disable napi when start nic failed in s2io_card_up()
net: mv643xx_eth: disable napi when init rxq or txq failed in mv643xx_eth_open()
net: macvlan: fix memory leaks of macvlan_common_newlink
arm64: efi: Fix handling of misaligned runtime regions and drop warning
ALSA: hda: fix potential memleak in 'add_widget_node'
ALSA: usb-audio: Add quirk entry for M-Audio Micro
nilfs2: fix deadlock in nilfs_count_free_blocks()
drm/i915/dmabuf: fix sg_table handling in map_dma_buf
platform/x86: hp_wmi: Fix rfkill causing soft blocked wifi
btrfs: selftests: fix wrong error check in btrfs_free_dummy_root()
udf: Fix a slab-out-of-bounds write bug in udf_find_entry()
cert host tools: Stop complaining about deprecated OpenSSL functions
dmaengine: at_hdmac: Fix at_lli struct definition
dmaengine: at_hdmac: Don't start transactions at tx_submit level
dmaengine: at_hdmac: Fix completion of unissued descriptor in case of errors
dmaengine: at_hdmac: Don't allow CPU to reorder channel enable
dmaengine: at_hdmac: Fix impossible condition
dmaengine: at_hdmac: Check return code of dma_async_device_register
x86/cpu: Restore AMD's DE_CFG MSR after resume
selftests/futex: fix build for clang
drm/imx: imx-tve: Fix return type of imx_tve_connector_mode_valid
Bluetooth: L2CAP: Fix l2cap_global_chan_by_psm
ASoC: core: Fix use-after-free in snd_soc_exit()
serial: 8250_omap: remove wait loop from Errata i202 workaround
serial: 8250: omap: Flush PM QOS work on remove
tty: n_gsm: fix sleep-in-atomic-context bug in gsm_control_send
ASoC: soc-utils: Remove __exit for snd_soc_util_exit()
block: sed-opal: kmalloc the cmd/resp buffers
parport_pc: Avoid FIFO port location truncation
pinctrl: devicetree: fix null pointer dereferencing in pinctrl_dt_to_map
net: bgmac: Drop free_netdev() from bgmac_enet_remove()
mISDN: fix possible memory leak in mISDN_dsp_element_register()
mISDN: fix misuse of put_device() in mISDN_register_device()
net: caif: fix double disconnect client in chnl_net_open()
xen/pcpu: fix possible memory leak in register_pcpu()
drbd: use after free in drbd_create_device()
net/x25: Fix skb leak in x25_lapb_receive_frame()
cifs: Fix wrong return value checking when GETFLAGS
ftrace: Fix the possible incorrect kernel message
ftrace: Optimize the allocation for mcount entries
ftrace: Fix null pointer dereference in ftrace_add_mod()
ring_buffer: Do not deactivate non-existant pages
ALSA: usb-audio: Drop snd_BUG_ON() from snd_usbmidi_output_open()
USB: serial: option: add Sierra Wireless EM9191
USB: serial: option: remove old LARA-R6 PID
USB: serial: option: add u-blox LARA-R6 00B modem
USB: serial: option: add u-blox LARA-L6 modem
USB: serial: option: add Fibocom FM160 0x0111 composition
usb: add NO_LPM quirk for Realforce 87U Keyboard
usb: chipidea: fix deadlock in ci_otg_del_timer
iio: adc: at91_adc: fix possible memory leak in at91_adc_allocate_trigger()
iio: trigger: sysfs: fix possible memory leak in iio_sysfs_trig_init()
iio: pressure: ms5611: changed hardcoded SPI speed to value limited
dm ioctl: fix misbehavior if list_versions races with module loading
serial: 8250: Fall back to non-DMA Rx if IIR_RDI occurs
serial: 8250_lpss: Configure DMA also w/o DMA filter
mmc: core: properly select voltage range without power cycle
mmc: sdhci-pci: Fix possible memory leak caused by missing pci_dev_put()
misc/vmw_vmci: fix an infoleak in vmci_host_do_receive_datagram()
nilfs2: fix use-after-free bug of ns_writer on remount
serial: 8250: Flush DMA Rx on RLSI
macvlan: enforce a consistent minimal mtu
tcp: cdg: allow tcp_cdg_release() to be called multiple times
kcm: avoid potential race in kcm_tx_work
bpf, test_run: Fix alignment problem in bpf_prog_test_run_skb()
kcm: close race conditions on sk_receive_queue
9p: trans_fd/p9_conn_cancel: drop client lock earlier
gfs2: Check sb_bsize_shift after reading superblock
gfs2: Switch from strlcpy to strscpy
9p/trans_fd: always use O_NONBLOCK read/write
mm: fs: initialize fsdata passed to write_begin/write_end interface
ntfs: fix use-after-free in ntfs_attr_find()
ntfs: fix out-of-bounds read in ntfs_attr_find()
ntfs: check overflow when iterating ATTR_RECORDs
Linux 4.14.300

Change-Id: I6e30b49a26cfda34ab6d259641dc4ea488d312eb
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 980d7f36ac

Changed file: Makefile (2 changed lines)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 299
+SUBLEVEL = 300
 EXTRAVERSION =
 NAME = Petit Gorille
@@ -16,6 +16,14 @@
 #include <asm/efi.h>

+static bool region_is_misaligned(const efi_memory_desc_t *md)
+{
+	if (PAGE_SIZE == EFI_PAGE_SIZE)
+		return false;
+	return !PAGE_ALIGNED(md->phys_addr) ||
+	       !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
+}
+
 /*
  * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
  * executable, everything else can be mapped with the XN bits
@@ -29,14 +37,22 @@ static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
 	if (type == EFI_MEMORY_MAPPED_IO)
 		return PROT_DEVICE_nGnRE;

-	if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
-		      "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
+	if (region_is_misaligned(md)) {
+		static bool __initdata code_is_misaligned;
+
 		/*
-		 * If the region is not aligned to the page size of the OS, we
-		 * can not use strict permissions, since that would also affect
-		 * the mapping attributes of the adjacent regions.
+		 * Regions that are not aligned to the OS page size cannot be
+		 * mapped with strict permissions, as those might interfere
+		 * with the permissions that are needed by the adjacent
+		 * region's mapping. However, if we haven't encountered any
+		 * misaligned runtime code regions so far, we can safely use
+		 * non-executable permissions for non-code regions.
 		 */
-		return pgprot_val(PAGE_KERNEL_EXEC);
+		code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);
+
+		return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
+					  : pgprot_val(PAGE_KERNEL);
+	}

 	/* R-- */
 	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
@@ -64,19 +80,16 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 	bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
 				   md->type == EFI_RUNTIME_SERVICES_DATA);

-	if (!PAGE_ALIGNED(md->phys_addr) ||
-	    !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
-		/*
-		 * If the end address of this region is not aligned to page
-		 * size, the mapping is rounded up, and may end up sharing a
-		 * page frame with the next UEFI memory region. If we create
-		 * a block entry now, we may need to split it again when mapping
-		 * the next region, and support for that is going to be removed
-		 * from the MMU routines. So avoid block mappings altogether in
-		 * that case.
-		 */
+	/*
+	 * If this region is not aligned to the page size used by the OS, the
+	 * mapping will be rounded outwards, and may end up sharing a page
+	 * frame with an adjacent runtime memory region. Given that the page
+	 * table descriptor covering the shared page will be rewritten when the
+	 * adjacent region gets mapped, we must avoid block mappings here so we
+	 * don't have to worry about splitting them when that happens.
+	 */
+	if (region_is_misaligned(md))
 		page_mappings_only = true;
-	}

 	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
 			   md->num_pages << EFI_PAGE_SHIFT,
@@ -104,6 +117,9 @@ int __init efi_set_mapping_permissions(struct mm_struct *mm,
 	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
 	       md->type != EFI_RUNTIME_SERVICES_DATA);

+	if (region_is_misaligned(md))
+		return 0;
+
 	/*
 	 * Calling apply_to_page_range() is only safe on regions that are
 	 * guaranteed to be mapped down to pages. Since we are only called
@@ -399,6 +399,11 @@
 #define MSR_AMD64_OSVW_STATUS		0xc0010141
 #define MSR_AMD64_LS_CFG		0xc0011020
 #define MSR_AMD64_DC_CFG		0xc0011022
+
+#define MSR_AMD64_DE_CFG		0xc0011029
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT	1
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE	BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
+
 #define MSR_AMD64_BU_CFG2		0xc001102a
 #define MSR_AMD64_IBSFETCHCTL		0xc0011030
 #define MSR_AMD64_IBSFETCHLINAD		0xc0011031
@@ -450,9 +455,6 @@
 #define FAM10H_MMIO_CONF_BASE_MASK	0xfffffffULL
 #define FAM10H_MMIO_CONF_BASE_SHIFT	20
 #define MSR_FAM10H_NODE_ID		0xc001100c
-#define MSR_F10H_DECFG			0xc0011029
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT	1
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE	BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)

 /* K8 MSRs */
 #define MSR_K8_TOP_MEM1		0xc001001a
@ -761,8 +761,6 @@ static void init_amd_gh(struct cpuinfo_x86 *c)
|
||||
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
|
||||
}
|
||||
|
||||
#define MSR_AMD64_DE_CFG 0xC0011029
|
||||
|
||||
static void init_amd_ln(struct cpuinfo_x86 *c)
|
||||
{
|
||||
/*
|
||||
@ -934,16 +932,16 @@ static void init_amd(struct cpuinfo_x86 *c)
|
||||
* msr_set_bit() uses the safe accessors, too, even if the MSR
|
||||
* is not present.
|
||||
*/
|
||||
msr_set_bit(MSR_F10H_DECFG,
|
||||
MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT);
|
||||
msr_set_bit(MSR_AMD64_DE_CFG,
|
||||
MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);
|
||||
|
||||
/*
|
||||
* Verify that the MSR write was successful (could be running
|
||||
* under a hypervisor) and only then assume that LFENCE is
|
||||
* serializing.
|
||||
*/
|
||||
ret = rdmsrl_safe(MSR_F10H_DECFG, &val);
|
||||
if (!ret && (val & MSR_F10H_DECFG_LFENCE_SERIALIZE)) {
|
||||
ret = rdmsrl_safe(MSR_AMD64_DE_CFG, &val);
|
||||
if (!ret && (val & MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)) {
|
||||
/* A serializing LFENCE stops RDTSC speculation */
|
||||
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
|
||||
} else {
|
||||
|
@ -3645,9 +3645,9 @@ static int svm_get_msr_feature(struct kvm_msr_entry *msr)
|
||||
msr->data = 0;
|
||||
|
||||
switch (msr->index) {
|
||||
case MSR_F10H_DECFG:
|
||||
if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
|
||||
msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
|
||||
case MSR_AMD64_DE_CFG:
|
||||
if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
|
||||
msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
|
||||
break;
|
||||
default:
|
||||
return 1;
|
||||
@ -3756,7 +3756,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||
msr_info->data = 0x1E;
|
||||
}
|
||||
break;
|
||||
case MSR_F10H_DECFG:
|
||||
case MSR_AMD64_DE_CFG:
|
||||
msr_info->data = svm->msr_decfg;
|
||||
break;
|
||||
default:
|
||||
@ -3948,7 +3948,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
|
||||
case MSR_VM_IGNNE:
|
||||
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
|
||||
break;
|
||||
case MSR_F10H_DECFG: {
|
||||
case MSR_AMD64_DE_CFG: {
|
||||
struct kvm_msr_entry msr_entry;
|
||||
|
||||
msr_entry.index = msr->index;
|
||||
|
@ -1063,7 +1063,7 @@ static unsigned num_emulated_msrs;
|
||||
* can be used by a hypervisor to validate requested CPU features.
|
||||
*/
|
||||
static u32 msr_based_features[] = {
|
||||
MSR_F10H_DECFG,
|
||||
MSR_AMD64_DE_CFG,
|
||||
MSR_IA32_UCODE_REV,
|
||||
MSR_IA32_ARCH_CAPABILITIES,
|
||||
};
|
||||
|
@ -533,6 +533,7 @@ static void pm_save_spec_msr(void)
|
||||
MSR_TSX_FORCE_ABORT,
|
||||
MSR_IA32_MCU_OPT_CTRL,
|
||||
MSR_AMD64_LS_CFG,
|
||||
MSR_AMD64_DE_CFG,
|
||||
};
|
||||
|
||||
msr_build_context(spec_msr_id, ARRAY_SIZE(spec_msr_id));
|
||||
|
@ -94,8 +94,8 @@ struct opal_dev {
|
||||
u64 lowest_lba;
|
||||
|
||||
size_t pos;
|
||||
u8 cmd[IO_BUFFER_LENGTH];
|
||||
u8 resp[IO_BUFFER_LENGTH];
|
||||
u8 *cmd;
|
||||
u8 *resp;
|
||||
|
||||
struct parsed_resp parsed;
|
||||
size_t prev_d_len;
|
||||
@ -2011,6 +2011,8 @@ void free_opal_dev(struct opal_dev *dev)
|
||||
if (!dev)
|
||||
return;
|
||||
clean_opal_dev(dev);
|
||||
kfree(dev->resp);
|
||||
kfree(dev->cmd);
|
||||
kfree(dev);
|
||||
}
|
||||
EXPORT_SYMBOL(free_opal_dev);
|
||||
@ -2023,16 +2025,38 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv)
|
||||
if (!dev)
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* Presumably DMA-able buffers must be cache-aligned. Kmalloc makes
|
||||
* sure the allocated buffer is DMA-safe in that regard.
|
||||
*/
|
||||
dev->cmd = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
|
||||
if (!dev->cmd)
|
||||
goto err_free_dev;
|
||||
|
||||
dev->resp = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
|
||||
if (!dev->resp)
|
||||
goto err_free_cmd;
|
||||
|
||||
INIT_LIST_HEAD(&dev->unlk_lst);
|
||||
mutex_init(&dev->dev_lock);
|
||||
dev->data = data;
|
||||
dev->send_recv = send_recv;
|
||||
if (check_opal_support(dev) != 0) {
|
||||
pr_debug("Opal is not supported on this device\n");
|
||||
kfree(dev);
|
||||
return NULL;
|
||||
goto err_free_resp;
|
||||
}
|
||||
return dev;
|
||||
|
||||
err_free_resp:
|
||||
kfree(dev->resp);
|
||||
|
||||
err_free_cmd:
|
||||
kfree(dev->cmd);
|
||||
|
||||
err_free_dev:
|
||||
kfree(dev);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(init_opal_dev);
|
||||
|
||||
|
@ -2804,7 +2804,7 @@ static int init_submitter(struct drbd_device *device)
|
||||
enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
|
||||
{
|
||||
struct drbd_resource *resource = adm_ctx->resource;
|
||||
struct drbd_connection *connection;
|
||||
struct drbd_connection *connection, *n;
|
||||
struct drbd_device *device;
|
||||
struct drbd_peer_device *peer_device, *tmp_peer_device;
|
||||
struct gendisk *disk;
|
||||
@ -2933,7 +2933,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
|
||||
out_idr_remove_vol:
|
||||
idr_remove(&connection->peer_devices, vnr);
|
||||
out_idr_remove_from_resource:
|
||||
for_each_connection(connection, resource) {
|
||||
for_each_connection_safe(connection, n, resource) {
|
||||
peer_device = idr_remove(&connection->peer_devices, vnr);
|
||||
if (peer_device)
|
||||
kref_put(&connection->kref, drbd_destroy_connection);
|
||||
|
@ -252,6 +252,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
|
||||
ATC_SPIP_BOUNDARY(first->boundary));
|
||||
channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
|
||||
ATC_DPIP_BOUNDARY(first->boundary));
|
||||
/* Don't allow CPU to reorder channel enable. */
|
||||
wmb();
|
||||
dma_writel(atdma, CHER, atchan->mask);
|
||||
|
||||
vdbg_dump_regs(atchan);
|
||||
@ -312,7 +314,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
|
||||
struct at_desc *desc_first = atc_first_active(atchan);
|
||||
struct at_desc *desc;
|
||||
int ret;
|
||||
u32 ctrla, dscr, trials;
|
||||
u32 ctrla, dscr;
|
||||
unsigned int i;
|
||||
|
||||
/*
|
||||
* If the cookie doesn't match to the currently running transfer then
|
||||
@ -382,7 +385,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
|
||||
dscr = channel_readl(atchan, DSCR);
|
||||
rmb(); /* ensure DSCR is read before CTRLA */
|
||||
ctrla = channel_readl(atchan, CTRLA);
|
||||
for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
|
||||
for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
|
||||
u32 new_dscr;
|
||||
|
||||
rmb(); /* ensure DSCR is read after CTRLA */
|
||||
@ -408,7 +411,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
|
||||
rmb(); /* ensure DSCR is read before CTRLA */
|
||||
ctrla = channel_readl(atchan, CTRLA);
|
||||
}
|
||||
if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
|
||||
if (unlikely(i == ATC_MAX_DSCR_TRIALS))
|
||||
return -ETIMEDOUT;
|
||||
|
||||
/* for the first descriptor we can be more accurate */
|
||||
@ -556,10 +559,6 @@ static void atc_handle_error(struct at_dma_chan *atchan)
|
||||
bad_desc = atc_first_active(atchan);
|
||||
list_del_init(&bad_desc->desc_node);
|
||||
|
||||
/* As we are stopped, take advantage to push queued descriptors
|
||||
* in active_list */
|
||||
list_splice_init(&atchan->queue, atchan->active_list.prev);
|
||||
|
||||
/* Try to restart the controller */
|
||||
if (!list_empty(&atchan->active_list))
|
||||
atc_dostart(atchan, atc_first_active(atchan));
|
||||
@ -680,19 +679,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
|
||||
spin_lock_irqsave(&atchan->lock, flags);
|
||||
cookie = dma_cookie_assign(tx);
|
||||
|
||||
if (list_empty(&atchan->active_list)) {
|
||||
dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
|
||||
desc->txd.cookie);
|
||||
atc_dostart(atchan, desc);
|
||||
list_add_tail(&desc->desc_node, &atchan->active_list);
|
||||
} else {
|
||||
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
|
||||
desc->txd.cookie);
|
||||
list_add_tail(&desc->desc_node, &atchan->queue);
|
||||
}
|
||||
|
||||
list_add_tail(&desc->desc_node, &atchan->queue);
|
||||
spin_unlock_irqrestore(&atchan->lock, flags);
|
||||
|
||||
dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
|
||||
desc->txd.cookie);
|
||||
return cookie;
|
||||
}
|
||||
|
||||
@ -1962,7 +1953,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
|
||||
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
|
||||
plat_dat->nr_channels);
|
||||
|
||||
dma_async_device_register(&atdma->dma_common);
|
||||
err = dma_async_device_register(&atdma->dma_common);
|
||||
if (err) {
|
||||
dev_err(&pdev->dev, "Unable to register: %d.\n", err);
|
||||
goto err_dma_async_device_register;
|
||||
}
|
||||
|
||||
/*
|
||||
* Do not return an error if the dmac node is not present in order to
|
||||
@ -1982,6 +1977,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
|
||||
|
||||
err_of_dma_controller_register:
|
||||
dma_async_device_unregister(&atdma->dma_common);
|
||||
err_dma_async_device_register:
|
||||
dma_pool_destroy(atdma->memset_pool);
|
||||
err_memset_pool_create:
|
||||
dma_pool_destroy(atdma->dma_desc_pool);
|
||||
|
@ -168,13 +168,13 @@
|
||||
/* LLI == Linked List Item; aka DMA buffer descriptor */
|
||||
struct at_lli {
|
||||
/* values that are not changed by hardware */
|
||||
dma_addr_t saddr;
|
||||
dma_addr_t daddr;
|
||||
u32 saddr;
|
||||
u32 daddr;
|
||||
/* value that may get written back: */
|
||||
u32 ctrla;
|
||||
u32 ctrla;
|
||||
/* more values that are not changed by hardware */
|
||||
u32 ctrlb;
|
||||
dma_addr_t dscr; /* chain to next lli */
|
||||
u32 ctrlb;
|
||||
u32 dscr; /* chain to next lli */
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -901,6 +901,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
|
||||
tasklet_kill(&xor_dev->irq_tasklet);
|
||||
|
||||
clk_disable_unprepare(xor_dev->clk);
|
||||
clk_disable_unprepare(xor_dev->reg_clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -55,13 +55,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
|
||||
goto err_unpin_pages;
|
||||
}
|
||||
|
||||
ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
|
||||
ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL);
|
||||
if (ret)
|
||||
goto err_free;
|
||||
|
||||
src = obj->mm.pages->sgl;
|
||||
dst = st->sgl;
|
||||
for (i = 0; i < obj->mm.pages->nents; i++) {
|
||||
for (i = 0; i < obj->mm.pages->orig_nents; i++) {
|
||||
sg_set_page(dst, sg_page(src), src->length, 0);
|
||||
dst = sg_next(dst);
|
||||
src = sg_next(src);
|
||||
|
@ -243,8 +243,9 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int imx_tve_connector_mode_valid(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode)
|
||||
static enum drm_mode_status
|
||||
imx_tve_connector_mode_valid(struct drm_connector *connector,
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
struct imx_tve *tve = con_to_tve(connector);
|
||||
unsigned long rate;
|
||||
|
@ -364,7 +364,12 @@ static int __init vc4_drm_register(void)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return platform_driver_register(&vc4_platform_driver);
|
||||
ret = platform_driver_register(&vc4_platform_driver);
|
||||
if (ret)
|
||||
platform_unregister_drivers(component_drivers,
|
||||
ARRAY_SIZE(component_drivers));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit vc4_drm_unregister(void)
|
||||
|
@ -500,7 +500,7 @@ static int mousevsc_probe(struct hv_device *device,
|
||||
|
||||
ret = hid_add_device(hid_dev);
|
||||
if (ret)
|
||||
goto probe_err1;
|
||||
goto probe_err2;
|
||||
|
||||
|
||||
ret = hid_parse(hid_dev);
|
||||
|
@ -618,8 +618,10 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev,
|
||||
trig->ops = &at91_adc_trigger_ops;
|
||||
|
||||
ret = iio_trigger_register(trig);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
iio_trigger_free(trig);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return trig;
|
||||
}
|
||||
|
@ -95,7 +95,7 @@ static int ms5611_spi_probe(struct spi_device *spi)
|
||||
spi_set_drvdata(spi, indio_dev);
|
||||
|
||||
spi->mode = SPI_MODE_0;
|
||||
spi->max_speed_hz = 20000000;
|
||||
spi->max_speed_hz = min(spi->max_speed_hz, 20000000U);
|
||||
spi->bits_per_word = 8;
|
||||
ret = spi_setup(spi);
|
||||
if (ret < 0)
|
||||
|
@ -212,9 +212,13 @@ static int iio_sysfs_trigger_remove(int id)
|
||||
|
||||
static int __init iio_sysfs_trig_init(void)
|
||||
{
|
||||
int ret;
|
||||
device_initialize(&iio_sysfs_trig_dev);
|
||||
dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger");
|
||||
return device_add(&iio_sysfs_trig_dev);
|
||||
ret = device_add(&iio_sysfs_trig_dev);
|
||||
if (ret)
|
||||
put_device(&iio_sysfs_trig_dev);
|
||||
return ret;
|
||||
}
|
||||
module_init(iio_sysfs_trig_init);
|
||||
|
||||
|
@ -231,7 +231,7 @@ mISDN_register_device(struct mISDNdevice *dev,
|
||||
|
||||
err = get_free_devid();
|
||||
if (err < 0)
|
||||
goto error1;
|
||||
return err;
|
||||
dev->id = err;
|
||||
|
||||
device_initialize(&dev->dev);
|
||||
|
@ -97,6 +97,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem)
|
||||
if (!entry)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&entry->list);
|
||||
entry->elem = elem;
|
||||
|
||||
entry->dev.class = elements_class;
|
||||
@ -131,7 +132,7 @@ err2:
|
||||
device_unregister(&entry->dev);
|
||||
return ret;
|
||||
err1:
|
||||
kfree(entry);
|
||||
put_device(&entry->dev);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(mISDN_dsp_element_register);
|
||||
|
@ -573,7 +573,7 @@ static void list_version_get_needed(struct target_type *tt, void *needed_param)
|
||||
size_t *needed = needed_param;
|
||||
|
||||
*needed += sizeof(struct dm_target_versions);
|
||||
*needed += strlen(tt->name);
|
||||
*needed += strlen(tt->name) + 1;
|
||||
*needed += ALIGN_MASK;
|
||||
}
|
||||
|
||||
@ -628,7 +628,7 @@ static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param
|
||||
iter_info.old_vers = NULL;
|
||||
iter_info.vers = vers;
|
||||
iter_info.flags = 0;
|
||||
iter_info.end = (char *)vers+len;
|
||||
iter_info.end = (char *)vers + needed;
|
||||
|
||||
/*
|
||||
* Now loop through filling out the names & versions.
|
||||
|
@ -952,6 +952,7 @@ static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
|
||||
u32 context_id = vmci_get_context_id();
|
||||
struct vmci_event_qp ev;
|
||||
|
||||
memset(&ev, 0, sizeof(ev));
|
||||
ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
|
||||
ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
|
||||
VMCI_CONTEXT_RESOURCE_ID);
|
||||
@ -1563,6 +1564,7 @@ static int qp_notify_peer(bool attach,
|
||||
* kernel.
|
||||
*/
|
||||
|
||||
memset(&ev, 0, sizeof(ev));
|
||||
ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
|
||||
ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
|
||||
VMCI_CONTEXT_RESOURCE_ID);
|
||||
|
@ -1463,7 +1463,13 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
|
||||
mmc_power_cycle(host, ocr);
|
||||
} else {
|
||||
bit = fls(ocr) - 1;
|
||||
ocr &= 3 << bit;
|
||||
/*
|
||||
* The bit variable represents the highest voltage bit set in
|
||||
* the OCR register.
|
||||
* To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V),
|
||||
* we must shift the mask '3' with (bit - 1).
|
||||
*/
|
||||
ocr &= 3 << (bit - 1);
|
||||
if (bit != host->ios.vdd)
|
||||
dev_warn(mmc_dev(host), "exceeding card's volts\n");
|
||||
}
|
||||
|
@ -1277,6 +1277,8 @@ static int amd_probe(struct sdhci_pci_chip *chip)
|
||||
}
|
||||
}
|
||||
|
||||
pci_dev_put(smbus_dev);
|
||||
|
||||
if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ)
|
||||
chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
|
||||
|
||||
|
@ -1015,8 +1015,10 @@ static int xgene_enet_open(struct net_device *ndev)
|
||||
|
||||
xgene_enet_napi_enable(pdata);
|
||||
ret = xgene_enet_register_irq(ndev);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
xgene_enet_napi_disable(pdata);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (ndev->phydev) {
|
||||
phy_start(ndev->phydev);
|
||||
|
@ -1565,7 +1565,6 @@ void bgmac_enet_remove(struct bgmac *bgmac)
|
||||
phy_disconnect(bgmac->net_dev->phydev);
|
||||
netif_napi_del(&bgmac->napi);
|
||||
bgmac_dma_free(bgmac);
|
||||
free_netdev(bgmac->net_dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
|
||||
|
||||
|
@ -7504,8 +7504,8 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
|
||||
rcu_read_lock();
|
||||
hlist_for_each_entry_rcu(fltr, head, hash) {
|
||||
if (bnxt_fltr_match(fltr, new_fltr)) {
|
||||
rc = fltr->sw_id;
|
||||
rcu_read_unlock();
|
||||
rc = 0;
|
||||
goto err_free;
|
||||
}
|
||||
}
|
||||
|
@ -1304,6 +1304,7 @@ static int cxgb_up(struct adapter *adap)
|
||||
if (ret < 0) {
|
||||
CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
|
||||
t3_intr_disable(adap);
|
||||
quiesce_rx(adap);
|
||||
free_irq_resources(adap);
|
||||
err = ret;
|
||||
goto out;
|
||||
|
@ -955,12 +955,21 @@ _return:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mac_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct mac_device *mac_dev = platform_get_drvdata(pdev);
|
||||
|
||||
platform_device_unregister(mac_dev->priv->eth_dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver mac_driver = {
|
||||
.driver = {
|
||||
.name = KBUILD_MODNAME,
|
||||
.of_match_table = mac_match,
|
||||
},
|
||||
.probe = mac_probe,
|
||||
.remove = mac_remove,
|
||||
};
|
||||
|
||||
builtin_platform_driver(mac_driver);
|
||||
|
@ -2495,6 +2495,7 @@ out_free:
|
||||
for (i = 0; i < mp->rxq_count; i++)
|
||||
rxq_deinit(mp->rxq + i);
|
||||
out:
|
||||
napi_disable(&mp->napi);
|
||||
free_irq(dev->irq, dev);
|
||||
|
||||
return err;
|
||||
|
@ -7132,9 +7132,8 @@ static int s2io_card_up(struct s2io_nic *sp)
|
||||
if (ret) {
|
||||
DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
|
||||
dev->name);
|
||||
s2io_reset(sp);
|
||||
free_rx_buffers(sp);
|
||||
return -ENOMEM;
|
||||
ret = -ENOMEM;
|
||||
goto err_fill_buff;
|
||||
}
|
||||
DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
|
||||
ring->rx_bufs_left);
|
||||
@ -7172,18 +7171,16 @@ static int s2io_card_up(struct s2io_nic *sp)
|
||||
/* Enable Rx Traffic and interrupts on the NIC */
|
||||
if (start_nic(sp)) {
|
||||
DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
|
||||
s2io_reset(sp);
|
||||
free_rx_buffers(sp);
|
||||
return -ENODEV;
|
||||
ret = -ENODEV;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* Add interrupt service routine */
|
||||
if (s2io_add_isr(sp) != 0) {
|
||||
if (sp->config.intr_type == MSI_X)
|
||||
s2io_rem_isr(sp);
|
||||
s2io_reset(sp);
|
||||
free_rx_buffers(sp);
|
||||
return -ENODEV;
|
||||
ret = -ENODEV;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
|
||||
@ -7202,6 +7199,20 @@ static int s2io_card_up(struct s2io_nic *sp)
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_out:
|
||||
if (config->napi) {
|
||||
if (config->intr_type == MSI_X) {
|
||||
for (i = 0; i < sp->config.rx_ring_num; i++)
|
||||
napi_disable(&sp->mac_control.rings[i].napi);
|
||||
} else {
|
||||
napi_disable(&sp->napi);
|
||||
}
|
||||
}
|
||||
err_fill_buff:
|
||||
s2io_reset(sp);
|
||||
free_rx_buffers(sp);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -551,7 +551,7 @@ static int bpq_device_event(struct notifier_block *this,
|
||||
if (!net_eq(dev_net(dev), &init_net))
|
||||
return NOTIFY_DONE;
|
||||
|
||||
if (!dev_is_ethdev(dev))
|
||||
if (!dev_is_ethdev(dev) && !bpq_get_ax25_dev(dev))
|
||||
return NOTIFY_DONE;
|
||||
|
||||
switch (event) {
|
||||
|
@ -1139,7 +1139,7 @@ void macvlan_common_setup(struct net_device *dev)
|
||||
{
|
||||
ether_setup(dev);
|
||||
|
||||
dev->min_mtu = 0;
|
||||
/* ether_setup() has set dev->min_mtu to ETH_MIN_MTU. */
|
||||
dev->max_mtu = ETH_MAX_MTU;
|
||||
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
|
||||
netif_keep_dst(dev);
|
||||
@ -1456,8 +1456,10 @@ destroy_macvlan_port:
|
||||
/* the macvlan port may be freed by macvlan_uninit when fail to register.
|
||||
* so we destroy the macvlan port only when it's valid.
|
||||
*/
|
||||
if (create && macvlan_port_get_rtnl(lowerdev))
|
||||
if (create && macvlan_port_get_rtnl(lowerdev)) {
|
||||
macvlan_flush_sources(port, vlan);
|
||||
macvlan_port_destroy(port->dev);
|
||||
}
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(macvlan_common_newlink);
|
||||
|
@ -406,7 +406,7 @@ static int lapbeth_device_event(struct notifier_block *this,
|
||||
if (dev_net(dev) != &init_net)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
if (!dev_is_ethdev(dev))
|
||||
if (!dev_is_ethdev(dev) && !lapbeth_get_x25_dev(dev))
|
||||
return NOTIFY_DONE;
|
||||
|
||||
switch (event) {
|
||||
|
@ -474,7 +474,7 @@ static size_t parport_pc_fifo_write_block_pio(struct parport *port,
|
||||
const unsigned char *bufp = buf;
|
||||
size_t left = length;
|
||||
unsigned long expire = jiffies + port->physport->cad->timeout;
|
||||
const int fifo = FIFO(port);
|
||||
const unsigned long fifo = FIFO(port);
|
||||
int poll_for = 8; /* 80 usecs */
|
||||
const struct parport_pc_private *priv = port->physport->private_data;
|
||||
const int fifo_depth = priv->fifo_depth;
|
||||
|
@ -228,6 +228,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
|
||||
for (state = 0; ; state++) {
|
||||
/* Retrieve the pinctrl-* property */
|
||||
propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
|
||||
if (!propname)
|
||||
return -ENOMEM;
|
||||
prop = of_find_property(np, propname, &size);
|
||||
kfree(propname);
|
||||
if (!prop) {
|
||||
|
@ -894,8 +894,16 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
|
||||
wwan_rfkill = NULL;
|
||||
rfkill2_count = 0;
|
||||
|
||||
if (hp_wmi_rfkill_setup(device))
|
||||
hp_wmi_rfkill2_setup(device);
|
||||
/*
|
||||
* In pre-2009 BIOS, command 1Bh return 0x4 to indicate that
|
||||
* BIOS no longer controls the power for the wireless
|
||||
* devices. All features supported by this command will no
|
||||
* longer be supported.
|
||||
*/
|
||||
if (!hp_wmi_bios_2009_later()) {
|
||||
if (hp_wmi_rfkill_setup(device))
|
||||
hp_wmi_rfkill2_setup(device);
|
||||
}
|
||||
|
||||
err = device_create_file(&device->dev, &dev_attr_display);
|
||||
if (err)
|
||||
|
@ -1425,7 +1425,7 @@ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
|
||||
unsigned int command, u8 *data, int clen)
|
||||
{
|
||||
struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control),
|
||||
GFP_KERNEL);
|
||||
GFP_ATOMIC);
|
||||
unsigned long flags;
|
||||
if (ctrl == NULL)
|
||||
return NULL;
|
||||
|
@ -249,8 +249,13 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port
|
||||
struct dw_dma_slave *rx_param, *tx_param;
|
||||
struct device *dev = port->port.dev;
|
||||
|
||||
if (!lpss->dma_param.dma_dev)
|
||||
if (!lpss->dma_param.dma_dev) {
|
||||
dma = port->dma;
|
||||
if (dma)
|
||||
goto out_configuration_only;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL);
|
||||
if (!rx_param)
|
||||
@ -261,16 +266,18 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port
|
||||
return -ENOMEM;
|
||||
|
||||
*rx_param = lpss->dma_param;
|
||||
dma->rxconf.src_maxburst = lpss->dma_maxburst;
|
||||
|
||||
*tx_param = lpss->dma_param;
|
||||
dma->txconf.dst_maxburst = lpss->dma_maxburst;
|
||||
|
||||
dma->fn = lpss8250_dma_filter;
|
||||
dma->rx_param = rx_param;
|
||||
dma->tx_param = tx_param;
|
||||
|
||||
port->dma = dma;
|
||||
|
||||
out_configuration_only:
|
||||
dma->rxconf.src_maxburst = lpss->dma_maxburst;
|
||||
dma->txconf.dst_maxburst = lpss->dma_maxburst;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -160,27 +160,10 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
|
||||
static void omap_8250_mdr1_errataset(struct uart_8250_port *up,
|
||||
struct omap8250_priv *priv)
|
||||
{
|
||||
u8 timeout = 255;
|
||||
|
||||
serial_out(up, UART_OMAP_MDR1, priv->mdr1);
|
||||
udelay(2);
|
||||
serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
|
||||
UART_FCR_CLEAR_RCVR);
|
||||
/*
|
||||
* Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
|
||||
* TX_FIFO_E bit is 1.
|
||||
*/
|
||||
while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
|
||||
(UART_LSR_THRE | UART_LSR_DR))) {
|
||||
timeout--;
|
||||
if (!timeout) {
|
||||
/* Should *never* happen. we warn and carry on */
|
||||
dev_crit(up->port.dev, "Errata i202: timedout %x\n",
|
||||
serial_in(up, UART_LSR));
|
||||
break;
|
||||
}
|
||||
udelay(1);
|
||||
}
|
||||
}
|
||||
|
||||
static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud,
|
||||
@ -1268,6 +1251,7 @@ static int omap8250_remove(struct platform_device *pdev)
|
||||
|
||||
pm_runtime_dont_use_autosuspend(&pdev->dev);
|
||||
pm_runtime_put_sync(&pdev->dev);
|
||||
flush_work(&priv->qos_work);
|
||||
pm_runtime_disable(&pdev->dev);
|
||||
serial8250_unregister_port(priv->line);
|
||||
pm_qos_remove_request(&priv->pm_qos_request);
|
||||
|
@ -1866,10 +1866,13 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status);
|
||||
static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir)
|
||||
{
|
||||
switch (iir & 0x3f) {
|
||||
case UART_IIR_RX_TIMEOUT:
|
||||
serial8250_rx_dma_flush(up);
|
||||
case UART_IIR_RDI:
|
||||
if (!up->dma->rx_running)
|
||||
break;
|
||||
/* fall-through */
|
||||
case UART_IIR_RLSI:
|
||||
case UART_IIR_RX_TIMEOUT:
|
||||
serial8250_rx_dma_flush(up);
|
||||
return true;
|
||||
}
|
||||
return up->dma->rx_dma(up);
|
||||
|
@ -260,8 +260,10 @@ static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t)
|
||||
ci->enabled_otg_timer_bits &= ~(1 << t);
|
||||
if (ci->next_otg_timer == t) {
|
||||
if (ci->enabled_otg_timer_bits == 0) {
|
||||
spin_unlock_irqrestore(&ci->lock, flags);
|
||||
/* No enabled timers after delete it */
|
||||
hrtimer_cancel(&ci->otg_fsm_hrtimer);
|
||||
spin_lock_irqsave(&ci->lock, flags);
|
||||
ci->next_otg_timer = NUM_OTG_FSM_TIMERS;
|
||||
} else {
|
||||
/* Find the next timer */
|
||||
|
@ -209,6 +209,9 @@ static const struct usb_device_id usb_quirk_list[] = {
|
||||
{ USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
|
||||
{ USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
|
||||
|
||||
/* Realforce 87U Keyboard */
|
||||
{ USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
|
||||
|
||||
/* M-Systems Flash Disk Pioneers */
|
||||
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
|
||||
|
||||
|
@ -165,6 +165,8 @@ static void option_instat_callback(struct urb *urb);
|
||||
#define NOVATELWIRELESS_PRODUCT_G2 0xA010
|
||||
#define NOVATELWIRELESS_PRODUCT_MC551 0xB001
|
||||
|
||||
#define UBLOX_VENDOR_ID 0x1546
|
||||
|
||||
/* AMOI PRODUCTS */
|
||||
#define AMOI_VENDOR_ID 0x1614
|
||||
#define AMOI_PRODUCT_H01 0x0800
|
||||
@ -243,7 +245,6 @@ static void option_instat_callback(struct urb *urb);
|
||||
#define QUECTEL_PRODUCT_UC15 0x9090
|
||||
/* These u-blox products use Qualcomm's vendor ID */
|
||||
#define UBLOX_PRODUCT_R410M 0x90b2
|
||||
#define UBLOX_PRODUCT_R6XX 0x90fa
|
||||
/* These Yuga products use Qualcomm's vendor ID */
|
||||
#define YUGA_PRODUCT_CLM920_NC5 0x9625
|
||||
|
||||
@ -584,6 +585,9 @@ static void option_instat_callback(struct urb *urb);
|
||||
#define OPPO_VENDOR_ID 0x22d9
|
||||
#define OPPO_PRODUCT_R11 0x276c
|
||||
|
||||
/* Sierra Wireless products */
|
||||
#define SIERRA_VENDOR_ID 0x1199
|
||||
#define SIERRA_PRODUCT_EM9191 0x90d3
|
||||
|
||||
/* Device flags */
|
||||
|
||||
@ -1127,8 +1131,16 @@ static const struct usb_device_id option_ids[] = {
|
||||
/* u-blox products using Qualcomm vendor ID */
|
||||
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
|
||||
.driver_info = RSVD(1) | RSVD(3) },
|
||||
{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX),
|
||||
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x908b), /* u-blox LARA-R6 00B */
|
||||
.driver_info = RSVD(4) },
|
||||
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa),
|
||||
.driver_info = RSVD(3) },
|
||||
/* u-blox products */
|
||||
{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) }, /* u-blox LARA-L6 */
|
||||
{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1342), /* u-blox LARA-L6 (RMNET) */
|
||||
.driver_info = RSVD(4) },
|
||||
{ USB_DEVICE(UBLOX_VENDOR_ID, 0x1343), /* u-blox LARA-L6 (ECM) */
|
||||
.driver_info = RSVD(4) },
|
||||
/* Quectel products using Quectel vendor ID */
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff),
|
||||
.driver_info = NUMEP2 },
|
||||
@ -2169,6 +2181,7 @@ static const struct usb_device_id option_ids[] = {
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */
|
||||
@ -2178,6 +2191,8 @@ static const struct usb_device_id option_ids[] = {
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
|
||||
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
|
||||
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
|
||||
{ } /* Terminating entry */
|
||||
};
|
||||
MODULE_DEVICE_TABLE(usb, option_ids);
|
||||
|
@ -228,7 +228,7 @@ static int register_pcpu(struct pcpu *pcpu)
|
||||
|
||||
err = device_register(dev);
|
||||
if (err) {
|
||||
pcpu_release(dev);
|
||||
put_device(dev);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -187,7 +187,7 @@ void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
|
||||
|
||||
void btrfs_free_dummy_root(struct btrfs_root *root)
|
||||
{
|
||||
if (!root)
|
||||
if (IS_ERR_OR_NULL(root))
|
||||
return;
|
||||
/* Will be freed by btrfs_free_fs_roots */
|
||||
if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))
|
||||
|
@ -2379,7 +2379,7 @@ int generic_cont_expand_simple(struct inode *inode, loff_t size)
|
||||
{
|
||||
struct address_space *mapping = inode->i_mapping;
|
||||
struct page *page;
|
||||
void *fsdata;
|
||||
void *fsdata = NULL;
|
||||
int err;
|
||||
|
||||
err = inode_newsize_ok(inode, size);
|
||||
@ -2405,7 +2405,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
|
||||
struct inode *inode = mapping->host;
|
||||
unsigned int blocksize = i_blocksize(inode);
|
||||
struct page *page;
|
||||
void *fsdata;
|
||||
void *fsdata = NULL;
|
||||
pgoff_t index, curidx;
|
||||
loff_t curpos;
|
||||
unsigned zerofrom, offset, len;
|
||||
|
@ -149,7 +149,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
|
||||
rc = put_user(ExtAttrBits &
|
||||
FS_FL_USER_VISIBLE,
|
||||
(int __user *)arg);
|
||||
if (rc != EOPNOTSUPP)
|
||||
if (rc != -EOPNOTSUPP)
|
||||
break;
|
||||
}
|
||||
#endif /* CONFIG_CIFS_POSIX */
|
||||
@ -178,7 +178,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
|
||||
* pSMBFile->fid.netfid,
|
||||
* extAttrBits,
|
||||
* &ExtAttrMask);
|
||||
* if (rc != EOPNOTSUPP)
|
||||
* if (rc != -EOPNOTSUPP)
|
||||
* break;
|
||||
*/
|
||||
|
||||
|
@ -172,7 +172,10 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
|
||||
pr_warn("Invalid superblock size\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (sb->sb_bsize_shift != ffs(sb->sb_bsize) - 1) {
|
||||
pr_warn("Invalid block size shift\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -388,8 +391,10 @@ static int init_names(struct gfs2_sbd *sdp, int silent)
|
||||
if (!table[0])
|
||||
table = sdp->sd_vfs->s_id;
|
||||
|
||||
strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
|
||||
strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);
|
||||
BUILD_BUG_ON(GFS2_LOCKNAME_LEN > GFS2_FSNAME_LEN);
|
||||
|
||||
strscpy(sdp->sd_proto_name, proto, GFS2_LOCKNAME_LEN);
|
||||
strscpy(sdp->sd_table_name, table, GFS2_LOCKNAME_LEN);
|
||||
|
||||
table = sdp->sd_table_name;
|
||||
while ((table = strchr(table, '/')))
|
||||
|
@ -5009,7 +5009,7 @@ int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
|
||||
{
|
||||
struct address_space *mapping = inode->i_mapping;
|
||||
struct page *page;
|
||||
void *fsdata;
|
||||
void *fsdata = NULL;
|
||||
int err;
|
||||
unsigned int flags = 0;
|
||||
if (nofs)
|
||||
|
@ -331,7 +331,7 @@ void nilfs_relax_pressure_in_lock(struct super_block *sb)
|
||||
struct the_nilfs *nilfs = sb->s_fs_info;
|
||||
struct nilfs_sc_info *sci = nilfs->ns_writer;
|
||||
|
||||
if (!sci || !sci->sc_flush_request)
|
||||
if (sb_rdonly(sb) || unlikely(!sci) || !sci->sc_flush_request)
|
||||
return;
|
||||
|
||||
set_bit(NILFS_SC_PRIOR_FLUSH, &sci->sc_flags);
|
||||
@ -2252,7 +2252,7 @@ int nilfs_construct_segment(struct super_block *sb)
|
||||
struct nilfs_transaction_info *ti;
|
||||
int err;
|
||||
|
||||
if (!sci)
|
||||
if (sb_rdonly(sb) || unlikely(!sci))
|
||||
return -EROFS;
|
||||
|
||||
/* A call inside transactions causes a deadlock. */
|
||||
@ -2291,7 +2291,7 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
|
||||
struct nilfs_transaction_info ti;
|
||||
int err = 0;
|
||||
|
||||
if (!sci)
|
||||
if (sb_rdonly(sb) || unlikely(!sci))
|
||||
return -EROFS;
|
||||
|
||||
nilfs_transaction_lock(sb, &ti, 0);
|
||||
@ -2788,11 +2788,12 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
|
||||
|
||||
if (nilfs->ns_writer) {
|
||||
/*
|
||||
* This happens if the filesystem was remounted
|
||||
* read/write after nilfs_error degenerated it into a
|
||||
* read-only mount.
|
||||
* This happens if the filesystem is made read-only by
|
||||
* __nilfs_error or nilfs_remount and then remounted
|
||||
* read/write. In these cases, reuse the existing
|
||||
* writer.
|
||||
*/
|
||||
nilfs_detach_log_writer(sb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
nilfs->ns_writer = nilfs_segctor_new(sb, root);
|
||||
|
@ -1148,8 +1148,6 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
|
||||
if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
|
||||
goto out;
|
||||
if (*flags & MS_RDONLY) {
|
||||
/* Shutting down log writer */
|
||||
nilfs_detach_log_writer(sb);
|
||||
sb->s_flags |= MS_RDONLY;
|
||||
|
||||
/*
|
||||
|
@ -704,9 +704,7 @@ int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
|
||||
{
|
||||
unsigned long ncleansegs;
|
||||
|
||||
down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
|
||||
ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
|
||||
up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
|
||||
*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
|
||||
return 0;
|
||||
}
|
||||
|
@ -608,17 +608,37 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
|
||||
for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
|
||||
u8 *mrec_end = (u8 *)ctx->mrec +
|
||||
le32_to_cpu(ctx->mrec->bytes_allocated);
|
||||
u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
|
||||
a->name_length * sizeof(ntfschar);
|
||||
if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
|
||||
name_end > mrec_end)
|
||||
u8 *name_end;
|
||||
|
||||
/* check whether ATTR_RECORD wrap */
|
||||
if ((u8 *)a < (u8 *)ctx->mrec)
|
||||
break;
|
||||
|
||||
/* check whether Attribute Record Header is within bounds */
|
||||
if ((u8 *)a > mrec_end ||
|
||||
(u8 *)a + sizeof(ATTR_RECORD) > mrec_end)
|
||||
break;
|
||||
|
||||
/* check whether ATTR_RECORD's name is within bounds */
|
||||
name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
|
||||
a->name_length * sizeof(ntfschar);
|
||||
if (name_end > mrec_end)
|
||||
break;
|
||||
|
||||
ctx->attr = a;
|
||||
if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
|
||||
a->type == AT_END))
|
||||
return -ENOENT;
|
||||
if (unlikely(!a->length))
|
||||
break;
|
||||
|
||||
/* check whether ATTR_RECORD's length wrap */
|
||||
if ((u8 *)a + le32_to_cpu(a->length) < (u8 *)a)
|
||||
break;
|
||||
/* check whether ATTR_RECORD's length is within bounds */
|
||||
if ((u8 *)a + le32_to_cpu(a->length) > mrec_end)
|
||||
break;
|
||||
|
||||
if (a->type != type)
|
||||
continue;
|
||||
/*
|
||||
|
@ -1863,6 +1863,13 @@ int ntfs_read_inode_mount(struct inode *vi)
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* Sanity check offset to the first attribute */
|
||||
if (le16_to_cpu(m->attrs_offset) >= le32_to_cpu(m->bytes_allocated)) {
|
||||
ntfs_error(sb, "Incorrect mft offset to the first attribute %u in superblock.",
|
||||
le16_to_cpu(m->attrs_offset));
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
/* Need this to sanity check attribute list references to $MFT. */
|
||||
vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
|
||||
|
||||
|
@ -237,7 +237,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
|
||||
poffset - lfi);
|
||||
else {
|
||||
if (!copy_name) {
|
||||
copy_name = kmalloc(UDF_NAME_LEN,
|
||||
copy_name = kmalloc(UDF_NAME_LEN_CS0,
|
||||
GFP_NOFS);
|
||||
if (!copy_name) {
|
||||
fi = ERR_PTR(-ENOMEM);
|
||||
|
@ -376,7 +376,7 @@ struct vfs_ns_cap_data {
|
||||
*/
|
||||
|
||||
#define CAP_TO_INDEX(x) ((x) >> 5) /* 1 << 5 == bits in __u32 */
|
||||
#define CAP_TO_MASK(x) (1 << ((x) & 31)) /* mask for indexed __u32 */
|
||||
#define CAP_TO_MASK(x) (1U << ((x) & 31)) /* mask for indexed __u32 */
|
||||
|
||||
|
||||
#endif /* _UAPI_LINUX_CAPABILITY_H */
|
||||
|
@ -1397,6 +1397,7 @@ static int ftrace_add_mod(struct trace_array *tr,
|
||||
if (!ftrace_mod)
|
||||
return -ENOMEM;
|
||||
|
||||
INIT_LIST_HEAD(&ftrace_mod->list);
|
||||
ftrace_mod->func = kstrdup(func, GFP_KERNEL);
|
||||
ftrace_mod->module = kstrdup(module, GFP_KERNEL);
|
||||
ftrace_mod->enable = enable;
|
||||
@ -3106,7 +3107,7 @@ static int ftrace_allocate_records(struct ftrace_page *pg, int count)
|
||||
/* if we can't allocate this size, try something smaller */
|
||||
if (!order)
|
||||
return -ENOMEM;
|
||||
order >>= 1;
|
||||
order--;
|
||||
goto again;
|
||||
}
|
||||
|
||||
@ -6016,7 +6017,7 @@ void __init ftrace_init(void)
|
||||
}
|
||||
|
||||
pr_info("ftrace: allocating %ld entries in %ld pages\n",
|
||||
count, count / ENTRIES_PER_PAGE + 1);
|
||||
count, DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
|
||||
|
||||
last_ftrace_enabled = ftrace_enabled = 1;
|
||||
|
||||
|
@ -1271,9 +1271,9 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
|
||||
|
||||
free_buffer_page(cpu_buffer->reader_page);
|
||||
|
||||
rb_head_page_deactivate(cpu_buffer);
|
||||
|
||||
if (head) {
|
||||
rb_head_page_deactivate(cpu_buffer);
|
||||
|
||||
list_for_each_entry_safe(bpage, tmp, head, list) {
|
||||
list_del_init(&bpage->list);
|
||||
free_buffer_page(bpage);
|
||||
|
@ -3114,7 +3114,7 @@ ssize_t generic_perform_write(struct file *file,
|
||||
unsigned long offset; /* Offset into pagecache page */
|
||||
unsigned long bytes; /* Bytes to write to page */
|
||||
size_t copied; /* Bytes copied from user */
|
||||
void *fsdata;
|
||||
void *fsdata = NULL;
|
||||
|
||||
offset = (pos & (PAGE_SIZE - 1));
|
||||
bytes = min_t(unsigned long, PAGE_SIZE - offset,
|
||||
|
@ -219,6 +219,8 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
|
||||
list_move(&req->req_list, &cancel_list);
|
||||
}
|
||||
|
||||
spin_unlock(&m->client->lock);
|
||||
|
||||
list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
|
||||
p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
|
||||
list_del(&req->req_list);
|
||||
@ -226,7 +228,6 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
|
||||
req->t_err = err;
|
||||
p9_client_cb(m->client, req, REQ_STATUS_ERROR);
|
||||
}
|
||||
spin_unlock(&m->client->lock);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -833,11 +834,14 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
|
||||
goto out_free_ts;
|
||||
if (!(ts->rd->f_mode & FMODE_READ))
|
||||
goto out_put_rd;
|
||||
/* prevent workers from hanging on IO when fd is a pipe */
|
||||
ts->rd->f_flags |= O_NONBLOCK;
|
||||
ts->wr = fget(wfd);
|
||||
if (!ts->wr)
|
||||
goto out_put_rd;
|
||||
if (!(ts->wr->f_mode & FMODE_WRITE))
|
||||
goto out_put_wr;
|
||||
ts->wr->f_flags |= O_NONBLOCK;
|
||||
|
||||
client->trans = ts;
|
||||
client->status = Connected;
|
||||
|
@ -1824,7 +1824,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
|
||||
if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
|
||||
continue;
|
||||
|
||||
if (c->psm == psm) {
|
||||
if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
|
||||
int src_match, dst_match;
|
||||
int src_any, dst_any;
|
||||
|
||||
|
@ -78,6 +78,7 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
|
||||
if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
size = SKB_DATA_ALIGN(size);
|
||||
data = kzalloc(size + headroom + tailroom, GFP_USER);
|
||||
if (!data)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
@ -315,9 +315,6 @@ static int chnl_net_open(struct net_device *dev)
|
||||
|
||||
if (result == 0) {
|
||||
pr_debug("connect timeout\n");
|
||||
caif_disconnect_client(dev_net(dev), &priv->chnl);
|
||||
priv->state = CAIF_DISCONNECTED;
|
||||
pr_debug("state disconnected\n");
|
||||
result = -ETIMEDOUT;
|
||||
goto error;
|
||||
}
|
||||
|
@ -3545,23 +3545,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
|
||||
int pos;
|
||||
int dummy;
|
||||
|
||||
if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
|
||||
(skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
|
||||
/* gso_size is untrusted, and we have a frag_list with a linear
|
||||
* non head_frag head.
|
||||
*
|
||||
* (we assume checking the first list_skb member suffices;
|
||||
* i.e if either of the list_skb members have non head_frag
|
||||
* head, then the first one has too).
|
||||
*
|
||||
* If head_skb's headlen does not fit requested gso_size, it
|
||||
* means that the frag_list members do NOT terminate on exact
|
||||
* gso_size boundaries. Hence we cannot perform skb_frag_t page
|
||||
* sharing. Therefore we must fallback to copying the frag_list
|
||||
* skbs; we do so by disabling SG.
|
||||
*/
|
||||
if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
|
||||
features &= ~NETIF_F_SG;
|
||||
if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
|
||||
mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
|
||||
struct sk_buff *check_skb;
|
||||
|
||||
for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
|
||||
if (skb_headlen(check_skb) && !check_skb->head_frag) {
|
||||
/* gso_size is untrusted, and we have a frag_list with
|
||||
* a linear non head_frag item.
|
||||
*
|
||||
* If head_skb's headlen does not fit requested gso_size,
|
||||
* it means that the frag_list members do NOT terminate
|
||||
* on exact gso_size boundaries. Hence we cannot perform
|
||||
* skb_frag_t page sharing. Therefore we must fallback to
|
||||
* copying the frag_list skbs; we do so by disabling SG.
|
||||
*/
|
||||
features &= ~NETIF_F_SG;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
__skb_push(head_skb, doffset);
|
||||
|
@ -374,6 +374,7 @@ static void tcp_cdg_init(struct sock *sk)
|
||||
struct cdg *ca = inet_csk_ca(sk);
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
||||
ca->gradients = NULL;
|
||||
/* We silently fall back to window = 1 if allocation fails. */
|
||||
if (window > 1)
|
||||
ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
|
||||
@ -387,6 +388,7 @@ static void tcp_cdg_release(struct sock *sk)
|
||||
struct cdg *ca = inet_csk_ca(sk);
|
||||
|
||||
kfree(ca->gradients);
|
||||
ca->gradients = NULL;
|
||||
}
|
||||
|
||||
struct tcp_congestion_ops tcp_cdg __read_mostly = {
|
||||
|
@ -474,6 +474,7 @@ static void ip6addrlbl_putmsg(struct nlmsghdr *nlh,
|
||||
{
|
||||
struct ifaddrlblmsg *ifal = nlmsg_data(nlh);
|
||||
ifal->ifal_family = AF_INET6;
|
||||
ifal->__ifal_reserved = 0;
|
||||
ifal->ifal_prefixlen = prefixlen;
|
||||
ifal->ifal_flags = 0;
|
||||
ifal->ifal_index = ifindex;
|
||||
|
@ -224,7 +224,7 @@ static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
|
||||
struct sk_buff *skb;
|
||||
struct kcm_sock *kcm;
|
||||
|
||||
while ((skb = __skb_dequeue(head))) {
|
||||
while ((skb = skb_dequeue(head))) {
|
||||
/* Reset destructor to avoid calling kcm_rcv_ready */
|
||||
skb->destructor = sock_rfree;
|
||||
skb_orphan(skb);
|
||||
@ -1085,53 +1085,18 @@ out_error:
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
|
||||
long timeo, int *err)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
while (!(skb = skb_peek(&sk->sk_receive_queue))) {
|
||||
if (sk->sk_err) {
|
||||
*err = sock_error(sk);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (sock_flag(sk, SOCK_DONE))
|
||||
return NULL;
|
||||
|
||||
if ((flags & MSG_DONTWAIT) || !timeo) {
|
||||
*err = -EAGAIN;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
sk_wait_data(sk, &timeo, NULL);
|
||||
|
||||
/* Handle signals */
|
||||
if (signal_pending(current)) {
|
||||
*err = sock_intr_errno(timeo);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
return skb;
|
||||
}
|
||||
|
||||
static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
|
||||
size_t len, int flags)
|
||||
{
|
||||
int noblock = flags & MSG_DONTWAIT;
|
||||
struct sock *sk = sock->sk;
|
||||
struct kcm_sock *kcm = kcm_sk(sk);
|
||||
int err = 0;
|
||||
long timeo;
|
||||
struct strp_msg *stm;
|
||||
int copied = 0;
|
||||
struct sk_buff *skb;
|
||||
|
||||
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
|
||||
|
||||
lock_sock(sk);
|
||||
|
||||
skb = kcm_wait_data(sk, flags, timeo, &err);
|
||||
skb = skb_recv_datagram(sk, flags, noblock, &err);
|
||||
if (!skb)
|
||||
goto out;
|
||||
|
||||
@ -1162,14 +1127,11 @@ msg_finished:
|
||||
/* Finished with message */
|
||||
msg->msg_flags |= MSG_EOR;
|
||||
KCM_STATS_INCR(kcm->stats.rx_msgs);
|
||||
skb_unlink(skb, &sk->sk_receive_queue);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
release_sock(sk);
|
||||
|
||||
skb_free_datagram(sk, skb);
|
||||
return copied ? : err;
|
||||
}
|
||||
|
||||
@ -1177,9 +1139,9 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
|
||||
struct pipe_inode_info *pipe, size_t len,
|
||||
unsigned int flags)
|
||||
{
|
||||
int noblock = flags & MSG_DONTWAIT;
|
||||
struct sock *sk = sock->sk;
|
||||
struct kcm_sock *kcm = kcm_sk(sk);
|
||||
long timeo;
|
||||
struct strp_msg *stm;
|
||||
int err = 0;
|
||||
ssize_t copied;
|
||||
@ -1187,11 +1149,7 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
|
||||
|
||||
/* Only support splice for SOCKSEQPACKET */
|
||||
|
||||
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
|
||||
|
||||
lock_sock(sk);
|
||||
|
||||
skb = kcm_wait_data(sk, flags, timeo, &err);
|
||||
skb = skb_recv_datagram(sk, flags, noblock, &err);
|
||||
if (!skb)
|
||||
goto err_out;
|
||||
|
||||
@ -1219,13 +1177,11 @@ static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
|
||||
* finish reading the message.
|
||||
*/
|
||||
|
||||
release_sock(sk);
|
||||
|
||||
skb_free_datagram(sk, skb);
|
||||
return copied;
|
||||
|
||||
err_out:
|
||||
release_sock(sk);
|
||||
|
||||
skb_free_datagram(sk, skb);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1850,10 +1806,10 @@ static int kcm_release(struct socket *sock)
|
||||
kcm = kcm_sk(sk);
|
||||
mux = kcm->mux;
|
||||
|
||||
lock_sock(sk);
|
||||
sock_orphan(sk);
|
||||
kfree_skb(kcm->seq_skb);
|
||||
|
||||
lock_sock(sk);
|
||||
/* Purge queue under lock to avoid race condition with tx_work trying
|
||||
* to act when queue is nonempty. If tx_work runs after this point
|
||||
* it will just return.
|
||||
|
@ -856,7 +856,7 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
|
||||
};
|
||||
|
||||
ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
|
||||
if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
|
||||
if (TLV_GET_DATA_LEN(msg->req) < (int)sizeof(struct tipc_name_table_query))
|
||||
return -EINVAL;
|
||||
|
||||
depth = ntohl(ntq->depth);
|
||||
|
@ -122,7 +122,7 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev,
|
||||
|
||||
if (!pskb_may_pull(skb, 1)) {
|
||||
x25_neigh_put(nb);
|
||||
return 0;
|
||||
goto drop;
|
||||
}
|
||||
|
||||
switch (skb->data[0]) {
|
||||
|
@ -23,6 +23,13 @@
|
||||
#include <openssl/err.h>
|
||||
#include <openssl/engine.h>
|
||||
|
||||
/*
|
||||
* OpenSSL 3.0 deprecates the OpenSSL's ENGINE API.
|
||||
*
|
||||
* Remove this if/when that API is no longer used
|
||||
*/
|
||||
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
|
||||
|
||||
#define PKEY_ID_PKCS7 2
|
||||
|
||||
static __attribute__((noreturn))
|
||||
|
@ -29,6 +29,13 @@
|
||||
#include <openssl/err.h>
|
||||
#include <openssl/engine.h>
|
||||
|
||||
/*
|
||||
* OpenSSL 3.0 deprecates the OpenSSL's ENGINE API.
|
||||
*
|
||||
* Remove this if/when that API is no longer used
|
||||
*/
|
||||
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
|
||||
|
||||
/*
|
||||
* Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to
|
||||
* assume that it's not available and its header file is missing and that we
|
||||
|
@ -346,8 +346,10 @@ static int add_widget_node(struct kobject *parent, hda_nid_t nid,
|
||||
return -ENOMEM;
|
||||
kobject_init(kobj, &widget_ktype);
|
||||
err = kobject_add(kobj, parent, "%02x", nid);
|
||||
if (err < 0)
|
||||
if (err < 0) {
|
||||
kobject_put(kobj);
|
||||
return err;
|
||||
}
|
||||
err = sysfs_create_group(kobj, group);
|
||||
if (err < 0) {
|
||||
kobject_put(kobj);
|
||||
|
@ -4376,10 +4376,23 @@ EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_link_codecs);
|
||||
|
||||
static int __init snd_soc_init(void)
|
||||
{
|
||||
snd_soc_debugfs_init();
|
||||
snd_soc_util_init();
|
||||
int ret;
|
||||
|
||||
return platform_driver_register(&soc_driver);
|
||||
snd_soc_debugfs_init();
|
||||
ret = snd_soc_util_init();
|
||||
if (ret)
|
||||
goto err_util_init;
|
||||
|
||||
ret = platform_driver_register(&soc_driver);
|
||||
if (ret)
|
||||
goto err_register;
|
||||
return 0;
|
||||
|
||||
err_register:
|
||||
snd_soc_util_exit();
|
||||
err_util_init:
|
||||
snd_soc_debugfs_exit();
|
||||
return ret;
|
||||
}
|
||||
module_init(snd_soc_init);
|
||||
|
||||
|
@ -385,7 +385,7 @@ int __init snd_soc_util_init(void)
|
||||
return ret;
|
||||
}
|
||||
|
||||
void __exit snd_soc_util_exit(void)
|
||||
void snd_soc_util_exit(void)
|
||||
{
|
||||
platform_device_unregister(soc_dummy_dev);
|
||||
platform_driver_unregister(&soc_dummy_driver);
|
||||
|
@ -1148,10 +1148,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream)
|
||||
port = &umidi->endpoints[i].out->ports[j];
|
||||
break;
|
||||
}
|
||||
if (!port) {
|
||||
snd_BUG();
|
||||
if (!port)
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
substream->runtime->private_data = port;
|
||||
port->state = STATE_UNKNOWN;
|
||||
|
@ -2089,6 +2089,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
/* M-Audio Micro */
|
||||
USB_DEVICE_VENDOR_SPEC(0x0763, 0x201a),
|
||||
},
|
||||
{
|
||||
USB_DEVICE_VENDOR_SPEC(0x0763, 0x2030),
|
||||
.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
|
||||
|
@ -3,11 +3,11 @@ INCLUDES := -I../include -I../../
|
||||
CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES)
|
||||
LDFLAGS := $(LDFLAGS) -pthread -lrt
|
||||
|
||||
HEADERS := \
|
||||
LOCAL_HDRS := \
|
||||
../include/futextest.h \
|
||||
../include/atomic.h \
|
||||
../include/logging.h
|
||||
TEST_GEN_FILES := \
|
||||
TEST_GEN_PROGS := \
|
||||
futex_wait_timeout \
|
||||
futex_wait_wouldblock \
|
||||
futex_requeue_pi \
|
||||
@ -19,5 +19,3 @@ TEST_GEN_FILES := \
|
||||
TEST_PROGS := run.sh
|
||||
|
||||
include ../../lib.mk
|
||||
|
||||
$(TEST_GEN_FILES): $(HEADERS)
|
||||
|