This is the 4.14.162 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl4QjoEACgkQONu9yGCS
 aT61Ig/9GTbv5+njbemhs01loMsA6H4u+BwFHjxJiTzfj+7TwKDZVDcllmiKkPSQ
 cS3+n6oV1G5VzzmTRU5WKBNQkgu2t6TmcxI4xiDTIZ+GlzdC7b7bp0uEv7bRGIMg
 lm6oHBoy753oMiB/Z4itA58tuLVsEw9sjZJ3O7wvlaFl4NzD8clGCc9iLQaLofDP
 7uXWPgtZ3yRDquOtjPV7c52qwbr/QUZs13iH6xwSHIK6kmTbuhKbQB2TqdrHlKrc
 FxlitA8NAjn8s7PrJd0NWQxxEW0by3W+pYZ6yvzF1zlY1UWkZB7WfKK8kW5A/5Jt
 alPtbHAZGbxuobVewObRosM/DZ6vYHNE78M6FUkyo7113lsvVNhz71h8YbO/beCc
 PPGzvQbbeaWGdVtTFVih75HwtGVktwRpgdA1H0NPZb4eWX9eZl8BrgMvo4EsAvl4
 BMYiWbYvR7ijWvbahwTHlpnpmce2acWD5H+oGE338lXvXfXjgrH5d2DlZ9bWTdKv
 h1YmINQ/cZuUoAe9vlUr/uXIflwza65TJWDRRjzXMZ7FOLwXTTCjqFO+36PZ5zRf
 4jdfZa4Uz0HmfH95bVJRbRuAt1Fny/mK3sx7vjTcu0qT9FpG8P3tSJR9rz8yEbVb
 X0dmyUHl2qNFj0Y/cV3AJJjTEuDbhmXfwPmXPgF4owR6R0rhfM4=
 =57Qt
 -----END PGP SIGNATURE-----

Merge 4.14.162 into android-4.14

Changes in 4.14.162
	scsi: lpfc: Fix discovery failures when target device connectivity bounces
	scsi: mpt3sas: Fix clear pending bit in ioctl status
	scsi: lpfc: Fix locking on mailbox command completion
	Input: atmel_mxt_ts - disable IRQ across suspend
	iommu/tegra-smmu: Fix page tables in > 4 GiB memory
	scsi: target: compare full CHAP_A Algorithm strings
	scsi: lpfc: Fix SLI3 hba in loop mode not discovering devices
	scsi: csiostor: Don't enable IRQs too early
	powerpc/pseries: Mark accumulate_stolen_time() as notrace
	powerpc/pseries: Don't fail hash page table insert for bolted mapping
	powerpc/tools: Don't quote $objdump in scripts
	dma-debug: add a schedule point in debug_dma_dump_mappings()
	clocksource/drivers/asm9260: Add a check for of_clk_get
	powerpc/security/book3s64: Report L1TF status in sysfs
	powerpc/book3s64/hash: Add cond_resched to avoid soft lockup warning
	ext4: update direct I/O read lock pattern for IOCB_NOWAIT
	jbd2: Fix statistics for the number of logged blocks
	scsi: tracing: Fix handling of TRANSFER LENGTH == 0 for READ(6) and WRITE(6)
	scsi: lpfc: Fix duplicate unreg_rpi error in port offline flow
	f2fs: fix to update dir's i_pino during cross_rename
	clk: qcom: Allow constant ratio freq tables for rcg
	irqchip/irq-bcm7038-l1: Enable parent IRQ if necessary
	irqchip: ingenic: Error out if IRQ domain creation failed
	fs/quota: handle overflows of sysctl fs.quota.* and report as unsigned long
	scsi: lpfc: fix: Coverity: lpfc_cmpl_els_rsp(): Null pointer dereferences
	scsi: ufs: fix potential bug which ends in system hang
	powerpc/pseries/cmm: Implement release() function for sysfs device
	powerpc/security: Fix wrong message when RFI Flush is disable
	scsi: atari_scsi: sun3_scsi: Set sg_tablesize to 1 instead of SG_NONE
	clk: pxa: fix one of the pxa RTC clocks
	bcache: at least try to shrink 1 node in bch_mca_scan()
	HID: logitech-hidpp: Silence intermittent get_battery_capacity errors
	libnvdimm/btt: fix variable 'rc' set but not used
	HID: Improve Windows Precision Touchpad detection.
	scsi: pm80xx: Fix for SATA device discovery
	scsi: ufs: Fix error handing during hibern8 enter
	scsi: scsi_debug: num_tgts must be >= 0
	scsi: NCR5380: Add disconnect_mask module parameter
	scsi: iscsi: Don't send data to unbound connection
	scsi: target: iscsi: Wait for all commands to finish before freeing a session
	gpio: mpc8xxx: Don't overwrite default irq_set_type callback
	apparmor: fix unsigned len comparison with less than zero
	scripts/kallsyms: fix definitely-lost memory leak
	cdrom: respect device capabilities during opening action
	perf script: Fix brstackinsn for AUXTRACE
	perf regs: Make perf_reg_name() return "unknown" instead of NULL
	s390/zcrypt: handle new reply code FILTERED_BY_HYPERVISOR
	libfdt: define INT32_MAX and UINT32_MAX in libfdt_env.h
	s390/cpum_sf: Check for SDBT and SDB consistency
	ocfs2: fix passing zero to 'PTR_ERR' warning
	kernel: sysctl: make drop_caches write-only
	userfaultfd: require CAP_SYS_PTRACE for UFFD_FEATURE_EVENT_FORK
	x86/mce: Fix possibly incorrect severity calculation on AMD
	net, sysctl: Fix compiler warning when only cBPF is present
	netfilter: nf_queue: enqueue skbs with NULL dst
	ALSA: hda - Downgrade error message for single-cmd fallback
	bonding: fix active-backup transition after link failure
	perf strbuf: Remove redundant va_end() in strbuf_addv()
	Make filldir[64]() verify the directory entry filename is valid
	filldir[64]: remove WARN_ON_ONCE() for bad directory entries
	netfilter: ebtables: compat: reject all padding in matches/watchers
	6pack,mkiss: fix possible deadlock
	netfilter: bridge: make sure to pull arp header in br_nf_forward_arp()
	inetpeer: fix data-race in inet_putpeer / inet_putpeer
	net: add a READ_ONCE() in skb_peek_tail()
	net: icmp: fix data-race in cmp_global_allow()
	hrtimer: Annotate lockless access to timer->state
	spi: fsl: don't map irq during probe
	tty/serial: atmel: fix out of range clock divider handling
	pinctrl: baytrail: Really serialize all register accesses
	net: ena: fix napi handler misbehavior when the napi budget is zero
	net/mlxfw: Fix out-of-memory error in mfa2 flash burning
	ptp: fix the race between the release of ptp_clock and cdev
	udp: fix integer overflow while computing available space in sk_rcvbuf
	vhost/vsock: accept only packets with the right dst_cid
	net: add bool confirm_neigh parameter for dst_ops.update_pmtu
	ip6_gre: do not confirm neighbor when do pmtu update
	gtp: do not confirm neighbor when do pmtu update
	net/dst: add new function skb_dst_update_pmtu_no_confirm
	tunnel: do not confirm neighbor when do pmtu update
	vti: do not confirm neighbor when do pmtu update
	sit: do not confirm neighbor when do pmtu update
	gtp: do not allow adding duplicate tid and ms_addr pdp context
	tcp/dccp: fix possible race __inet_lookup_established()
	tcp: do not send empty skb from tcp_write_xmit()
	gtp: fix wrong condition in gtp_genl_dump_pdp()
	gtp: fix an use-after-free in ipv4_pdp_find()
	gtp: avoid zero size hashtable
	spi: fsl: use platform_get_irq() instead of of_irq_to_resource()
	Linux 4.14.162

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Commit c2bd4f8f0c by Greg Kroah-Hartman, 2020-01-04 19:09:37 +01:00
110 changed files with 687 additions and 352 deletions

View File

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
SUBLEVEL = 161
SUBLEVEL = 162
EXTRAVERSION =
NAME = Petit Gorille

View File

@ -2,11 +2,13 @@
#ifndef _ARM_LIBFDT_ENV_H
#define _ARM_LIBFDT_ENV_H
#include <linux/limits.h>
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>
#define INT_MAX ((int)(~0U>>1))
#define INT32_MAX S32_MAX
#define UINT32_MAX U32_MAX
typedef __be16 fdt16_t;
typedef __be32 fdt32_t;

View File

@ -6,6 +6,8 @@
#include <string.h>
#define INT_MAX ((int)(~0U>>1))
#define UINT32_MAX ((u32)~0U)
#define INT32_MAX ((s32)(UINT32_MAX >> 1))
#include "of.h"

View File

@ -134,32 +134,33 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV);
if (rfi_flush || thread_priv) {
if (rfi_flush) {
struct seq_buf s;
seq_buf_init(&s, buf, PAGE_SIZE - 1);
seq_buf_printf(&s, "Mitigation: ");
if (rfi_flush)
seq_buf_printf(&s, "RFI Flush");
if (rfi_flush && thread_priv)
seq_buf_printf(&s, ", ");
seq_buf_printf(&s, "Mitigation: RFI Flush");
if (thread_priv)
seq_buf_printf(&s, "L1D private per thread");
seq_buf_printf(&s, ", L1D private per thread");
seq_buf_printf(&s, "\n");
return s.len;
}
if (thread_priv)
return sprintf(buf, "Vulnerable: L1D private per thread\n");
if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) &&
!security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR))
return sprintf(buf, "Not affected\n");
return sprintf(buf, "Vulnerable\n");
}
ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
return cpu_show_meltdown(dev, attr, buf);
}
#endif
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)

View File

@ -241,7 +241,7 @@ static u64 scan_dispatch_log(u64 stop_tb)
* Accumulate stolen time by scanning the dispatch trace log.
* Called on entry from user mode.
*/
void accumulate_stolen_time(void)
void notrace accumulate_stolen_time(void)
{
u64 sst, ust;
u8 save_soft_enabled = local_paca->soft_enabled;

View File

@ -292,10 +292,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
HPTE_V_BOLTED, psize, psize,
ssize);
if (ret == -1) {
/* Try to remove a non bolted entry */
ret = mmu_hash_ops.hpte_remove(hpteg);
if (ret != -1)
ret = mmu_hash_ops.hpte_insert(hpteg, vpn, paddr, tprot,
HPTE_V_BOLTED, psize, psize,
ssize);
}
if (ret < 0)
break;
cond_resched();
#ifdef CONFIG_DEBUG_PAGEALLOC
if (debug_pagealloc_enabled() &&
(paddr >> PAGE_SHIFT) < linear_map_hash_count)

View File

@ -425,6 +425,10 @@ static struct bus_type cmm_subsys = {
.dev_name = "cmm",
};
static void cmm_release_device(struct device *dev)
{
}
/**
* cmm_sysfs_register - Register with sysfs
*
@ -440,6 +444,7 @@ static int cmm_sysfs_register(struct device *dev)
dev->id = 0;
dev->bus = &cmm_subsys;
dev->release = cmm_release_device;
if ((rc = device_register(dev)))
goto subsys_unregister;

View File

@ -23,7 +23,7 @@ objdump="$1"
vmlinux="$2"
bad_relocs=$(
"$objdump" -R "$vmlinux" |
$objdump -R "$vmlinux" |
# Only look at relocation lines.
grep -E '\<R_' |
# These relocations are okay

View File

@ -18,14 +18,14 @@ vmlinux="$2"
#__end_interrupts should be located within the first 64K
end_intr=0x$(
"$objdump" -R "$vmlinux" -d --start-address=0xc000000000000000 \
$objdump -R "$vmlinux" -d --start-address=0xc000000000000000 \
--stop-address=0xc000000000010000 |
grep '\<__end_interrupts>:' |
awk '{print $1}'
)
BRANCHES=$(
"$objdump" -R "$vmlinux" -D --start-address=0xc000000000000000 \
$objdump -R "$vmlinux" -D --start-address=0xc000000000000000 \
--stop-address=${end_intr} |
grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]]*b" |
grep -v '\<__start_initialization_multiplatform>' |
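
Note: dropping the quotes around $objdump is illustrative of a general
shell rule -- unquoted, the variable may hold a multi-word command
(e.g. "ccache objdump" under a build wrapper) that the shell
word-splits into program plus argument; quoted as "$objdump", the
whole string would be looked up as a single program name and fail.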

View File

@ -185,7 +185,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
unsigned long num_sdb, gfp_t gfp_flags)
{
int i, rc;
unsigned long *new, *tail;
unsigned long *new, *tail, *tail_prev = NULL;
if (!sfb->sdbt || !sfb->tail)
return -EINVAL;
@ -224,6 +224,7 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
sfb->num_sdbt++;
/* Link current page to tail of chain */
*tail = (unsigned long)(void *) new + 1;
tail_prev = tail;
tail = new;
}
@ -233,10 +234,22 @@ static int realloc_sampling_buffer(struct sf_buffer *sfb,
* issue, a new realloc call (if required) might succeed.
*/
rc = alloc_sample_data_block(tail, gfp_flags);
if (rc)
if (rc) {
/* Undo last SDBT. An SDBT with no SDB at its first
* entry but with an SDBT entry instead can not be
* handled by the interrupt handler code.
* Avoid this situation.
*/
if (tail_prev) {
sfb->num_sdbt--;
free_page((unsigned long) new);
tail = tail_prev;
}
break;
}
sfb->num_sdb++;
tail++;
tail_prev = new = NULL; /* Allocated at least one SBD */
}
/* Link sampling buffer to its origin */

View File

@ -802,8 +802,8 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
if (quirk_no_way_out)
quirk_no_way_out(i, m, regs);
m->bank = i;
if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
m->bank = i;
mce_read_aux(m, i);
*msg = tmp;
return 1;
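
Note: the "m->bank = i;" assignment moves ahead of the mce_severity()
call because, on AMD (SMCA) systems, the severity grader consults
m->bank; grading before the bank was recorded could return a wrong
severity for the bank being scanned.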

View File

@ -995,6 +995,12 @@ static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks)
tracks->xa = 0;
tracks->error = 0;
cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n");
if (!CDROM_CAN(CDC_PLAY_AUDIO)) {
tracks->error = CDS_NO_INFO;
return;
}
/* Grab the TOC header so we can see how many tracks there are */
ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header);
if (ret) {
@ -1161,7 +1167,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
ret = open_for_data(cdi);
if (ret)
goto err;
cdrom_mmc3_profile(cdi);
if (CDROM_CAN(CDC_GENERIC_PACKET))
cdrom_mmc3_profile(cdi);
if (mode & FMODE_WRITE) {
ret = -EROFS;
if (cdrom_open_write(cdi))
@ -2878,6 +2885,9 @@ int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written)
it doesn't give enough information or fails. then we return
the toc contents. */
use_toc:
if (!CDROM_CAN(CDC_PLAY_AUDIO))
return -ENOSYS;
toc.cdte_format = CDROM_MSF;
toc.cdte_track = CDROM_LEADOUT;
if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc)))

View File

@ -462,6 +462,7 @@ struct dummy_clk {
};
static struct dummy_clk dummy_clks[] __initdata = {
DUMMY_CLK(NULL, "pxa27x-gpio", "osc_32_768khz"),
DUMMY_CLK(NULL, "pxa-rtc", "osc_32_768khz"),
DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
};

View File

@ -212,6 +212,8 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
p = clk_hw_get_parent_by_index(hw, index);
if (clk_flags & CLK_SET_RATE_PARENT) {
if (f->pre_div) {
if (!rate)
rate = req->rate;
rate /= 2;
rate *= f->pre_div + 1;
}

View File

@ -37,6 +37,9 @@ struct freq_tbl *qcom_find_freq(const struct freq_tbl *f, unsigned long rate)
if (!f)
return NULL;
if (!f->freq)
return f;
for (; f->freq; f++)
if (rate <= f->freq)
return f;

View File

@ -198,6 +198,10 @@ static int __init asm9260_timer_init(struct device_node *np)
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
pr_err("Failed to get clk!\n");
return PTR_ERR(clk);
}
ret = clk_prepare_enable(clk);
if (ret) {

View File

@ -337,7 +337,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
* It's assumed that only a single type of gpio controller is available
* on the current machine, so overwriting global data is fine.
*/
mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
if (devtype->irq_set_type)
mpc8xxx_irq_chip.irq_set_type = devtype->irq_set_type;
if (devtype->gpio_dir_out)
gc->direction_output = devtype->gpio_dir_out;

View File

@ -760,6 +760,10 @@ static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
parser->global.report_size == 8)
parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
parser->global.report_size == 8)
parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
}
static void hid_scan_collection(struct hid_parser *parser, unsigned type)

View File

@ -978,6 +978,9 @@ static int hidpp20_batterylevel_get_battery_capacity(struct hidpp_device *hidpp,
ret = hidpp_send_fap_command_sync(hidpp, feature_index,
CMD_BATTERY_LEVEL_STATUS_GET_BATTERY_LEVEL_STATUS,
NULL, 0, &response);
/* Ignore these intermittent errors */
if (ret == HIDPP_ERROR_RESOURCE_ERROR)
return -EIO;
if (ret > 0) {
hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
__func__, ret);

View File

@ -3257,6 +3257,8 @@ static int __maybe_unused mxt_suspend(struct device *dev)
mutex_unlock(&input_dev->mutex);
disable_irq(data->irq);
return 0;
}
@ -3269,6 +3271,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
if (!input_dev)
return 0;
enable_irq(data->irq);
mutex_lock(&input_dev->mutex);
if (input_dev->users)

View File

@ -156,9 +156,9 @@ static bool smmu_dma_addr_valid(struct tegra_smmu *smmu, dma_addr_t addr)
return (addr & smmu->pfn_mask) == addr;
}
static dma_addr_t smmu_pde_to_dma(u32 pde)
static dma_addr_t smmu_pde_to_dma(struct tegra_smmu *smmu, u32 pde)
{
return pde << 12;
return (dma_addr_t)(pde & smmu->pfn_mask) << 12;
}
static void smmu_flush_ptc_all(struct tegra_smmu *smmu)
@ -543,6 +543,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
dma_addr_t *dmap)
{
unsigned int pd_index = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
struct page *pt_page;
u32 *pd;
@ -551,7 +552,7 @@ static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
return NULL;
pd = page_address(as->pd);
*dmap = smmu_pde_to_dma(pd[pd_index]);
*dmap = smmu_pde_to_dma(smmu, pd[pd_index]);
return tegra_smmu_pte_offset(pt_page, iova);
}
@ -593,7 +594,7 @@ static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
} else {
u32 *pd = page_address(as->pd);
*dmap = smmu_pde_to_dma(pd[pde]);
*dmap = smmu_pde_to_dma(smmu, pd[pde]);
}
return tegra_smmu_pte_offset(as->pts[pde], iova);
@ -618,7 +619,7 @@ static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
if (--as->count[pde] == 0) {
struct tegra_smmu *smmu = as->smmu;
u32 *pd = page_address(as->pd);
dma_addr_t pte_dma = smmu_pde_to_dma(pd[pde]);
dma_addr_t pte_dma = smmu_pde_to_dma(smmu, pd[pde]);
tegra_smmu_set_pde(as, iova, 0);
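
A worked example of the truncation the new helper signature avoids
(illustrative, not from the patch): pde is a u32, so the old
"pde << 12" was evaluated in 32 bits before being widened, and the PDE
attribute bits were never masked off.

	/* page table page with PFN 0x100001 (physical 0x1_0000_1000): */
	(u32)(0x100001 << 12)        == 0x00001000   /* top bits lost */
	((dma_addr_t)0x100001) << 12 == 0x100001000  /* correct */

Masking with smmu->pfn_mask additionally strips the PDE attribute bits
before the shift.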

View File

@ -284,6 +284,10 @@ static int __init bcm7038_l1_init_one(struct device_node *dn,
pr_err("failed to map parent interrupt %d\n", parent_irq);
return -EINVAL;
}
if (of_property_read_bool(dn, "brcm,irq-can-wake"))
enable_irq_wake(parent_irq);
irq_set_chained_handler_and_data(parent_irq, bcm7038_l1_irq_handle,
intc);

View File

@ -117,6 +117,14 @@ static int __init ingenic_intc_of_init(struct device_node *node,
goto out_unmap_irq;
}
domain = irq_domain_add_legacy(node, num_chips * 32,
JZ4740_IRQ_BASE, 0,
&irq_domain_simple_ops, NULL);
if (!domain) {
err = -ENOMEM;
goto out_unmap_base;
}
for (i = 0; i < num_chips; i++) {
/* Mask all irqs */
writel(0xffffffff, intc->base + (i * CHIP_SIZE) +
@ -143,14 +151,11 @@ static int __init ingenic_intc_of_init(struct device_node *node,
IRQ_NOPROBE | IRQ_LEVEL);
}
domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0,
&irq_domain_simple_ops, NULL);
if (!domain)
pr_warn("unable to register IRQ domain\n");
setup_irq(parent_irq, &intc_cascade_action);
return 0;
out_unmap_base:
iounmap(intc->base);
out_unmap_irq:
irq_dispose_mapping(parent_irq);
out_free:

View File

@ -685,6 +685,8 @@ static unsigned long bch_mca_scan(struct shrinker *shrink,
* IO can always make forward progress:
*/
nr /= c->btree_pages;
if (nr == 0)
nr = 1;
nr = min_t(unsigned long, nr, mca_can_free(c));
i = 0;

View File

@ -2186,9 +2186,6 @@ static void bond_miimon_commit(struct bonding *bond)
} else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
/* make it immediately active */
bond_set_active_slave(slave);
} else if (slave != primary) {
/* prevent it from being the active one */
bond_set_backup_slave(slave);
}
netdev_info(bond->dev, "link status definitely up for interface %s, %u Mbps %s duplex\n",

View File

@ -1196,8 +1196,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
struct ena_ring *tx_ring, *rx_ring;
u32 tx_work_done;
u32 rx_work_done;
int tx_work_done;
int rx_work_done = 0;
int tx_budget;
int napi_comp_call = 0;
int ret;
@ -1214,7 +1214,11 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
}
tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
/* On netpoll the budget is zero and the handler should only clean the
* tx completions.
*/
if (likely(budget))
rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);
/* If the device is about to reset or down, avoid unmask
* the interrupt and return 0 so NAPI won't reschedule
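
For context, a minimal sketch of a NAPI poll handler that honors the
zero budget netpoll passes (hypothetical names, not the driver's code):

static int example_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;

	/* TX completions may always be reaped, even from netpoll */
	example_clean_tx(napi);

	/* netpoll calls with budget == 0: RX work must be skipped */
	if (likely(budget))
		rx_done = example_clean_rx(napi, budget);

	/* with budget == 0 this is false, so NAPI is not completed here */
	if (rx_done < budget)
		napi_complete_done(napi, rx_done);

	return rx_done;
}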

View File

@ -37,6 +37,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/vmalloc.h>
#include <linux/xz.h>
#include "mlxfw_mfa2.h"
#include "mlxfw_mfa2_file.h"
@ -579,7 +580,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
comp_size = be32_to_cpu(comp->size);
comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len;
comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL);
comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size);
if (!comp_data)
return ERR_PTR(-ENOMEM);
comp_data->comp.data_size = comp_size;
@ -601,7 +602,7 @@ mlxfw_mfa2_file_component_get(const struct mlxfw_mfa2_file *mfa2_file,
comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len;
return &comp_data->comp;
err_out:
kfree(comp_data);
vfree(comp_data);
return ERR_PTR(err);
}
@ -610,7 +611,7 @@ void mlxfw_mfa2_file_component_put(struct mlxfw_mfa2_component *comp)
const struct mlxfw_mfa2_comp_data *comp_data;
comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp);
kfree(comp_data);
vfree(comp_data);
}
void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)

View File

@ -42,7 +42,6 @@ struct pdp_ctx {
struct hlist_node hlist_addr;
union {
u64 tid;
struct {
u64 tid;
u16 flow;
@ -545,7 +544,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
mtu = dst_mtu(&rt->dst);
}
rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
@ -645,9 +644,16 @@ static void gtp_link_setup(struct net_device *dev)
}
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[]);
static void gtp_destructor(struct net_device *dev)
{
struct gtp_dev *gtp = netdev_priv(dev);
kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
}
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@ -665,10 +671,13 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;
if (!data[IFLA_GTP_PDP_HASHSIZE])
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
hashsize = 1024;
else
} else {
hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
if (!hashsize)
hashsize = 1024;
}
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
@ -682,13 +691,15 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
gn = net_generic(dev_net(dev), gtp_net_id);
list_add_rcu(&gtp->list, &gn->gtp_dev_list);
dev->priv_destructor = gtp_destructor;
netdev_dbg(dev, "registered new GTP interface\n");
return 0;
out_hashtable:
gtp_hashtable_free(gtp);
kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
out_encap:
gtp_encap_disable(gtp);
return err;
@ -697,9 +708,14 @@ out_encap:
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
struct pdp_ctx *pctx;
int i;
for (i = 0; i < gtp->hash_size; i++)
hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
gtp_encap_disable(gtp);
gtp_hashtable_free(gtp);
list_del_rcu(&gtp->list);
unregister_netdevice_queue(dev, head);
}
@ -775,20 +791,6 @@ err1:
return -ENOMEM;
}
static void gtp_hashtable_free(struct gtp_dev *gtp)
{
struct pdp_ctx *pctx;
int i;
for (i = 0; i < gtp->hash_size; i++)
hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
synchronize_rcu();
kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
}
static struct sock *gtp_encap_enable_socket(int fd, int type,
struct gtp_dev *gtp)
{
@ -929,24 +931,31 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
}
}
static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
struct genl_info *info)
static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
struct genl_info *info)
{
struct pdp_ctx *pctx, *pctx_tid = NULL;
struct net_device *dev = gtp->dev;
u32 hash_ms, hash_tid = 0;
struct pdp_ctx *pctx;
unsigned int version;
bool found = false;
__be32 ms_addr;
ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
version = nla_get_u32(info->attrs[GTPA_VERSION]);
hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
if (pctx->ms_addr_ip4.s_addr == ms_addr) {
found = true;
break;
}
}
pctx = ipv4_pdp_find(gtp, ms_addr);
if (pctx)
found = true;
if (version == GTP_V0)
pctx_tid = gtp0_pdp_find(gtp,
nla_get_u64(info->attrs[GTPA_TID]));
else if (version == GTP_V1)
pctx_tid = gtp1_pdp_find(gtp,
nla_get_u32(info->attrs[GTPA_I_TEI]));
if (pctx_tid)
found = true;
if (found) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
@ -954,6 +963,11 @@ static int ipv4_pdp_add(struct gtp_dev *gtp, struct sock *sk,
if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
if (pctx && pctx_tid)
return -EEXIST;
if (!pctx)
pctx = pctx_tid;
ipv4_pdp_fill(pctx, info);
if (pctx->gtp_version == GTP_V0)
@ -1077,7 +1091,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
goto out_unlock;
}
err = ipv4_pdp_add(gtp, sk, info);
err = gtp_pdp_add(gtp, sk, info);
out_unlock:
rcu_read_unlock();
@ -1235,43 +1249,46 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
int i, j, bucket = cb->args[0], skip = cb->args[1];
struct net *net = sock_net(skb->sk);
struct gtp_net *gn = net_generic(net, gtp_net_id);
unsigned long tid = cb->args[1];
int i, k = cb->args[0], ret;
struct pdp_ctx *pctx;
struct gtp_net *gn;
gn = net_generic(net, gtp_net_id);
if (cb->args[4])
return 0;
rcu_read_lock();
list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
if (last_gtp && last_gtp != gtp)
continue;
else
last_gtp = NULL;
for (i = k; i < gtp->hash_size; i++) {
hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
if (tid && tid != pctx->u.tid)
continue;
else
tid = 0;
ret = gtp_genl_fill_info(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
cb->nlh->nlmsg_type, pctx);
if (ret < 0) {
for (i = bucket; i < gtp->hash_size; i++) {
j = 0;
hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i],
hlist_tid) {
if (j >= skip &&
gtp_genl_fill_info(skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
cb->nlh->nlmsg_type, pctx)) {
cb->args[0] = i;
cb->args[1] = pctx->u.tid;
cb->args[1] = j;
cb->args[2] = (unsigned long)gtp;
goto out;
}
j++;
}
skip = 0;
}
bucket = 0;
}
cb->args[4] = 1;
out:
rcu_read_unlock();
return skb->len;
}
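
Why a zero hash size had to be rejected (minimal sketch, hypothetical
name): every PDP lookup reduces a key hash to a bucket index with a
modulo, e.g. ipv4_hashfn(ms_addr) % gtp->hash_size, so hash_size == 0
is a division by zero:

static unsigned int example_bucket(u32 hash, unsigned int hash_size)
{
	/* faults when hash_size == 0 -- hence the 1024 fallback above */
	return hash % hash_size;
}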

View File

@ -665,10 +665,10 @@ static void sixpack_close(struct tty_struct *tty)
{
struct sixpack *sp;
write_lock_bh(&disc_data_lock);
write_lock_irq(&disc_data_lock);
sp = tty->disc_data;
tty->disc_data = NULL;
write_unlock_bh(&disc_data_lock);
write_unlock_irq(&disc_data_lock);
if (!sp)
return;

View File

@ -783,10 +783,10 @@ static void mkiss_close(struct tty_struct *tty)
{
struct mkiss *ax;
write_lock_bh(&disc_data_lock);
write_lock_irq(&disc_data_lock);
ax = tty->disc_data;
tty->disc_data = NULL;
write_unlock_bh(&disc_data_lock);
write_unlock_irq(&disc_data_lock);
if (!ax)
return;

View File

@ -1259,11 +1259,11 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
ret = btt_data_read(arena, page, off, postmap, cur_len);
if (ret) {
int rc;
/* Media error - set the e_flag */
rc = btt_map_write(arena, premap, postmap, 0, 1,
NVDIMM_IO_ATOMIC);
if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
dev_warn_ratelimited(to_dev(arena),
"Error persistently tracking bad blocks at %#x\n",
premap);
goto out_rtt;
}

View File

@ -204,7 +204,6 @@ struct byt_gpio {
struct platform_device *pdev;
struct pinctrl_dev *pctl_dev;
struct pinctrl_desc pctl_desc;
raw_spinlock_t lock;
const struct byt_pinctrl_soc_data *soc_data;
struct byt_community *communities_copy;
struct byt_gpio_pin_context *saved_context;
@ -715,6 +714,8 @@ static const struct byt_pinctrl_soc_data *byt_soc_data[] = {
NULL,
};
static DEFINE_RAW_SPINLOCK(byt_lock);
static struct byt_community *byt_get_community(struct byt_gpio *vg,
unsigned int pin)
{
@ -856,7 +857,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
unsigned long flags;
int i;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
for (i = 0; i < group.npins; i++) {
void __iomem *padcfg0;
@ -876,7 +877,7 @@ static void byt_set_group_simple_mux(struct byt_gpio *vg,
writel(value, padcfg0);
}
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static void byt_set_group_mixed_mux(struct byt_gpio *vg,
@ -886,7 +887,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
unsigned long flags;
int i;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
for (i = 0; i < group.npins; i++) {
void __iomem *padcfg0;
@ -906,7 +907,7 @@ static void byt_set_group_mixed_mux(struct byt_gpio *vg,
writel(value, padcfg0);
}
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
@ -955,11 +956,11 @@ static void byt_gpio_clear_triggering(struct byt_gpio *vg, unsigned int offset)
unsigned long flags;
u32 value;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL);
writel(value, reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
@ -971,7 +972,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
u32 value, gpio_mux;
unsigned long flags;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
/*
* In most cases, func pin mux 000 means GPIO function.
@ -993,7 +994,7 @@ static int byt_gpio_request_enable(struct pinctrl_dev *pctl_dev,
"pin %u forcibly re-configured as GPIO\n", offset);
}
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
pm_runtime_get(&vg->pdev->dev);
@ -1021,7 +1022,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
unsigned long flags;
u32 value;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(val_reg);
value &= ~BYT_DIR_MASK;
@ -1038,7 +1039,7 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
"Potential Error: Setting GPIO with direct_irq_en to output");
writel(value, val_reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
@ -1107,11 +1108,11 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
u32 conf, pull, val, debounce;
u16 arg = 0;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
conf = readl(conf_reg);
pull = conf & BYT_PULL_ASSIGN_MASK;
val = readl(val_reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
@ -1138,9 +1139,9 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
if (!(conf & BYT_DEBOUNCE_EN))
return -EINVAL;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
debounce = readl(db_reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
case BYT_DEBOUNCE_PULSE_375US:
@ -1192,7 +1193,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
u32 conf, val, debounce;
int i, ret = 0;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
conf = readl(conf_reg);
val = readl(val_reg);
@ -1300,7 +1301,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
if (!ret)
writel(conf, conf_reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
return ret;
}
@ -1325,9 +1326,9 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
unsigned long flags;
u32 val;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
val = readl(reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
return !!(val & BYT_LEVEL);
}
@ -1342,13 +1343,13 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (!reg)
return;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
old_val = readl(reg);
if (value)
writel(old_val | BYT_LEVEL, reg);
else
writel(old_val & ~BYT_LEVEL, reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
@ -1361,9 +1362,9 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
if (!reg)
return -EINVAL;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
if (!(value & BYT_OUTPUT_EN))
return GPIOF_DIR_OUT;
@ -1406,14 +1407,14 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
const char *label;
unsigned int pin;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
pin = vg->soc_data->pins[i].number;
reg = byt_gpio_reg(vg, pin, BYT_CONF0_REG);
if (!reg) {
seq_printf(s,
"Could not retrieve pin %i conf0 reg\n",
pin);
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
continue;
}
conf0 = readl(reg);
@ -1422,11 +1423,11 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
if (!reg) {
seq_printf(s,
"Could not retrieve pin %i val reg\n", pin);
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
continue;
}
val = readl(reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
comm = byt_get_community(vg, pin);
if (!comm) {
@ -1510,9 +1511,9 @@ static void byt_irq_ack(struct irq_data *d)
if (!reg)
return;
raw_spin_lock(&vg->lock);
raw_spin_lock(&byt_lock);
writel(BIT(offset % 32), reg);
raw_spin_unlock(&vg->lock);
raw_spin_unlock(&byt_lock);
}
static void byt_irq_mask(struct irq_data *d)
@ -1536,7 +1537,7 @@ static void byt_irq_unmask(struct irq_data *d)
if (!reg)
return;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
switch (irqd_get_trigger_type(d)) {
@ -1557,7 +1558,7 @@ static void byt_irq_unmask(struct irq_data *d)
writel(value, reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
}
static int byt_irq_type(struct irq_data *d, unsigned int type)
@ -1571,7 +1572,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
if (!reg || offset >= vg->chip.ngpio)
return -EINVAL;
raw_spin_lock_irqsave(&vg->lock, flags);
raw_spin_lock_irqsave(&byt_lock, flags);
value = readl(reg);
WARN(value & BYT_DIRECT_IRQ_EN,
@ -1593,7 +1594,7 @@ static int byt_irq_type(struct irq_data *d, unsigned int type)
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
raw_spin_unlock_irqrestore(&vg->lock, flags);
raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
@ -1629,9 +1630,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
continue;
}
raw_spin_lock(&vg->lock);
raw_spin_lock(&byt_lock);
pending = readl(reg);
raw_spin_unlock(&vg->lock);
raw_spin_unlock(&byt_lock);
for_each_set_bit(pin, &pending, 32) {
virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
generic_handle_irq(virq);
@ -1833,8 +1834,6 @@ static int byt_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(vg->pctl_dev);
}
raw_spin_lock_init(&vg->lock);
ret = byt_gpio_probe(vg);
if (ret)
return ret;
@ -1850,8 +1849,11 @@ static int byt_gpio_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct byt_gpio *vg = platform_get_drvdata(pdev);
unsigned long flags;
int i;
raw_spin_lock_irqsave(&byt_lock, flags);
for (i = 0; i < vg->soc_data->npins; i++) {
void __iomem *reg;
u32 value;
@ -1872,6 +1874,7 @@ static int byt_gpio_suspend(struct device *dev)
vg->saved_context[i].val = value;
}
raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
@ -1879,8 +1882,11 @@ static int byt_gpio_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct byt_gpio *vg = platform_get_drvdata(pdev);
unsigned long flags;
int i;
raw_spin_lock_irqsave(&byt_lock, flags);
for (i = 0; i < vg->soc_data->npins; i++) {
void __iomem *reg;
u32 value;
@ -1918,6 +1924,7 @@ static int byt_gpio_resume(struct device *dev)
}
}
raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
#endif

View File

@ -175,9 +175,9 @@ static struct posix_clock_operations ptp_clock_ops = {
.read = ptp_read,
};
static void delete_ptp_clock(struct posix_clock *pc)
static void ptp_clock_release(struct device *dev)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
@ -222,7 +222,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
}
ptp->clock.ops = ptp_clock_ops;
ptp->clock.release = delete_ptp_clock;
ptp->info = info;
ptp->devid = MKDEV(major, index);
ptp->index = index;
@ -249,15 +248,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
if (err)
goto no_pin_groups;
/* Create a new device in our class. */
ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
ptp, ptp->pin_attr_groups,
"ptp%d", ptp->index);
if (IS_ERR(ptp->dev)) {
err = PTR_ERR(ptp->dev);
goto no_device;
}
/* Register a new PPS source. */
if (info->pps) {
struct pps_source_info pps;
@ -273,8 +263,18 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
}
}
/* Create a posix clock. */
err = posix_clock_register(&ptp->clock, ptp->devid);
/* Initialize a new device of our class in our clock structure. */
device_initialize(&ptp->dev);
ptp->dev.devt = ptp->devid;
ptp->dev.class = ptp_class;
ptp->dev.parent = parent;
ptp->dev.groups = ptp->pin_attr_groups;
ptp->dev.release = ptp_clock_release;
dev_set_drvdata(&ptp->dev, ptp);
dev_set_name(&ptp->dev, "ptp%d", ptp->index);
/* Create a posix clock and link it to the device. */
err = posix_clock_register(&ptp->clock, &ptp->dev);
if (err) {
pr_err("failed to create posix clock\n");
goto no_clock;
@ -286,8 +286,6 @@ no_clock:
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
no_pps:
device_destroy(ptp_class, ptp->devid);
no_device:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
if (ptp->kworker)
@ -317,7 +315,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
if (ptp->pps_source)
pps_unregister_source(ptp->pps_source);
device_destroy(ptp_class, ptp->devid);
ptp_cleanup_pin_groups(ptp);
posix_clock_unregister(&ptp->clock);
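
The lifetime pattern being adopted, as a minimal sketch (hypothetical
names): embed the struct device in the refcounted object and free it
only from the device's release callback, so a still-open character
device keeps the memory alive after unregister:

struct example_clock {
	struct device dev;	/* embedded, reference counted */
	/* ... driver state ... */
};

static void example_release(struct device *dev)
{
	/* runs only once the last reference is dropped */
	kfree(container_of(dev, struct example_clock, dev));
}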

View File

@ -41,7 +41,7 @@ struct timestamp_event_queue {
struct ptp_clock {
struct posix_clock clock;
struct device *dev;
struct device dev;
struct ptp_clock_info *info;
dev_t devid;
int index; /* index into clocks.map */

View File

@ -75,6 +75,7 @@ struct error_hdr {
#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
#define REP82_ERROR_RESERVED_FIELD 0x88
#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B
#define REP82_ERROR_TRANSPORT_FAIL 0x90
#define REP82_ERROR_PACKET_TRUNCATED 0xA0
#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
@ -105,6 +106,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
case REP82_ERROR_INVALID_DOMAIN_PENDING:
case REP82_ERROR_INVALID_SPECIAL_CMD:
case REP82_ERROR_FILTERED_BY_HYPERVISOR:
// REP88_ERROR_INVALID_KEY // '82' CEX2A
// REP88_ERROR_OPERAND // '84' CEX2A
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A

View File

@ -129,6 +129,9 @@
#define NCR5380_release_dma_irq(x)
#endif
static unsigned int disconnect_mask = ~0;
module_param(disconnect_mask, int, 0444);
static int do_abort(struct Scsi_Host *);
static void do_reset(struct Scsi_Host *);
static void bus_reset_cleanup(struct Scsi_Host *);
@ -946,7 +949,8 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
int err;
bool ret = true;
bool can_disconnect = instance->irq != NO_IRQ &&
cmd->cmnd[0] != REQUEST_SENSE;
cmd->cmnd[0] != REQUEST_SENSE &&
(disconnect_mask & BIT(scmd_id(cmd)));
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
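
Usage note: disconnect_mask is a per-target bitmask, so e.g. loading
the module with disconnect_mask=0xfe clears BIT(0) and target 0 is
never granted disconnect privilege while targets 1-7 keep it; the
default of ~0 preserves the old behavior for every ID.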

View File

@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
atari_scsi_template.sg_tablesize = SG_ALL;
} else {
atari_scsi_template.can_queue = 1;
atari_scsi_template.sg_tablesize = SG_NONE;
atari_scsi_template.sg_tablesize = 1;
}
if (setup_can_queue > 0)
@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
if (setup_cmd_per_lun > 0)
atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
/* Leave sg_tablesize at 0 on a Falcon! */
if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
/* Don't increase sg_tablesize on Falcon! */
if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0)
atari_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0) {

View File

@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_port_name *port_name;
uint8_t buf[64];
uint8_t *fc4_type;
unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
@ -377,13 +378,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len = (uint32_t)(pld - (uint8_t *)cmd);
/* Submit FDMI RPA request */
spin_lock_irq(&hw->lock);
spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
}
spin_unlock_irq(&hw->lock);
spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@ -404,6 +405,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
struct fc_fdmi_rpl *reg_pl;
struct fs_fdmi_attrs *attrib_blk;
uint8_t buf[64];
unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
@ -483,13 +485,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
attrib_blk->numattrs = htonl(numattrs);
/* Submit FDMI RHBA request */
spin_lock_irq(&hw->lock);
spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
}
spin_unlock_irq(&hw->lock);
spin_unlock_irqrestore(&hw->lock, flags);
}
/*
@ -504,6 +506,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
void *cmd;
struct fc_fdmi_port_name *port_name;
uint32_t len;
unsigned long flags;
if (fdmi_req->wr_status != FW_SUCCESS) {
csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
@ -534,13 +537,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
len += sizeof(*port_name);
/* Submit FDMI request */
spin_lock_irq(&hw->lock);
spin_lock_irqsave(&hw->lock, flags);
if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
FCOE_CT, &fdmi_req->dma_buf, len)) {
CSIO_INC_STATS(ln, n_fdmi_err);
csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
}
spin_unlock_irq(&hw->lock);
spin_unlock_irqrestore(&hw->lock, flags);
}
/**

View File

@ -372,8 +372,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
unsigned int noreclaim_flag;
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
int rc = 0;
if (!tcp_sw_conn->sock) {
iscsi_conn_printk(KERN_ERR, conn,
"Transport not bound to socket!\n");
return -EINVAL;
}
noreclaim_flag = memalloc_noreclaim_save();
while (iscsi_sw_tcp_xmit_qlen(conn)) {

View File

@ -4102,7 +4102,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
mempool_free(mbox, phba->mbox_mem_pool);
}
out:
if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
spin_unlock_irq(shost->host_lock);

View File

@ -5220,9 +5220,14 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
/* If we've already received a PLOGI from this NPort
* we don't need to try to discover it again.
*/
if (ndlp->nlp_flag & NLP_RCV_PLOGI)
if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
!(ndlp->nlp_type &
(NLP_FCP_TARGET | NLP_NVME_TARGET)))
return NULL;
ndlp->nlp_prev_state = ndlp->nlp_state;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
spin_unlock_irq(shost->host_lock);

View File

@ -483,8 +483,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* single discovery thread, this will cause a huge delay in
* discovery. Also this will cause multiple state machines
* running in parallel for this node.
* This only applies to a fabric environment.
*/
if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) {
if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) &&
(vport->fc_flag & FC_FABRIC)) {
/* software abort outstanding PLOGI */
lpfc_els_abort(phba, ndlp);
}

View File

@ -12689,13 +12689,19 @@ send_current_mbox:
phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
/* Setting active mailbox pointer need to be in sync to flag clear */
phba->sli.mbox_active = NULL;
if (bf_get(lpfc_trailer_consumed, mcqe))
lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
/* Wake up worker thread to post the next pending mailbox command */
lpfc_worker_wake_up(phba);
return workposted;
out_no_mqe_complete:
spin_lock_irqsave(&phba->hbalock, iflags);
if (bf_get(lpfc_trailer_consumed, mcqe))
lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
return workposted;
spin_unlock_irqrestore(&phba->hbalock, iflags);
return false;
}
/**
@ -17486,6 +17492,13 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
/*
* if the rpi value indicates a prior unreg has already
* been done, skip the unreg.
*/
if (rpi == LPFC_RPI_ALLOC_ERROR)
return;
if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
phba->sli4_hba.rpi_count--;
phba->sli4_hba.max_cfg_param.rpi_used--;

View File

@ -429,7 +429,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev)
mac_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
mac_scsi_template.cmd_per_lun = setup_cmd_per_lun;
if (setup_sg_tablesize >= 0)
if (setup_sg_tablesize > 0)
mac_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
mac_scsi_template.this_id = setup_hostid & 7;

View File

@ -1502,7 +1502,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
" for diag buffers, requested size(%d)\n",
ioc->name, __func__, request_data_sz);
mpt3sas_base_free_smid(ioc, smid);
return -ENOMEM;
rc = -ENOMEM;
goto out;
}
ioc->diag_buffer[buffer_type] = request_data;
ioc->diag_buffer_sz[buffer_type] = request_data_sz;

View File

@ -2368,6 +2368,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("task 0x%p done with io_status 0x%x"
" resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat));
if (t->slow_task)
complete(&t->slow_task->completion);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);

View File

@ -4960,6 +4960,11 @@ static int __init scsi_debug_init(void)
return -EINVAL;
}
if (sdebug_num_tgts < 0) {
pr_err("num_tgts must be >= 0\n");
return -EINVAL;
}
if (sdebug_guard > 1) {
pr_err("guard must be 0 or 1\n");
return -EINVAL;

View File

@ -30,15 +30,18 @@ static const char *
scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
{
const char *ret = trace_seq_buffer_ptr(p);
sector_t lba = 0, txlen = 0;
u32 lba = 0, txlen;
lba |= ((cdb[1] & 0x1F) << 16);
lba |= (cdb[2] << 8);
lba |= cdb[3];
txlen = cdb[4];
/*
* From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256
* logical blocks shall be read (READ(6)) or written (WRITE(6)).
*/
txlen = cdb[4] ? cdb[4] : 256;
trace_seq_printf(p, "lba=%llu txlen=%llu",
(unsigned long long)lba, (unsigned long long)txlen);
trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen);
trace_seq_putc(p, 0);
return ret;
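
A self-contained sketch of the decode rule the fixed tracer applies
(illustrative only):

/* READ(6)/WRITE(6) pack a 21-bit LBA and an 8-bit TRANSFER LENGTH into
 * the CDB; per SBC-2, a length byte of 0 means 256 blocks, never 0.
 */
static u32 rw6_lba(const unsigned char *cdb)
{
	return ((cdb[1] & 0x1F) << 16) | (cdb[2] << 8) | cdb[3];
}

static u32 rw6_txlen(const unsigned char *cdb)
{
	return cdb[4] ? cdb[4] : 256;
}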

View File

@ -498,7 +498,7 @@ static struct scsi_host_template sun3_scsi_template = {
.eh_host_reset_handler = sun3scsi_host_reset,
.can_queue = 16,
.this_id = 7,
.sg_tablesize = SG_NONE,
.sg_tablesize = 1,
.cmd_per_lun = 2,
.use_clustering = DISABLE_CLUSTERING,
.cmd_size = NCR5380_CMD_SIZE,
@ -520,7 +520,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev)
sun3_scsi_template.can_queue = setup_can_queue;
if (setup_cmd_per_lun > 0)
sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun;
if (setup_sg_tablesize >= 0)
if (setup_sg_tablesize > 0)
sun3_scsi_template.sg_tablesize = setup_sg_tablesize;
if (setup_hostid >= 0)
sun3_scsi_template.this_id = setup_hostid & 7;

View File

@ -2867,10 +2867,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
hba->dev_cmd.query.descriptor = NULL;
*buf_len = be16_to_cpu(response->upiu_res.length);
out_unlock:
hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
out:
ufshcd_release(hba);
@ -3684,15 +3684,24 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
int err;
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
__func__, ret);
/*
* If link recovery fails then return error so that caller
* don't retry the hibern8 enter again.
* If link recovery fails then return error code returned from
* ufshcd_link_recovery().
* If link recovery succeeds then return -EAGAIN to attempt
* hibern8 enter retry again.
*/
if (ufshcd_link_recovery(hba))
ret = -ENOLINK;
err = ufshcd_link_recovery(hba);
if (err) {
dev_err(hba->dev, "%s: link recovery failed", __func__);
ret = err;
} else {
ret = -EAGAIN;
}
} else
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
POST_CHANGE);
@ -3706,7 +3715,7 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
ret = __ufshcd_uic_hibern8_enter(hba);
if (!ret || ret == -ENOLINK)
if (!ret)
goto out;
}
out:

View File

@ -832,9 +832,9 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
if (ret)
goto err;
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
ret = -EINVAL;
irq = platform_get_irq(ofdev, 0);
if (irq < 0) {
ret = irq;
goto err;
}
@ -847,7 +847,6 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
return 0;
err:
irq_dispose_mapping(irq);
if (type == TYPE_FSL)
of_fsl_spi_free_chipselects(dev);
return ret;

View File

@ -1158,7 +1158,9 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
conn->cid);
target_get_sess_cmd(&cmd->se_cmd, true);
if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
@ -2004,7 +2006,9 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
target_get_sess_cmd(&cmd->se_cmd, true);
if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
@ -4236,6 +4240,8 @@ int iscsit_close_connection(
* must wait until they have completed.
*/
iscsit_check_conn_usage_count(conn);
target_sess_cmd_list_set_waiting(sess->se_sess);
target_wait_for_sess_cmds(sess->se_sess);
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {

View File

@ -78,7 +78,7 @@ static int chap_check_algorithm(const char *a_str)
if (!token)
goto out;
if (!strncmp(token, "5", 1)) {
if (!strcmp(token, "5")) {
pr_debug("Selected MD5 Algorithm\n");
kfree(orig);
return CHAP_DIGEST_MD5;
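
Worked example: strncmp(token, "5", 1) inspects only the first byte,
so CHAP_A values such as "55" or "5abc" previously selected MD5 as
well; strcmp(token, "5") matches exactly "5" and rejects the rest.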

View File

@ -2183,27 +2183,6 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
mode |= ATMEL_US_USMODE_NORMAL;
}
/* set the mode, clock divisor, parity, stop bits and data size */
atmel_uart_writel(port, ATMEL_US_MR, mode);
/*
* when switching the mode, set the RTS line state according to the
* new mode, otherwise keep the former state
*/
if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
unsigned int rts_state;
if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
/* let the hardware control the RTS line */
rts_state = ATMEL_US_RTSDIS;
} else {
/* force RTS line to low level */
rts_state = ATMEL_US_RTSEN;
}
atmel_uart_writel(port, ATMEL_US_CR, rts_state);
}
/*
* Set the baud rate:
* Fractional baudrate allows to setup output frequency more
@ -2229,6 +2208,28 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
quot = cd | fp << ATMEL_US_FP_OFFSET;
atmel_uart_writel(port, ATMEL_US_BRGR, quot);
/* set the mode, clock divisor, parity, stop bits and data size */
atmel_uart_writel(port, ATMEL_US_MR, mode);
/*
* when switching the mode, set the RTS line state according to the
* new mode, otherwise keep the former state
*/
if ((old_mode & ATMEL_US_USMODE) != (mode & ATMEL_US_USMODE)) {
unsigned int rts_state;
if ((mode & ATMEL_US_USMODE) == ATMEL_US_USMODE_HWHS) {
/* let the hardware control the RTS line */
rts_state = ATMEL_US_RTSDIS;
} else {
/* force RTS line to low level */
rts_state = ATMEL_US_RTSEN;
}
atmel_uart_writel(port, ATMEL_US_CR, rts_state);
}
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA | ATMEL_US_RSTRX);
atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN | ATMEL_US_RXEN);

View File

@ -436,7 +436,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
virtio_transport_deliver_tap_pkt(pkt);
/* Only accept correctly addressed packets */
if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
le64_to_cpu(pkt->hdr.dst_cid) ==
vhost_transport_get_local_cid())
virtio_transport_recv_pkt(pkt);
else
virtio_transport_free_pkt(pkt);

View File

@ -3823,7 +3823,13 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
* writes & truncates and since we take care of writing back page cache,
* we are protected against page writeback as well.
*/
inode_lock_shared(inode);
if (iocb->ki_flags & IOCB_NOWAIT) {
if (!inode_trylock_shared(inode))
return -EAGAIN;
} else {
inode_lock_shared(inode);
}
ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
iocb->ki_pos + count - 1);
if (ret)

View File

@ -726,7 +726,6 @@ start_journal_io:
submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
}
cond_resched();
stats.run.rs_blocks_logged += bufs;
/* Force a new descriptor to be generated next
time round the loop. */
@ -813,6 +812,7 @@ start_journal_io:
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
jbd2_unfile_log_bh(bh);
stats.run.rs_blocks_logged++;
/*
* The list contains temporary buffer heads created by
@ -858,6 +858,7 @@ start_journal_io:
BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
clear_buffer_jwrite(bh);
jbd2_unfile_log_bh(bh);
stats.run.rs_blocks_logged++;
__brelse(bh); /* One for getblk */
/* AKPM: bforget here */
}
@ -879,6 +880,7 @@ start_journal_io:
}
if (cbh)
err = journal_wait_on_commit_record(journal, cbh);
stats.run.rs_blocks_logged++;
if (jbd2_has_feature_async_commit(journal) &&
journal->j_flags & JBD2_BARRIER) {
blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);

View File

@ -335,8 +335,8 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
down_read(&OCFS2_I(inode)->ip_xattr_sem);
acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
up_read(&OCFS2_I(inode)->ip_xattr_sem);
if (IS_ERR(acl) || !acl)
return PTR_ERR(acl);
if (IS_ERR_OR_NULL(acl))
return PTR_ERR_OR_ZERO(acl);
ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
if (ret)
return ret;

View File

@ -2849,68 +2849,73 @@ EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
static int do_proc_dqstats(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
unsigned int type = (int *)table->data - dqstats.stat;
unsigned int type = (unsigned long *)table->data - dqstats.stat;
s64 value = percpu_counter_sum(&dqstats.counter[type]);
/* Filter negative values for non-monotonic counters */
if (value < 0 && (type == DQST_ALLOC_DQUOTS ||
type == DQST_FREE_DQUOTS))
value = 0;
/* Update global table */
dqstats.stat[type] =
percpu_counter_sum_positive(&dqstats.counter[type]);
return proc_dointvec(table, write, buffer, lenp, ppos);
dqstats.stat[type] = value;
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
static struct ctl_table fs_dqstats_table[] = {
{
.procname = "lookups",
.data = &dqstats.stat[DQST_LOOKUPS],
.maxlen = sizeof(int),
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "drops",
.data = &dqstats.stat[DQST_DROPS],
.maxlen = sizeof(int),
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "reads",
.data = &dqstats.stat[DQST_READS],
.maxlen = sizeof(int),
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "writes",
.data = &dqstats.stat[DQST_WRITES],
.maxlen = sizeof(int),
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "cache_hits",
.data = &dqstats.stat[DQST_CACHE_HITS],
.maxlen = sizeof(int),
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "allocated_dquots",
.data = &dqstats.stat[DQST_ALLOC_DQUOTS],
.maxlen = sizeof(int),
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "free_dquots",
.data = &dqstats.stat[DQST_FREE_DQUOTS],
.maxlen = sizeof(int),
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},
{
.procname = "syncs",
.data = &dqstats.stat[DQST_SYNCS],
.maxlen = sizeof(int),
.maxlen = sizeof(unsigned long),
.mode = 0444,
.proc_handler = do_proc_dqstats,
},

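The substance of the quota hunk is widening dqstats.stat[] from int to unsigned long so the sysctl values cannot wrap or report negative, with proc_doulongvec_minmax() replacing proc_dointvec(); do_proc_dqstats() recovers the counter index by pointer arithmetic on table->data. A hedged userspace sketch of that index trick (the enum and array names are illustrative, not the kernel's):

	#include <stdio.h>

	enum { ST_LOOKUPS, ST_DROPS, ST_READS, ST_LAST };

	static unsigned long stat[ST_LAST];

	/* table->data points at one element of stat[]; subtracting the
	 * array base yields the enum index, counted in elements, not bytes.
	 * The bug fixed above: casting through the wrong element type
	 * (int * vs unsigned long *) computes the wrong index. */
	static unsigned int stat_index(void *data)
	{
		return (unsigned long *)data - stat;
	}

	int main(void)
	{
		printf("%u\n", stat_index(&stat[ST_READS]));	/* prints 2 */
		return 0;
	}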

@@ -65,6 +65,40 @@ out:
}
EXPORT_SYMBOL(iterate_dir);
/*
* POSIX says that a dirent name cannot contain NULL or a '/'.
*
* It's not 100% clear what we should really do in this case.
* The filesystem is clearly corrupted, but returning a hard
* error means that you now don't see any of the other names
* either, so that isn't a perfect alternative.
*
* And if you return an error, what error do you use? Several
* filesystems seem to have decided on EUCLEAN being the error
* code for EFSCORRUPTED, and that may be the error to use. Or
* just EIO, which is perhaps more obvious to users.
*
* In order to see the other file names in the directory, the
* caller might want to make this a "soft" error: skip the
* entry, and return the error at the end instead.
*
* Note that this should likely do a "memchr(name, 0, len)"
* check too, since that would be filesystem corruption as
* well. However, that case can't actually confuse user space,
* which has to do a strlen() on the name anyway to find the
* filename length, and the above "soft error" worry means
* that it's probably better left alone until we have that
* issue clarified.
*/
static int verify_dirent_name(const char *name, int len)
{
if (!len)
return -EIO;
if (memchr(name, '/', len))
return -EIO;
return 0;
}
/*
* Traditional linux readdir() handling..
*
@@ -174,6 +208,9 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
sizeof(long));
buf->error = verify_dirent_name(name, namlen);
if (unlikely(buf->error))
return buf->error;
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
@@ -260,6 +297,9 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
sizeof(u64));
buf->error = verify_dirent_name(name, namlen);
if (unlikely(buf->error))
return buf->error;
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;


@@ -1812,13 +1812,12 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
goto out;
features = uffdio_api.features;
if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) {
memset(&uffdio_api, 0, sizeof(uffdio_api));
if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
goto out;
ret = -EINVAL;
goto out;
}
ret = -EINVAL;
if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES))
goto err_out;
ret = -EPERM;
if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE))
goto err_out;
/* report all available features and ioctls to userland */
uffdio_api.features = UFFD_API_FEATURES;
uffdio_api.ioctls = UFFD_API_IOCTLS;
@@ -1831,6 +1830,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx,
ret = 0;
out:
return ret;
err_out:
memset(&uffdio_api, 0, sizeof(uffdio_api));
if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
ret = -EFAULT;
goto out;
}
static long userfaultfd_ioctl(struct file *file, unsigned cmd,

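The userfaultfd rework above collapses two failure branches into one err_out label that zeroes the reply, copies it back to user space, and rejoins the common exit. A minimal sketch of that goto-unwind idiom, with invented names and a plain assignment standing in for copy_to_user():

	#include <errno.h>
	#include <string.h>

	struct api_reply { unsigned long features, ioctls; };

	/* One centralized failure path: report an empty reply, then reuse
	 * the common exit. Returns 0 on success, negative errno otherwise. */
	static int handle_api(struct api_reply *user_reply, int bad_request)
	{
		struct api_reply reply = { .features = ~0UL, .ioctls = ~0UL };
		int ret = 0;

		if (bad_request) {
			ret = -EINVAL;
			goto err_out;
		}
		*user_reply = reply;	/* stands in for copy_to_user() */
	out:
		return ret;
	err_out:
		memset(&reply, 0, sizeof(reply));
		*user_reply = reply;	/* tell userland nothing is available */
		goto out;
	}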

@@ -409,12 +409,18 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);
/*
* Helper function to check, whether the timer is on one of the queues
/**
* hrtimer_is_queued - check, whether the timer is on one of the queues
* @timer: Timer to check
*
* Returns: True if the timer is queued, false otherwise
*
* The function can be used lockless, but it gives only a current snapshot.
*/
static inline int hrtimer_is_queued(struct hrtimer *timer)
static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
return timer->state & HRTIMER_STATE_ENQUEUED;
/* The READ_ONCE pairs with the update functions of timer->state */
return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
}
/*

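The annotation above pairs a lockless READ_ONCE() in hrtimer_is_queued() with WRITE_ONCE() stores of timer->state, forbidding the compiler from tearing, refetching, or caching the access. In userspace terms this is roughly a relaxed C11 atomic; a hedged sketch under that assumption:

	#include <stdatomic.h>
	#include <stdbool.h>

	#define STATE_INACTIVE	0x00
	#define STATE_ENQUEUED	0x01

	static _Atomic unsigned char timer_state = STATE_INACTIVE;

	/* Lockless snapshot; only tells you the state at this instant. */
	static bool timer_is_queued(void)
	{
		return atomic_load_explicit(&timer_state,
					    memory_order_relaxed) & STATE_ENQUEUED;
	}

	/* Update side: a plain store would be free to tear; this one is not. */
	static void timer_set_state(unsigned char newstate)
	{
		atomic_store_explicit(&timer_state, newstate,
				      memory_order_relaxed);
	}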

@@ -7,6 +7,9 @@
#include <asm/byteorder.h>
#define INT32_MAX S32_MAX
#define UINT32_MAX U32_MAX
typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
typedef __be64 fdt64_t;


@@ -82,29 +82,32 @@ struct posix_clock_operations {
*
* @ops: Functional interface to the clock
* @cdev: Character device instance for this clock
* @kref: Reference count.
* @dev: Pointer to the clock's device.
* @rwsem: Protects the 'zombie' field from concurrent access.
* @zombie: If 'zombie' is true, then the hardware has disappeared.
* @release: A function to free the structure when the reference count reaches
* zero. May be NULL if structure is statically allocated.
*
* Drivers should embed their struct posix_clock within a private
* structure, obtaining a reference to it during callbacks using
* container_of().
*
* Drivers should supply an initialized but not exposed struct device
* to posix_clock_register(). It is used to manage lifetime of the
* driver's private structure. It's 'release' field should be set to
* a release function for this private structure.
*/
struct posix_clock {
struct posix_clock_operations ops;
struct cdev cdev;
struct kref kref;
struct device *dev;
struct rw_semaphore rwsem;
bool zombie;
void (*release)(struct posix_clock *clk);
};
/**
* posix_clock_register() - register a new clock
* @clk: Pointer to the clock. Caller must provide 'ops' and 'release'
* @devid: Allocated device id
* @clk: Pointer to the clock. Caller must provide 'ops' field
* @dev: Pointer to the initialized device. Caller must provide
* 'release' field
*
* A clock driver calls this function to register itself with the
* clock device subsystem. If 'clk' points to dynamically allocated
@@ -113,7 +116,7 @@ struct posix_clock {
*
* Returns zero on success, non-zero otherwise.
*/
int posix_clock_register(struct posix_clock *clk, dev_t devid);
int posix_clock_register(struct posix_clock *clk, struct device *dev);
/**
* posix_clock_unregister() - unregister a clock

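The API change above ties the posix_clock's lifetime to a driver-supplied struct device: open takes a reference with get_device(), release drops it with put_device(), and the device's release callback frees the embedding private structure, so an open chardev keeps the data alive after unregister. A simplified refcount model of that contract (userspace sketch with invented names, not the kernel API):

	#include <stdlib.h>

	struct dev_sketch {
		int refcnt;
		void (*release)(struct dev_sketch *dev);	/* frees the owner */
	};

	struct clock_priv {
		struct dev_sketch dev;	/* first member: &priv->dev == priv */
		/* ... driver state ... */
	};

	static void clock_priv_release(struct dev_sketch *d)
	{
		free((struct clock_priv *)d);	/* container_of() in the kernel */
	}

	static void get_dev(struct dev_sketch *d)
	{
		d->refcnt++;
	}

	static void put_dev(struct dev_sketch *d)
	{
		/* Last reference gone: the owner's release frees everything,
		 * however long after unregister the final close happens. */
		if (--d->refcnt == 0 && d->release)
			d->release(d);
	}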

@@ -263,7 +263,7 @@ enum {
};
struct dqstats {
int stat[_DQST_DQSTAT_LAST];
unsigned long stat[_DQST_DQSTAT_LAST];
struct percpu_counter counter[_DQST_DQSTAT_LAST];
};


@@ -100,6 +100,43 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
first->pprev = &n->next;
}
/**
* hlist_nulls_add_tail_rcu
* @n: the element to add to the hash list.
* @h: the list to add to.
*
* Description:
* Adds the specified element to the specified hlist_nulls,
* while permitting racing traversals.
*
* The caller must take whatever precautions are necessary
* (such as holding appropriate locks) to avoid racing
* with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
* or hlist_nulls_del_rcu(), running on this same list.
* However, it is perfectly legal to run concurrently with
* the _rcu list-traversal primitives, such as
* hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
* problems on Alpha CPUs. Regardless of the type of CPU, the
* list-traversal primitive must be guarded by rcu_read_lock().
*/
static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
struct hlist_nulls_head *h)
{
struct hlist_nulls_node *i, *last = NULL;
/* Note: write side code, so rcu accessors are not needed. */
for (i = h->first; !is_a_nulls(i); i = i->next)
last = i;
if (last) {
n->next = last->next;
n->pprev = &last->next;
rcu_assign_pointer(hlist_next_rcu(last), n);
} else {
hlist_nulls_add_head_rcu(n, h);
}
}
/**
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.


@@ -1655,7 +1655,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
*/
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
struct sk_buff *skb = list_->prev;
struct sk_buff *skb = READ_ONCE(list_->prev);
if (skb == (struct sk_buff *)list_)
skb = NULL;
@@ -1723,7 +1723,9 @@ static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
/* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
/* See skb_queue_empty_lockless() and skb_peek_tail()
* for the opposite READ_ONCE()
*/
WRITE_ONCE(newsk->next, next);
WRITE_ONCE(newsk->prev, prev);
WRITE_ONCE(next->prev, newsk);


@@ -542,7 +542,16 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
struct dst_entry *dst = skb_dst(skb);
if (dst && dst->ops->update_pmtu)
dst->ops->update_pmtu(dst, NULL, skb, mtu);
dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
}
/* update dst pmtu but not do neighbor confirm */
static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
{
struct dst_entry *dst = skb_dst(skb);
if (dst && dst->ops->update_pmtu)
dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}
#endif /* _NET_DST_H */


@@ -27,7 +27,8 @@ struct dst_ops {
struct dst_entry * (*negative_advice)(struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu);
struct sk_buff *skb, u32 mtu,
bool confirm_neigh);
void (*redirect)(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);

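The dst_ops change above threads a confirm_neigh flag through every ->update_pmtu() implementation, so tunnel paths can refresh the path MTU without also confirming the neighbour entry (a PMTU ICMP from somewhere on the path proves nothing about the next hop's liveness). A hedged sketch of that callback-flag pattern with deliberately simplified types:

	#include <stdbool.h>
	#include <stdio.h>

	struct dst_sketch;

	struct dst_ops_sketch {
		void (*update_pmtu)(struct dst_sketch *dst, unsigned int mtu,
				    bool confirm_neigh);
	};

	struct dst_sketch {
		const struct dst_ops_sketch *ops;
		unsigned int mtu;
	};

	static void generic_update_pmtu(struct dst_sketch *dst, unsigned int mtu,
					bool confirm_neigh)
	{
		if (confirm_neigh)
			printf("refreshing neighbour entry\n"); /* dst_confirm_neigh() */
		if (mtu < dst->mtu)
			dst->mtu = mtu;
	}

	/* Tunnel/VTI callers switch to the no-confirm variant. */
	static void update_pmtu_no_confirm(struct dst_sketch *dst,
					   unsigned int mtu)
	{
		dst->ops->update_pmtu(dst, mtu, false);
	}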

@@ -106,12 +106,18 @@ struct inet_bind_hashbucket {
struct hlist_head chain;
};
/*
* Sockets can be hashed in established or listening table
/* Sockets can be hashed in established or listening table.
* We must use different 'nulls' end-of-chain value for all hash buckets :
* A socket might transition from ESTABLISH to LISTEN state without
* RCU grace period. A lookup in ehash table needs to handle this case.
*/
#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
spinlock_t lock;
struct hlist_head head;
union {
struct hlist_head head;
struct hlist_nulls_head nulls_head;
};
};
/* This is for listening sockets, thus all sockets which possess wildcards. */

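The union above moves listening sockets onto an hlist_nulls chain: each bucket's list terminates in a distinct "nulls" value (LISTENING_NULLS_BASE plus the bucket index), so an RCU reader racing with a socket that changed chains can notice it finished in the wrong bucket and retry. Roughly, the terminator check works as below; this is an illustrative re-implementation, assuming the kernel's convention of encoding the marker in pointer bit 0.

	#include <stdbool.h>
	#include <stdint.h>

	/* Bit 0 set marks an end-of-list "nulls" pointer; the remaining
	 * bits carry a per-bucket identifier, not a real node address. */
	static inline bool is_a_nulls_ptr(uintptr_t p)
	{
		return p & 1;
	}

	static inline uintptr_t nulls_value(uintptr_t p)
	{
		return p >> 1;
	}

	/* A lockless lookup that walked off the chain checks whether the
	 * terminator belongs to the bucket it started in; if not, the
	 * entry migrated mid-walk and the walk must restart. */
	static bool ended_in_right_bucket(uintptr_t end, uintptr_t bucket_id)
	{
		return is_a_nulls_ptr(end) && nulls_value(end) == bucket_id;
	}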

@@ -693,6 +693,11 @@ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_h
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}
static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
}
static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
sock_hold(sk);


@@ -638,6 +638,7 @@ struct iscsi_reject {
#define ISCSI_REASON_BOOKMARK_INVALID 9
#define ISCSI_REASON_BOOKMARK_NO_RESOURCES 10
#define ISCSI_REASON_NEGOTIATION_RESET 11
#define ISCSI_REASON_WAITING_FOR_LOGOUT 12
/* Max. number of Key=Value pairs in a text message */
#define MAX_KEY_VALUE_PAIRS 8192


@@ -1442,7 +1442,7 @@ static struct ctl_table vm_table[] = {
.procname = "drop_caches",
.data = &sysctl_drop_caches,
.maxlen = sizeof(int),
.mode = 0644,
.mode = 0200,
.proc_handler = drop_caches_sysctl_handler,
.extra1 = &one,
.extra2 = &four,


@@ -861,7 +861,8 @@ static int enqueue_hrtimer(struct hrtimer *timer,
base->cpu_base->active_bases |= 1 << base->index;
timer->state = HRTIMER_STATE_ENQUEUED;
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->state, HRTIMER_STATE_ENQUEUED);
return timerqueue_add(&base->active, &timer->node);
}
@@ -883,7 +884,8 @@ static void __remove_hrtimer(struct hrtimer *timer,
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
u8 state = timer->state;
timer->state = newstate;
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->state, newstate);
if (!(state & HRTIMER_STATE_ENQUEUED))
return;
@@ -910,8 +912,9 @@ static void __remove_hrtimer(struct hrtimer *timer,
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
{
if (hrtimer_is_queued(timer)) {
u8 state = timer->state;
u8 state = timer->state;
if (state & HRTIMER_STATE_ENQUEUED) {
int reprogram;
/*


@@ -27,8 +27,6 @@
#include "posix-timers.h"
static void delete_clock(struct kref *kref);
/*
* Returns NULL if the posix_clock instance attached to 'fp' is old and stale.
*/
@@ -138,7 +136,7 @@ static int posix_clock_open(struct inode *inode, struct file *fp)
err = 0;
if (!err) {
kref_get(&clk->kref);
get_device(clk->dev);
fp->private_data = clk;
}
out:
@@ -154,7 +152,7 @@ static int posix_clock_release(struct inode *inode, struct file *fp)
if (clk->ops.release)
err = clk->ops.release(clk);
kref_put(&clk->kref, delete_clock);
put_device(clk->dev);
fp->private_data = NULL;
@@ -174,38 +172,35 @@ static const struct file_operations posix_clock_file_operations = {
#endif
};
int posix_clock_register(struct posix_clock *clk, dev_t devid)
int posix_clock_register(struct posix_clock *clk, struct device *dev)
{
int err;
kref_init(&clk->kref);
init_rwsem(&clk->rwsem);
cdev_init(&clk->cdev, &posix_clock_file_operations);
err = cdev_device_add(&clk->cdev, dev);
if (err) {
pr_err("%s unable to add device %d:%d\n",
dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
return err;
}
clk->cdev.owner = clk->ops.owner;
err = cdev_add(&clk->cdev, devid, 1);
clk->dev = dev;
return err;
return 0;
}
EXPORT_SYMBOL_GPL(posix_clock_register);
static void delete_clock(struct kref *kref)
{
struct posix_clock *clk = container_of(kref, struct posix_clock, kref);
if (clk->release)
clk->release(clk);
}
void posix_clock_unregister(struct posix_clock *clk)
{
cdev_del(&clk->cdev);
cdev_device_del(&clk->cdev, clk->dev);
down_write(&clk->rwsem);
clk->zombie = true;
up_write(&clk->rwsem);
kref_put(&clk->kref, delete_clock);
put_device(clk->dev);
}
EXPORT_SYMBOL_GPL(posix_clock_unregister);


@@ -437,6 +437,7 @@ void debug_dma_dump_mappings(struct device *dev)
}
spin_unlock_irqrestore(&bucket->lock, flags);
cond_resched();
}
}
EXPORT_SYMBOL(debug_dma_dump_mappings);


@@ -643,6 +643,9 @@ static unsigned int br_nf_forward_arp(void *priv,
nf_bridge_pull_encap_header(skb);
}
if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr))))
return NF_DROP;
if (arp_hdr(skb)->ar_pln != 4) {
if (IS_VLAN_ARP(skb))
nf_bridge_push_encap_header(skb);


@@ -26,7 +26,8 @@
#endif
static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu)
struct sk_buff *skb, u32 mtu,
bool confirm_neigh)
{
}


@@ -1876,7 +1876,7 @@ static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
}
static int ebt_buf_add(struct ebt_entries_buf_state *state,
void *data, unsigned int sz)
const void *data, unsigned int sz)
{
if (state->buf_kern_start == NULL)
goto count_only;
@@ -1910,7 +1910,7 @@ enum compat_mwt {
EBT_COMPAT_TARGET,
};
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt,
enum compat_mwt compat_mwt,
struct ebt_entries_buf_state *state,
const unsigned char *base)
@@ -1986,22 +1986,23 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
/* return size of all matches, watchers or target, including necessary
* alignment and padding.
*/
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32,
unsigned int size_left, enum compat_mwt type,
struct ebt_entries_buf_state *state, const void *base)
{
const char *buf = (const char *)match32;
int growth = 0;
char *buf;
if (size_left == 0)
return 0;
buf = (char *) match32;
while (size_left >= sizeof(*match32)) {
do {
struct ebt_entry_match *match_kern;
int ret;
if (size_left < sizeof(*match32))
return -EINVAL;
match_kern = (struct ebt_entry_match *) state->buf_kern_start;
if (match_kern) {
char *tmp;
@@ -2038,22 +2039,18 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
if (match_kern)
match_kern->match_size = ret;
/* rule should have no remaining data after target */
if (type == EBT_COMPAT_TARGET && size_left)
return -EINVAL;
match32 = (struct compat_ebt_entry_mwt *) buf;
}
} while (size_left);
return growth;
}
/* called for all ebt_entry structures. */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base,
unsigned int *total,
struct ebt_entries_buf_state *state)
{
unsigned int i, j, startoff, new_offset = 0;
unsigned int i, j, startoff, next_expected_off, new_offset = 0;
/* stores match/watchers/targets & offset of next struct ebt_entry: */
unsigned int offsets[4];
unsigned int *offsets_update = NULL;
@@ -2140,11 +2137,13 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
return ret;
}
startoff = state->buf_user_offset - startoff;
if (WARN_ON(*total < startoff))
next_expected_off = state->buf_user_offset - startoff;
if (next_expected_off != entry->next_offset)
return -EINVAL;
*total -= startoff;
if (*total < entry->next_offset)
return -EINVAL;
*total -= entry->next_offset;
return 0;
}

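The ebtables rework converts an open-coded while loop into a do/while that checks size_left before each read and finally requires the computed offset to land exactly on entry->next_offset. The underlying discipline for walking untrusted variable-length records, as a standalone sketch (the record struct and names are invented for illustration):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	struct rec_hdr {
		uint32_t len;	/* total record length, header included */
	};

	/* Returns 0 if buf is a clean tiling of records, -1 on malformed
	 * input: a truncated header or a length escaping the buffer. */
	static int walk_records(const uint8_t *buf, size_t total)
	{
		size_t off = 0;

		while (off < total) {
			struct rec_hdr hdr;

			if (total - off < sizeof(hdr))
				return -1;	/* truncated header */
			memcpy(&hdr, buf + off, sizeof(hdr));	/* alignment-safe */
			if (hdr.len < sizeof(hdr) || hdr.len > total - off)
				return -1;	/* length escapes the buffer */
			off += hdr.len;
		}
		return 0;	/* off == total: records tile the buffer exactly */
	}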

@@ -274,6 +274,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
return ret;
}
# ifdef CONFIG_HAVE_EBPF_JIT
static int
proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
@@ -284,6 +285,7 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
# endif /* CONFIG_HAVE_EBPF_JIT */
static int
proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,


@@ -118,7 +118,8 @@ static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb , u32 mtu);
struct sk_buff *skb , u32 mtu,
bool confirm_neigh);
static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
@@ -259,7 +260,8 @@ static int dn_dst_gc(struct dst_ops *ops)
* advertise to the other end).
*/
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu)
struct sk_buff *skb, u32 mtu,
bool confirm_neigh)
{
struct dn_route *rt = (struct dn_route *) dst;
struct neighbour *n = rt->n;


@@ -254,10 +254,11 @@ bool icmp_global_allow(void)
bool rc = false;
/* Check if token bucket is empty and cannot be refilled
* without taking the spinlock.
* without taking the spinlock. The READ_ONCE() are paired
* with the following WRITE_ONCE() in this same function.
*/
if (!icmp_global.credit) {
delta = min_t(u32, now - icmp_global.stamp, HZ);
if (!READ_ONCE(icmp_global.credit)) {
delta = min_t(u32, now - READ_ONCE(icmp_global.stamp), HZ);
if (delta < HZ / 50)
return false;
}
@@ -267,14 +268,14 @@ bool icmp_global_allow(void)
if (delta >= HZ / 50) {
incr = sysctl_icmp_msgs_per_sec * delta / HZ ;
if (incr)
icmp_global.stamp = now;
WRITE_ONCE(icmp_global.stamp, now);
}
credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
if (credit) {
credit--;
rc = true;
}
icmp_global.credit = credit;
WRITE_ONCE(icmp_global.credit, credit);
spin_unlock(&icmp_global.lock);
return rc;
}

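icmp_global_allow() is a classic token bucket: credit refills from elapsed time, is capped at a burst size, and each allowed message spends one token. Below is a hedged single-threaded sketch of that logic; the defaults (1000 msgs/sec, burst 50) mirror the kernel sysctls but are assumptions here, and the spinlock plus the READ_ONCE()/WRITE_ONCE() fast path from the hunk above are deliberately omitted.

	#include <stdbool.h>
	#include <stdint.h>
	#include <time.h>

	#define MSGS_PER_SEC	1000u	/* assumed sysctl_icmp_msgs_per_sec */
	#define MSGS_BURST	50u	/* assumed sysctl_icmp_msgs_burst */

	static uint32_t credit;		/* tokens currently available */
	static uint64_t stamp_ms;	/* time of last refill */

	static uint64_t now_ms(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
	}

	static bool rate_limit_allow(void)
	{
		uint64_t now = now_ms();
		uint64_t delta = now - stamp_ms;

		if (delta > 1000)
			delta = 1000;	/* clamp, like min_t(u32, ..., HZ) */
		if (delta >= 20) {	/* refill at most every HZ/50 ~ 20 ms */
			uint32_t incr = MSGS_PER_SEC * delta / 1000;

			if (incr) {
				credit += incr;
				if (credit > MSGS_BURST)
					credit = MSGS_BURST;
				stamp_ms = now;
			}
		}
		if (!credit)
			return false;
		credit--;
		return true;
	}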

@@ -1088,7 +1088,7 @@ struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
if (!dst)
goto out;
}
dst->ops->update_pmtu(dst, sk, NULL, mtu);
dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
dst = __sk_dst_check(sk, 0);
if (!dst)


@@ -911,11 +911,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
struct inet_listen_hashbucket *ilb;
struct hlist_nulls_node *node;
num = 0;
ilb = &hashinfo->listening_hash[i];
spin_lock(&ilb->lock);
sk_for_each(sk, &ilb->head) {
sk_nulls_for_each(sk, node, &ilb->nulls_head) {
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net))


@@ -219,9 +219,10 @@ struct sock *__inet_lookup_listener(struct net *net,
int score, hiscore = 0, matches = 0, reuseport = 0;
bool exact_dif = inet_exact_dif_match(net, skb);
struct sock *sk, *result = NULL;
struct hlist_nulls_node *node;
u32 phash = 0;
sk_for_each_rcu(sk, &ilb->head) {
sk_nulls_for_each_rcu(sk, node, &ilb->nulls_head) {
score = compute_score(sk, net, hnum, daddr,
dif, sdif, exact_dif);
if (score > hiscore) {
@@ -442,10 +443,11 @@ static int inet_reuseport_add_sock(struct sock *sk,
struct inet_listen_hashbucket *ilb)
{
struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
const struct hlist_nulls_node *node;
struct sock *sk2;
kuid_t uid = sock_i_uid(sk);
sk_for_each_rcu(sk2, &ilb->head) {
sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
if (sk2 != sk &&
sk2->sk_family == sk->sk_family &&
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
@@ -480,9 +482,9 @@ int __inet_hash(struct sock *sk, struct sock *osk)
}
if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
sk->sk_family == AF_INET6)
hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
__sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
else
hlist_add_head_rcu(&sk->sk_node, &ilb->head);
__sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
sock_set_flag(sk, SOCK_RCU_FREE);
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
unlock:
@@ -525,10 +527,7 @@ void inet_unhash(struct sock *sk)
spin_lock_bh(lock);
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_detach_sock(sk);
if (listener)
done = __sk_del_node_init(sk);
else
done = __sk_nulls_del_node_init_rcu(sk);
done = __sk_nulls_del_node_init_rcu(sk);
if (done)
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
spin_unlock_bh(lock);
@@ -664,7 +663,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
for (i = 0; i < INET_LHTABLE_SIZE; i++) {
spin_lock_init(&h->listening_hash[i].lock);
INIT_HLIST_HEAD(&h->listening_hash[i].head);
INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head,
i + LISTENING_NULLS_BASE);
}
}
EXPORT_SYMBOL_GPL(inet_hashinfo_init);


@@ -159,7 +159,12 @@ static void inet_peer_gc(struct inet_peer_base *base,
base->total / inet_peer_threshold * HZ;
for (i = 0; i < gc_cnt; i++) {
p = gc_stack[i];
delta = (__u32)jiffies - p->dtime;
/* The READ_ONCE() pairs with the WRITE_ONCE()
* in inet_putpeer()
*/
delta = (__u32)jiffies - READ_ONCE(p->dtime);
if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
gc_stack[i] = NULL;
}
@@ -236,7 +241,10 @@ EXPORT_SYMBOL_GPL(inet_getpeer);
void inet_putpeer(struct inet_peer *p)
{
p->dtime = (__u32)jiffies;
/* The WRITE_ONCE() pairs with itself (we run lockless)
* and the READ_ONCE() in inet_peer_gc()
*/
WRITE_ONCE(p->dtime, (__u32)jiffies);
if (refcount_dec_and_test(&p->refcnt))
call_rcu(&p->rcu, inetpeer_free_rcu);


@@ -521,7 +521,7 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
else
mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
skb_dst_update_pmtu(skb, mtu);
skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
if (!skb_is_gso(skb) &&


@@ -244,7 +244,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
mtu = dst_mtu(dst);
if (skb->len > mtu) {
skb_dst_update_pmtu(skb, mtu);
skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));


@@ -145,7 +145,8 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu);
struct sk_buff *skb, u32 mtu,
bool confirm_neigh);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);
@@ -1042,7 +1043,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
}
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu)
struct sk_buff *skb, u32 mtu,
bool confirm_neigh)
{
struct rtable *rt = (struct rtable *) dst;
struct flowi4 fl4;
@@ -2529,7 +2531,8 @@ static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
}
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu)
struct sk_buff *skb, u32 mtu,
bool confirm_neigh)
{
}


@@ -1936,13 +1936,14 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
struct inet_listen_hashbucket *ilb;
struct hlist_nulls_node *node;
struct sock *sk = cur;
if (!sk) {
get_head:
ilb = &tcp_hashinfo.listening_hash[st->bucket];
spin_lock(&ilb->lock);
sk = sk_head(&ilb->head);
sk = sk_nulls_head(&ilb->nulls_head);
st->offset = 0;
goto get_sk;
}
@@ -1950,9 +1951,9 @@ get_head:
++st->num;
++st->offset;
sk = sk_next(sk);
sk = sk_nulls_next(sk);
get_sk:
sk_for_each_from(sk) {
sk_nulls_for_each_from(sk, node) {
if (!net_eq(sock_net(sk), net))
continue;
if (sk->sk_family == st->family)


@@ -2380,6 +2380,14 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (tcp_small_queue_check(sk, skb, 0))
break;
/* Argh, we hit an empty skb(), presumably a thread
* is sleeping in sendmsg()/sk_stream_wait_memory().
* We do not want to send a pure-ack packet and have
* a strange looking rtx queue with empty packet(s).
*/
if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
break;
if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
break;


@@ -1338,7 +1338,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
* queue contains some other skb
*/
rmem = atomic_add_return(size, &sk->sk_rmem_alloc);
if (rmem > (size + sk->sk_rcvbuf))
if (rmem > (size + (unsigned int)sk->sk_rcvbuf))
goto uncharge_drop;
spin_lock(&list->lock);

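The one-character-looking UDP change above is an integer-overflow fix: rmem, size, and sk_rcvbuf are signed ints, and with SO_RCVBUF pushed near INT_MAX the sum size + sk->sk_rcvbuf can wrap negative, making every packet look over the limit. Forcing unsigned arithmetic keeps the comparison sane. A tiny demonstration of the failure mode, with illustrative values (the signed branch deliberately exercises the overflow it warns about):

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		int rmem = 4096;		/* memory charged so far */
		int size = 1500;		/* incoming packet size */
		int rcvbuf = INT_MAX - 100;	/* huge SO_RCVBUF */

		/* Signed: size + rcvbuf overflows (undefined in ISO C; wraps
		 * negative on common ABIs), so the drop test misfires. */
		if (rmem > size + rcvbuf)
			printf("signed math: bogus drop\n");

		/* Unsigned: well-defined modular arithmetic, no bogus drop. */
		if ((unsigned int)rmem > size + (unsigned int)rcvbuf)
			printf("unsigned math: drop\n");
		else
			printf("unsigned math: accept\n");
		return 0;
	}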

@@ -222,12 +222,13 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
}
static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu)
struct sk_buff *skb, u32 mtu,
bool confirm_neigh)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;
path->ops->update_pmtu(path, sk, skb, mtu);
path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh);
}
static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,


@@ -150,7 +150,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
if (IS_ERR(dst))
return NULL;
dst->ops->update_pmtu(dst, sk, NULL, mtu);
dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
dst = inet6_csk_route_socket(sk, &fl6);
return IS_ERR(dst) ? NULL : dst;


@@ -137,9 +137,10 @@ struct sock *inet6_lookup_listener(struct net *net,
int score, hiscore = 0, matches = 0, reuseport = 0;
bool exact_dif = inet6_exact_dif_match(net, skb);
struct sock *sk, *result = NULL;
struct hlist_nulls_node *node;
u32 phash = 0;
sk_for_each(sk, &ilb->head) {
sk_nulls_for_each(sk, node, &ilb->nulls_head) {
score = compute_score(sk, net, hnum, daddr, dif, sdif, exact_dif);
if (score > hiscore) {
reuseport = sk->sk_reuseport;


@@ -527,7 +527,7 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
/* TooBig packet may have updated dst->dev's mtu */
if (dst && dst_mtu(dst) > dst->dev->mtu)
dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu);
dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false);
return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu,
NEXTHDR_GRE);


@@ -652,7 +652,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (rel_info > dst_mtu(skb_dst(skb2)))
goto out;
skb_dst_update_pmtu(skb2, rel_info);
skb_dst_update_pmtu_no_confirm(skb2, rel_info);
}
if (rel_type == ICMP_REDIRECT)
skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
@@ -1138,7 +1138,7 @@ route_lookup:
mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
IPV6_MIN_MTU : IPV4_MIN_MTU);
skb_dst_update_pmtu(skb, mtu);
skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
*pmtu = mtu;
err = -EMSGSIZE;


@@ -483,7 +483,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
mtu = dst_mtu(dst);
if (skb->len > mtu) {
skb_dst_update_pmtu(skb, mtu);
skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->protocol == htons(ETH_P_IPV6)) {
if (mtu < IPV6_MIN_MTU)


@@ -93,7 +93,8 @@ static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu);
struct sk_buff *skb, u32 mtu,
bool confirm_neigh);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static void rt6_dst_from_metrics_check(struct rt6_info *rt);
@@ -264,7 +265,8 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
}
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu)
struct sk_buff *skb, u32 mtu,
bool confirm_neigh)
{
}
@@ -1471,7 +1473,8 @@ static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
}
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
const struct ipv6hdr *iph, u32 mtu)
const struct ipv6hdr *iph, u32 mtu,
bool confirm_neigh)
{
const struct in6_addr *daddr, *saddr;
struct rt6_info *rt6 = (struct rt6_info *)dst;
@@ -1489,7 +1492,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
daddr = NULL;
saddr = NULL;
}
dst_confirm_neigh(dst, daddr);
if (confirm_neigh)
dst_confirm_neigh(dst, daddr);
mtu = max_t(u32, mtu, IPV6_MIN_MTU);
if (mtu >= dst_mtu(dst))
return;
@@ -1518,9 +1524,11 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
}
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu)
struct sk_buff *skb, u32 mtu,
bool confirm_neigh)
{
__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
confirm_neigh);
}
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
@@ -1540,7 +1548,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
dst = ip6_route_output(net, NULL, &fl6);
if (!dst->error)
__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);


@@ -932,7 +932,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
}
if (tunnel->parms.iph.daddr)
skb_dst_update_pmtu(skb, mtu);
skb_dst_update_pmtu_no_confirm(skb, mtu);
if (skb->len > mtu && !skb_is_gso(skb)) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);

Some files were not shown because too many files have changed in this diff.