Mirror of https://github.com/rd-stuffs/msm-4.14.git
This is the 4.14.242 stable release

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmEKansACgkQONu9yGCS
aT6UIBAAm4DQDMEH8AGwJVUHaWKoZrqb7kq9Ke4ELg6NwFP42EnpkKx6UXaAn/c/
zzn8Qc6KLdyd/oRV3wOLqkc3xxff/xisDf9JFfI1Nr7xJIsOn1+xTK1LchP9ca2c
ef4+mniSVXg6d7AQ8CNBvH6jCg1Cy8OEgvh2Kq91vh5enSJwDk1+ZS3ugnyMMygh
XkuQ3xMqmSe07uE07W0a2qTIDLd+V1SbNPQqOhw9xOuBF+pMvA1QyvtcAlvG3sjV
n9XSVm56rzguTsqv08MsMmQFjx8hrIx2z8MNG0FxKA4xQRs3LQ8Uuff6fjuDcLYZ
0FTgInMGDMNqjYBWOElqgrTqxPfCNjPygT3um1BmNfXBUyHcIhxiJmctQC1F2CX5
erAK4BDmcouLC/6uOQqUW0VG1oweTCEQxt1Y/1id8wXeOwon+7wYsm8ZeYKfOe1s
vECkW9tHJbAgmMflzHehjToobdYVoRB5P4ESZTD5lRy8jblNV+YQVY2ZiRRG6AOt
3am/UyziGe7M3fSTN+GeV3hPmsM8ggHa8/NOOncRFXml0E2g147aIKkGfMq1MCV3
eKZvFRcutCbwAU3hHHe11APJ9KsyFJnAiwb344eNPlhdWju76jyQJ9YKda6zIEUq
1SAwBDvbLDIbri1XYcqswfF8tWS0OndYIkDnaYqHOUfyKLmwa9Q=
=HBll
-----END PGP SIGNATURE-----

Merge 4.14.242 into android-4.14-stable

Changes in 4.14.242
	selftest: fix build error in tools/testing/selftests/vm/userfaultfd.c
	KVM: x86: determine if an exception has an error code only when injecting it.
	net: split out functions related to registering inflight socket files
	af_unix: fix garbage collect vs MSG_PEEK
	workqueue: fix UAF in pwq_unbound_release_workfn()
	net/802/mrp: fix memleak in mrp_request_join()
	net/802/garp: fix memleak in garp_request_join()
	net: annotate data race around sk_ll_usec
	sctp: move 198 addresses from unusable to private scope
	hfs: add missing clean-up in hfs_fill_super
	hfs: fix high memory mapping in hfs_bnode_read
	hfs: add lock nesting notation to hfs_find_init
	ARM: dts: versatile: Fix up interrupt controller node names
	virtio_net: Do not pull payload in skb->head
	gro: ensure frag0 meets IP header alignment
	x86/kvm: fix vcpu-id indexed array sizes
	ocfs2: fix zero out valid data
	ocfs2: issue zeroout to EOF blocks
	can: raw: raw_setsockopt(): fix raw_rcv panic for sock UAF
	can: mcba_usb_start(): add missing urb->transfer_dma initialization
	can: usb_8dev: fix memory leak
	can: ems_usb: fix memory leak
	can: esd_usb2: fix memory leak
	NIU: fix incorrect error return, missed in previous revert
	nfc: nfcsim: fix use after free during module unload
	x86/asm: Ensure asm/proto.h can be included stand-alone
	cfg80211: Fix possible memory leak in function cfg80211_bss_update
	netfilter: conntrack: adjust stop timestamp to real expiry value
	netfilter: nft_nat: allow to specify layer 4 protocol NAT only
	tipc: fix sleeping in tipc accept routine
	mlx4: Fix missing error code in mlx4_load_one()
	net: llc: fix skb_over_panic
	net/mlx5: Fix flow table chaining
	sctp: fix return value check in __sctp_rcv_asconf_lookup
	tulip: windbond-840: Fix missing pci_disable_device() in probe and remove
	sis900: Fix missing pci_disable_device() in probe and remove
	can: hi311x: fix a signedness bug in hi3110_cmd()
	Revert "perf map: Fix dso->nsinfo refcounting"
	Linux 4.14.242

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: Ie1a4d3b3c5e77ece40e540b361471dddef50441d
This commit is contained in:
commit 5d67c93aaf

Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 241
+SUBLEVEL = 242
 EXTRAVERSION =
 NAME = Petit Gorille
 
arch/arm/boot/dts/versatile-ab.dts
@@ -155,16 +155,15 @@
 			#size-cells = <1>;
 			ranges;
 
-			vic: intc@10140000 {
+			vic: interrupt-controller@10140000 {
 				compatible = "arm,versatile-vic";
 				interrupt-controller;
 				#interrupt-cells = <1>;
 				reg = <0x10140000 0x1000>;
 				clear-mask = <0xffffffff>;
 				valid-mask = <0xffffffff>;
 			};
 
-			sic: intc@10003000 {
+			sic: interrupt-controller@10003000 {
 				compatible = "arm,versatile-sic";
 				interrupt-controller;
 				#interrupt-cells = <1>;
arch/arm/boot/dts/versatile-pb.dts
@@ -7,7 +7,7 @@
 
 	amba {
 		/* The Versatile PB is using more SIC IRQ lines than the AB */
-		sic: intc@10003000 {
+		sic: interrupt-controller@10003000 {
 			clear-mask = <0xffffffff>;
 			/*
 			 * Valid interrupt lines mask according to
arch/x86/include/asm/proto.h
@@ -4,6 +4,8 @@
 
 #include <asm/ldt.h>
 
+struct task_struct;
+
 /* misc architecture specific prototypes */
 
 void syscall_init(void);
arch/x86/kvm/ioapic.c
@@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
 {
 	ioapic->rtc_status.pending_eoi = 0;
-	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
+	bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1);
 }
 
 static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
arch/x86/kvm/ioapic.h
@@ -43,13 +43,13 @@ struct kvm_vcpu;
 
 struct dest_map {
 	/* vcpu bitmap where IRQ has been sent */
-	DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
+	DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1);
 
 	/*
 	 * Vector sent to a given vcpu, only valid when
 	 * the vcpu's bit in map is set
 	 */
-	u8 vectors[KVM_MAX_VCPU_ID];
+	u8 vectors[KVM_MAX_VCPU_ID + 1];
 };
 
arch/x86/kvm/x86.c
@@ -400,8 +400,6 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 
 	if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
 	queue:
-		if (has_error && !is_protmode(vcpu))
-			has_error = false;
 		if (reinject) {
 			/*
 			 * On vmentry, vcpu->arch.exception.pending is only
@@ -6624,13 +6622,20 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
 }
 
+static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
+		vcpu->arch.exception.error_code = false;
+	kvm_x86_ops->queue_exception(vcpu);
+}
+
 static int inject_pending_event(struct kvm_vcpu *vcpu)
 {
 	int r;
 
 	/* try to reinject previous events if any */
 	if (vcpu->arch.exception.injected) {
-		kvm_x86_ops->queue_exception(vcpu);
+		kvm_inject_exception(vcpu);
 		return 0;
 	}
 
@@ -6675,7 +6680,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
 			kvm_update_dr7(vcpu);
 		}
 
-		kvm_x86_ops->queue_exception(vcpu);
+		kvm_inject_exception(vcpu);
 	} else if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
 		vcpu->arch.smi_pending = false;
 		enter_smm(vcpu);
drivers/net/can/spi/hi311x.c
@@ -236,7 +236,7 @@ static int hi3110_spi_trans(struct spi_device *spi, int len)
 	return ret;
 }
 
-static u8 hi3110_cmd(struct spi_device *spi, u8 command)
+static int hi3110_cmd(struct spi_device *spi, u8 command)
 {
 	struct hi3110_priv *priv = spi_get_drvdata(spi);
drivers/net/can/usb/ems_usb.c
@@ -267,6 +267,8 @@ struct ems_usb {
 	unsigned int free_slots; /* remember number of available slots */
 
 	struct ems_cpc_msg active_params; /* active controller parameters */
+	void *rxbuf[MAX_RX_URBS];
+	dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };
 
 static void ems_usb_read_interrupt_callback(struct urb *urb)
@@ -598,6 +600,7 @@ static int ems_usb_start(struct ems_usb *dev)
 	for (i = 0; i < MAX_RX_URBS; i++) {
 		struct urb *urb = NULL;
 		u8 *buf = NULL;
+		dma_addr_t buf_dma;
 
 		/* create a URB, and a buffer for it */
 		urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -607,7 +610,7 @@ static int ems_usb_start(struct ems_usb *dev)
 		}
 
 		buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-					 &urb->transfer_dma);
+					 &buf_dma);
 		if (!buf) {
 			netdev_err(netdev, "No memory left for USB buffer\n");
 			usb_free_urb(urb);
@@ -615,6 +618,8 @@ static int ems_usb_start(struct ems_usb *dev)
 			break;
 		}
 
+		urb->transfer_dma = buf_dma;
+
 		usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
 				  buf, RX_BUFFER_SIZE,
 				  ems_usb_read_bulk_callback, dev);
@@ -630,6 +635,9 @@ static int ems_usb_start(struct ems_usb *dev)
 			break;
 		}
 
+		dev->rxbuf[i] = buf;
+		dev->rxbuf_dma[i] = buf_dma;
+
 		/* Drop reference, USB core will take care of freeing it */
 		usb_free_urb(urb);
 	}
@@ -695,6 +703,10 @@ static void unlink_all_urbs(struct ems_usb *dev)
 
 	usb_kill_anchored_urbs(&dev->rx_submitted);
 
+	for (i = 0; i < MAX_RX_URBS; ++i)
+		usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+				  dev->rxbuf[i], dev->rxbuf_dma[i]);
+
 	usb_kill_anchored_urbs(&dev->tx_submitted);
 	atomic_set(&dev->active_tx_urbs, 0);
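The four CAN USB leak fixes in this release share one pattern: a coherent DMA buffer attached to an RX URB with URB_NO_TRANSFER_DMA_MAP is not freed by the USB core, so the driver has to remember both the CPU address and the DMA handle and release them itself at teardown. A minimal sketch of that pairing, under the assumption of a hypothetical driver-private struct (my_priv plays the role of struct ems_usb, struct esd_usb2, etc.):

	/* Sketch only: mirrors the alloc/free pairing used by the fixes above.
	 * "struct my_priv" and its rxbuf/rxbuf_dma arrays are hypothetical
	 * stand-ins for the per-driver private structures.
	 */
	static int rx_urbs_setup(struct usb_device *udev, struct my_priv *priv)
	{
		int i;

		for (i = 0; i < MAX_RX_URBS; i++) {
			struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
			dma_addr_t buf_dma;
			u8 *buf;

			if (!urb)
				return -ENOMEM;
			buf = usb_alloc_coherent(udev, RX_BUFFER_SIZE, GFP_KERNEL,
						 &buf_dma);
			if (!buf) {
				usb_free_urb(urb);
				return -ENOMEM;
			}
			urb->transfer_dma = buf_dma;	/* the mcba_usb fix: set it explicitly */
			/* ... usb_fill_bulk_urb() + usb_submit_urb() ... */
			priv->rxbuf[i] = buf;		/* remember both halves of the mapping */
			priv->rxbuf_dma[i] = buf_dma;
			usb_free_urb(urb);		/* USB core holds its own reference */
		}
		return 0;
	}

	static void rx_urbs_teardown(struct usb_device *udev, struct my_priv *priv)
	{
		int i;

		for (i = 0; i < MAX_RX_URBS; i++)	/* the leak fix: free what was allocated */
			usb_free_coherent(udev, RX_BUFFER_SIZE,
					  priv->rxbuf[i], priv->rxbuf_dma[i]);
	}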
drivers/net/can/usb/esd_usb2.c
@@ -207,6 +207,8 @@ struct esd_usb2 {
 	int net_count;
 	u32 version;
 	int rxinitdone;
+	void *rxbuf[MAX_RX_URBS];
+	dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };
 
 struct esd_usb2_net_priv {
@@ -556,6 +558,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
 	for (i = 0; i < MAX_RX_URBS; i++) {
 		struct urb *urb = NULL;
 		u8 *buf = NULL;
+		dma_addr_t buf_dma;
 
 		/* create a URB, and a buffer for it */
 		urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -565,7 +568,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
 		}
 
 		buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-					 &urb->transfer_dma);
+					 &buf_dma);
 		if (!buf) {
 			dev_warn(dev->udev->dev.parent,
 				 "No memory left for USB buffer\n");
@@ -573,6 +576,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
 			goto freeurb;
 		}
 
+		urb->transfer_dma = buf_dma;
+
 		usb_fill_bulk_urb(urb, dev->udev,
 				  usb_rcvbulkpipe(dev->udev, 1),
 				  buf, RX_BUFFER_SIZE,
@@ -585,8 +590,12 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
 			usb_unanchor_urb(urb);
 			usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
 					  urb->transfer_dma);
 			goto freeurb;
 		}
 
+		dev->rxbuf[i] = buf;
+		dev->rxbuf_dma[i] = buf_dma;
+
 freeurb:
 		/* Drop reference, USB core will take care of freeing it */
 		usb_free_urb(urb);
@@ -674,6 +683,11 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
 	int i, j;
 
 	usb_kill_anchored_urbs(&dev->rx_submitted);
 
+	for (i = 0; i < MAX_RX_URBS; ++i)
+		usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+				  dev->rxbuf[i], dev->rxbuf_dma[i]);
+
 	for (i = 0; i < dev->net_count; i++) {
 		priv = dev->nets[i];
 		if (priv) {
drivers/net/can/usb/mcba_usb.c
@@ -664,6 +664,8 @@ static int mcba_usb_start(struct mcba_priv *priv)
 			break;
 		}
 
+		urb->transfer_dma = buf_dma;
+
 		usb_fill_bulk_urb(urb, priv->udev,
 				  usb_rcvbulkpipe(priv->udev, MCBA_USB_EP_IN),
 				  buf, MCBA_USB_RX_BUFF_SIZE,
drivers/net/can/usb/usb_8dev.c
@@ -148,7 +148,8 @@ struct usb_8dev_priv {
 	u8 *cmd_msg_buffer;
 
 	struct mutex usb_8dev_cmd_lock;
-
+	void *rxbuf[MAX_RX_URBS];
+	dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };
 
 /* tx frame */
@@ -744,6 +745,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
 	for (i = 0; i < MAX_RX_URBS; i++) {
 		struct urb *urb = NULL;
 		u8 *buf;
+		dma_addr_t buf_dma;
 
 		/* create a URB, and a buffer for it */
 		urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -753,7 +755,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
 		}
 
 		buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL,
-					 &urb->transfer_dma);
+					 &buf_dma);
 		if (!buf) {
 			netdev_err(netdev, "No memory left for USB buffer\n");
 			usb_free_urb(urb);
@@ -761,6 +763,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
 			break;
 		}
 
+		urb->transfer_dma = buf_dma;
+
 		usb_fill_bulk_urb(urb, priv->udev,
 				  usb_rcvbulkpipe(priv->udev,
 						  USB_8DEV_ENDP_DATA_RX),
@@ -778,6 +782,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
 			break;
 		}
 
+		priv->rxbuf[i] = buf;
+		priv->rxbuf_dma[i] = buf_dma;
+
 		/* Drop reference, USB core will take care of freeing it */
 		usb_free_urb(urb);
 	}
@@ -847,6 +854,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv)
 
 	usb_kill_anchored_urbs(&priv->rx_submitted);
 
+	for (i = 0; i < MAX_RX_URBS; ++i)
+		usb_free_coherent(priv->udev, RX_BUFFER_SIZE,
+				  priv->rxbuf[i], priv->rxbuf_dma[i]);
+
 	usb_kill_anchored_urbs(&priv->tx_submitted);
 	atomic_set(&priv->active_tx_urbs, 0);
drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -367,7 +367,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
 	void __iomem *ioaddr;
 
-	i = pci_enable_device(pdev);
+	i = pcim_enable_device(pdev);
 	if (i) return i;
 
 	pci_set_master(pdev);
@@ -389,7 +389,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
 	if (!ioaddr)
-		goto err_out_free_res;
+		goto err_out_netdev;
 
 	for (i = 0; i < 3; i++)
 		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
@@ -468,8 +468,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 err_out_cleardev:
 	pci_iounmap(pdev, ioaddr);
-err_out_free_res:
-	pci_release_regions(pdev);
 err_out_netdev:
 	free_netdev (dev);
 	return -ENODEV;
@@ -1537,7 +1535,6 @@ static void w840_remove1(struct pci_dev *pdev)
 	if (dev) {
 		struct netdev_private *np = netdev_priv(dev);
 		unregister_netdev(dev);
-		pci_release_regions(pdev);
 		pci_iounmap(pdev, np->base_addr);
 		free_netdev(dev);
 	}
drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3469,6 +3469,7 @@ slave_start:
 
 		if (!SRIOV_VALID_STATE(dev->flags)) {
 			mlx4_err(dev, "Invalid SRIOV state\n");
+			err = -EINVAL;
 			goto err_close;
 		}
 	}
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -795,17 +795,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
 			      struct fs_prio *prio)
 {
-	struct mlx5_flow_table *next_ft;
+	struct mlx5_flow_table *next_ft, *first_ft;
 	int err = 0;
 
 	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
 
-	if (list_empty(&prio->node.children)) {
+	first_ft = list_first_entry_or_null(&prio->node.children,
+					    struct mlx5_flow_table, node.list);
+	if (!first_ft || first_ft->level > ft->level) {
 		err = connect_prev_fts(dev, ft, prio);
 		if (err)
 			return err;
 
-		next_ft = find_next_chained_ft(prio);
+		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
 		err = connect_fwd_rules(dev, ft, next_ft);
 		if (err)
 			return err;
@@ -1703,7 +1705,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
 				  node.list) == ft))
 		return 0;
 
-	next_ft = find_next_chained_ft(prio);
+	next_ft = find_next_ft(ft);
 	err = connect_fwd_rules(dev, next_ft, ft);
 	if (err)
 		return err;
drivers/net/ethernet/sis/sis900.c
@@ -441,7 +441,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
 #endif
 
 	/* setup various bits in PCI command register */
-	ret = pci_enable_device(pci_dev);
+	ret = pcim_enable_device(pci_dev);
 	if(ret) return ret;
 
 	i = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
@@ -467,7 +467,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
 	ioaddr = pci_iomap(pci_dev, 0, 0);
 	if (!ioaddr) {
 		ret = -ENOMEM;
-		goto err_out_cleardev;
+		goto err_out;
 	}
 
 	sis_priv = netdev_priv(net_dev);
@@ -575,8 +575,6 @@ err_unmap_tx:
 			  sis_priv->tx_ring_dma);
 err_out_unmap:
 	pci_iounmap(pci_dev, ioaddr);
-err_out_cleardev:
-	pci_release_regions(pci_dev);
 err_out:
 	free_netdev(net_dev);
 	return ret;
@@ -2423,7 +2421,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
 			  sis_priv->tx_ring_dma);
 	pci_iounmap(pci_dev, sis_priv->ioaddr);
 	free_netdev(net_dev);
-	pci_release_regions(pci_dev);
 }
 
 #ifdef CONFIG_PM
drivers/net/ethernet/sun/niu.c
@@ -8211,8 +8211,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start)
 		err = niu_pci_vpd_scan_props(np, here, end);
 		if (err < 0)
 			return err;
+		/* ret == 1 is not an error */
 		if (err == 1)
-			return -EINVAL;
+			return 0;
 	}
 	return 0;
 }
drivers/net/virtio_net.c
@@ -339,9 +339,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 	offset += hdr_padded_len;
 	p += hdr_padded_len;
 
-	copy = len;
-	if (copy > skb_tailroom(skb))
-		copy = skb_tailroom(skb);
+	/* Copy all frame if it fits skb->head, otherwise
+	 * we let virtio_net_hdr_to_skb() and GRO pull headers as needed.
+	 */
+	if (len <= skb_tailroom(skb))
+		copy = len;
+	else
+		copy = ETH_HLEN;
 	skb_put_data(skb, p, copy);
 
 	len -= copy;
drivers/nfc/nfcsim.c
@@ -201,8 +201,7 @@ static void nfcsim_recv_wq(struct work_struct *work)
 
 		if (!IS_ERR(skb))
 			dev_kfree_skb(skb);
-
 		skb = ERR_PTR(-ENODEV);
 		return;
 	}
 
 	dev->cb(dev->nfc_digital_dev, dev->arg, skb);
fs/hfs/bfind.c
@@ -25,7 +25,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 	fd->key = ptr + tree->max_key_len + 2;
 	hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
 		tree->cnid, __builtin_return_address(0));
-	mutex_lock(&tree->tree_lock);
+	switch (tree->cnid) {
+	case HFS_CAT_CNID:
+		mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+		break;
+	case HFS_EXT_CNID:
+		mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+		break;
+	case HFS_ATTR_CNID:
+		mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+		break;
+	default:
+		return -EINVAL;
+	}
 	return 0;
 }
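mutex_lock_nested() changes nothing at runtime on non-debug kernels; the subclass only tells lockdep that taking, say, the extents-tree lock while holding the catalog-tree lock is an ordered, legal nesting rather than a same-class deadlock. A minimal sketch of the same annotation pattern outside HFS, with hypothetical lock and subclass names:

	#include <linux/mutex.h>

	/* Hypothetical lockdep subclass annotation; OUTER_MUTEX/INNER_MUTEX
	 * play the role of CATALOG_BTREE_MUTEX/EXTENTS_BTREE_MUTEX above.
	 */
	enum my_mutex_classes {
		OUTER_MUTEX,	/* always taken first */
		INNER_MUTEX,	/* may nest under OUTER_MUTEX */
	};

	static DEFINE_MUTEX(outer);
	static DEFINE_MUTEX(inner);

	static void nested_ops(void)
	{
		mutex_lock_nested(&outer, OUTER_MUTEX);
		/* Without distinct subclasses, lockdep would report a possible
		 * recursive deadlock if both mutexes share one lock class. */
		mutex_lock_nested(&inner, INNER_MUTEX);
		/* ... work under both locks ... */
		mutex_unlock(&inner);
		mutex_unlock(&outer);
	}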
fs/hfs/bnode.c
@@ -15,16 +15,31 @@
 
 #include "btree.h"
 
-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
-		int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 {
 	struct page *page;
+	int pagenum;
+	int bytes_read;
+	int bytes_to_read;
+	void *vaddr;
 
 	off += node->page_offset;
-	page = node->page[0];
+	pagenum = off >> PAGE_SHIFT;
+	off &= ~PAGE_MASK; /* compute page offset for the first page */
 
-	memcpy(buf, kmap(page) + off, len);
-	kunmap(page);
+	for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
+		if (pagenum >= node->tree->pages_per_bnode)
+			break;
+		page = node->page[pagenum];
+		bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+
+		vaddr = kmap_atomic(page);
+		memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
+		kunmap_atomic(vaddr);
+
+		pagenum++;
+		off = 0; /* page offset only applies to the first page */
+	}
 }
 
 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
fs/hfs/btree.h
@@ -13,6 +13,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);
 
 #define NODE_HASH_SIZE  256
 
+/* B-tree mutex nested subclasses */
+enum hfs_btree_mutex_classes {
+	CATALOG_BTREE_MUTEX,
+	EXTENTS_BTREE_MUTEX,
+	ATTR_BTREE_MUTEX,
+};
+
 /* A HFS BTree held in memory */
 struct hfs_btree {
 	struct super_block *sb;
fs/hfs/super.c
@@ -427,14 +427,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!res) {
 		if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
 			res = -EIO;
-			goto bail;
+			goto bail_hfs_find;
 		}
 		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
 	}
-	if (res) {
-		hfs_find_exit(&fd);
-		goto bail_no_root;
-	}
+	if (res)
+		goto bail_hfs_find;
 	res = -EINVAL;
 	root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
 	hfs_find_exit(&fd);
@@ -450,6 +448,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 	/* everything's okay */
 	return 0;
 
+bail_hfs_find:
+	hfs_find_exit(&fd);
 bail_no_root:
 	pr_err("get root inode failed\n");
 bail:
fs/ocfs2/file.c
@@ -1535,6 +1535,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
 	}
 }
 
+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zero starts, will be made upper block aligned.
+ * len: it will be trimmed to the end of current cluster if "start + len"
+ * is bigger than it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+					 u64 start, u64 len)
+{
+	int ret;
+	u64 start_block, end_block, nr_blocks;
+	u64 p_block, offset;
+	u32 cluster, p_cluster, nr_clusters;
+	struct super_block *sb = inode->i_sb;
+	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+	if (start + len < end)
+		end = start + len;
+
+	start_block = ocfs2_blocks_for_bytes(sb, start);
+	end_block = ocfs2_blocks_for_bytes(sb, end);
+	nr_blocks = end_block - start_block;
+	if (!nr_blocks)
+		return 0;
+
+	cluster = ocfs2_bytes_to_clusters(sb, start);
+	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+				 &nr_clusters, NULL);
+	if (ret)
+		return ret;
+	if (!p_cluster)
+		return 0;
+
+	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
 static int ocfs2_zero_partial_clusters(struct inode *inode,
 				       u64 start, u64 len)
 {
@@ -1544,6 +1583,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	unsigned int csize = osb->s_clustersize;
 	handle_t *handle;
+	loff_t isize = i_size_read(inode);
 
 	/*
 	 * The "start" and "end" values are NOT necessarily part of
@@ -1564,6 +1604,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
 	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
 		goto out;
 
+	/* No page cache for EOF blocks, issue zero out to disk. */
+	if (end > isize) {
+		/*
+		 * zeroout eof blocks in last cluster starting from
+		 * "isize" even "start" > "isize" because it is
+		 * complicated to zeroout just at "start" as "start"
+		 * may be not aligned with block size, buffer write
+		 * would be required to do that, but out of eof buffer
+		 * write is not supported.
+		 */
+		ret = ocfs2_zeroout_partial_cluster(inode, isize,
+						    end - isize);
+		if (ret) {
+			mlog_errno(ret);
+			goto out;
+		}
+		if (start >= isize)
+			goto out;
+		end = isize;
+	}
 	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 	if (IS_ERR(handle)) {
 		ret = PTR_ERR(handle);
@@ -1861,45 +1921,6 @@ out:
 	return ret;
 }
 
-/*
- * zero out partial blocks of one cluster.
- *
- * start: file offset where zero starts, will be made upper block aligned.
- * len: it will be trimmed to the end of current cluster if "start + len"
- * is bigger than it.
- */
-static int ocfs2_zeroout_partial_cluster(struct inode *inode,
-					 u64 start, u64 len)
-{
-	int ret;
-	u64 start_block, end_block, nr_blocks;
-	u64 p_block, offset;
-	u32 cluster, p_cluster, nr_clusters;
-	struct super_block *sb = inode->i_sb;
-	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
-
-	if (start + len < end)
-		end = start + len;
-
-	start_block = ocfs2_blocks_for_bytes(sb, start);
-	end_block = ocfs2_blocks_for_bytes(sb, end);
-	nr_blocks = end_block - start_block;
-	if (!nr_blocks)
-		return 0;
-
-	cluster = ocfs2_bytes_to_clusters(sb, start);
-	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
-				 &nr_clusters, NULL);
-	if (ret)
-		return ret;
-	if (!p_cluster)
-		return 0;
-
-	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
-	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
-	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
-}
-
 /*
  * Parts of this function taken from xfs_change_file_space()
  */
@@ -1941,7 +1962,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		goto out_inode_unlock;
 	}
 
-	orig_isize = i_size_read(inode);
 	switch (sr->l_whence) {
 	case 0: /*SEEK_SET*/
 		break;
@@ -1949,7 +1969,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		sr->l_start += f_pos;
 		break;
 	case 2: /*SEEK_END*/
-		sr->l_start += orig_isize;
+		sr->l_start += i_size_read(inode);
 		break;
 	default:
 		ret = -EINVAL;
@@ -2004,6 +2024,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 		ret = -EINVAL;
 	}
 
+	orig_isize = i_size_read(inode);
 	/* zeroout eof blocks in the cluster. */
 	if (!ret && change_size && orig_isize < size) {
 		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
include/linux/skbuff.h
@@ -2784,6 +2784,15 @@ static inline void skb_propagate_pfmemalloc(struct page *page,
 		skb->pfmemalloc = true;
 }
 
+/**
+ * skb_frag_off() - Returns the offset of a skb fragment
+ * @frag: the paged fragment
+ */
+static inline unsigned int skb_frag_off(const skb_frag_t *frag)
+{
+	return frag->page_offset;
+}
+
 /**
  * skb_frag_page - retrieve the page referred to by a paged fragment
  * @frag: the paged fragment
include/linux/virtio_net.h
@@ -65,14 +65,18 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
 	skb_reset_mac_header(skb);
 
 	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
-		u16 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
-		u16 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+		u32 start = __virtio16_to_cpu(little_endian, hdr->csum_start);
+		u32 off = __virtio16_to_cpu(little_endian, hdr->csum_offset);
+		u32 needed = start + max_t(u32, thlen, off + sizeof(__sum16));
+
+		if (!pskb_may_pull(skb, needed))
+			return -EINVAL;
 
 		if (!skb_partial_csum_set(skb, start, off))
 			return -EINVAL;
 
 		p_off = skb_transport_offset(skb) + thlen;
-		if (p_off > skb_headlen(skb))
+		if (!pskb_may_pull(skb, p_off))
 			return -EINVAL;
 	} else {
 		/* gso packets without NEEDS_CSUM do not set transport_offset.
@@ -100,14 +104,14 @@ retry:
 		}
 
 		p_off = keys.control.thoff + thlen;
-		if (p_off > skb_headlen(skb) ||
+		if (!pskb_may_pull(skb, p_off) ||
 		    keys.basic.ip_proto != ip_proto)
 			return -EINVAL;
 
 		skb_set_transport_header(skb, keys.control.thoff);
 	} else if (gso_type) {
 		p_off = thlen;
-		if (p_off > skb_headlen(skb))
+		if (!pskb_may_pull(skb, p_off))
 			return -EINVAL;
 	}
 }
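The switch from bare skb_headlen() comparisons to pskb_may_pull() matters because, once virtio_net stops pulling the payload into skb->head (the page_to_skb() change above), the headers being validated may still live in paged fragments. pskb_may_pull() copies the requested span into the linear area (or fails), so subsequent direct header accesses stay safe. A reduced sketch with a hypothetical parse helper:

	/* Sketch: why pskb_may_pull() replaces an skb_headlen() test.
	 * A header may sit in paged fragments; pskb_may_pull() linearizes
	 * it (possibly reallocating skb->head), after which skb->data is
	 * safe to dereference for hdr_end bytes.
	 */
	static int parse_l4_header(struct sk_buff *skb, unsigned int hdr_end)
	{
		if (!pskb_may_pull(skb, hdr_end))	/* may move skb->head */
			return -EINVAL;
		/* From here, skb->data .. skb->data + hdr_end is linear. */
		return 0;
	}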
include/net/af_unix.h
@@ -10,6 +10,7 @@
 
 void unix_inflight(struct user_struct *user, struct file *fp);
 void unix_notinflight(struct user_struct *user, struct file *fp);
+void unix_destruct_scm(struct sk_buff *skb);
 void unix_gc(void);
 void wait_for_unix_gc(void);
 struct sock *unix_get_socket(struct file *filp);
include/net/busy_poll.h
@@ -48,7 +48,7 @@ static inline bool net_busy_loop_on(void)
 
 static inline bool sk_can_busy_loop(const struct sock *sk)
 {
-	return sk->sk_ll_usec && !signal_pending(current);
+	return READ_ONCE(sk->sk_ll_usec) && !signal_pending(current);
 }
 
 bool sk_busy_loop_end(void *p, unsigned long start_time);
include/net/llc_pdu.h
@@ -15,9 +15,11 @@
 #include <linux/if_ether.h>
 
 /* Lengths of frame formats */
-#define LLC_PDU_LEN_I		4	/* header and 2 control bytes */
-#define LLC_PDU_LEN_S		4
-#define LLC_PDU_LEN_U		3	/* header and 1 control byte */
+#define LLC_PDU_LEN_I		4	/* header and 2 control bytes */
+#define LLC_PDU_LEN_S		4
+#define LLC_PDU_LEN_U		3	/* header and 1 control byte */
+/* header and 1 control byte and XID info */
+#define LLC_PDU_LEN_U_XID	(LLC_PDU_LEN_U + sizeof(struct llc_xid_info))
 /* Known SAP addresses */
 #define LLC_GLOBAL_SAP	0xFF
 #define LLC_NULL_SAP	0x00	/* not network-layer visible */
@@ -50,9 +52,10 @@
 #define LLC_PDU_TYPE_U_MASK    0x03	/* 8-bit control field */
 #define LLC_PDU_TYPE_MASK      0x03
 
-#define LLC_PDU_TYPE_I	0	/* first bit */
-#define LLC_PDU_TYPE_S	1	/* first two bits */
-#define LLC_PDU_TYPE_U	3	/* first two bits */
+#define LLC_PDU_TYPE_I		0	/* first bit */
+#define LLC_PDU_TYPE_S		1	/* first two bits */
+#define LLC_PDU_TYPE_U		3	/* first two bits */
+#define LLC_PDU_TYPE_U_XID	4	/* private type for detecting XID commands */
 
 #define LLC_PDU_TYPE_IS_I(pdu) \
 	((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
@@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
 static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
 				       u8 ssap, u8 dsap, u8 cr)
 {
-	const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
+	int hlen = 4; /* default value for I and S types */
 	struct llc_pdu_un *pdu;
 
+	switch (type) {
+	case LLC_PDU_TYPE_U:
+		hlen = 3;
+		break;
+	case LLC_PDU_TYPE_U_XID:
+		hlen = 6;
+		break;
+	}
+
 	skb_push(skb, hlen);
 	skb_reset_network_header(skb);
 	pdu = llc_pdu_un_hdr(skb);
@@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
 	xid_info->fmt_id = LLC_XID_FMT_ID;	/* 0x81 */
 	xid_info->type = svcs_supported;
 	xid_info->rw = rx_window << 1;	/* size of receive window */
-	skb_put(skb, sizeof(struct llc_xid_info));
+
+	/* no need to push/put since llc_pdu_header_init() has already
+	 * pushed 3 + 3 bytes
+	 */
 }
 
 /**
include/net/sctp/constants.h
@@ -348,8 +348,7 @@ enum {
 #define SCTP_SCOPE_POLICY_MAX	SCTP_SCOPE_POLICY_LINK
 
 /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
- * 192.88.99.0/24.
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
  * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
  * addresses.
  */
@@ -357,7 +356,6 @@ enum {
 	((htonl(INADDR_BROADCAST) == a) || \
 	 ipv4_is_multicast(a) || \
 	 ipv4_is_zeronet(a) || \
-	 ipv4_is_test_198(a) || \
 	 ipv4_is_anycast_6to4(a))
 
 /* Flags used for the bind address copy functions. */
kernel/workqueue.c
@@ -3466,15 +3466,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 						  unbound_release_work);
 	struct workqueue_struct *wq = pwq->wq;
 	struct worker_pool *pool = pwq->pool;
-	bool is_last;
+	bool is_last = false;
 
-	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
-		return;
+	/*
+	 * when @pwq is not linked, it doesn't hold any reference to the
+	 * @wq, and @wq is invalid to access.
+	 */
+	if (!list_empty(&pwq->pwqs_node)) {
+		if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+			return;
 
-	mutex_lock(&wq->mutex);
-	list_del_rcu(&pwq->pwqs_node);
-	is_last = list_empty(&wq->pwqs);
-	mutex_unlock(&wq->mutex);
+		mutex_lock(&wq->mutex);
+		list_del_rcu(&pwq->pwqs_node);
+		is_last = list_empty(&wq->pwqs);
+		mutex_unlock(&wq->mutex);
+	}
 
 	mutex_lock(&wq_pool_mutex);
 	put_unbound_pool(pool);
net/802/garp.c
@@ -206,6 +206,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr
 	kfree(attr);
 }
 
+static void garp_attr_destroy_all(struct garp_applicant *app)
+{
+	struct rb_node *node, *next;
+	struct garp_attr *attr;
+
+	for (node = rb_first(&app->gid);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct garp_attr, node);
+		garp_attr_destroy(app, attr);
+	}
+}
+
 static int garp_pdu_init(struct garp_applicant *app)
 {
 	struct sk_buff *skb;
@@ -612,6 +625,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 
 	spin_lock_bh(&app->lock);
 	garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+	garp_attr_destroy_all(app);
 	garp_pdu_queue(app);
 	spin_unlock_bh(&app->lock);
net/802/mrp.c
@@ -295,6 +295,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
 	kfree(attr);
 }
 
+static void mrp_attr_destroy_all(struct mrp_applicant *app)
+{
+	struct rb_node *node, *next;
+	struct mrp_attr *attr;
+
+	for (node = rb_first(&app->mad);
+	     next = node ? rb_next(node) : NULL, node != NULL;
+	     node = next) {
+		attr = rb_entry(node, struct mrp_attr, node);
+		mrp_attr_destroy(app, attr);
+	}
+}
+
 static int mrp_pdu_init(struct mrp_applicant *app)
 {
 	struct sk_buff *skb;
@@ -899,6 +912,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
 
 	spin_lock_bh(&app->lock);
 	mrp_mad_event(app, MRP_EVENT_TX);
+	mrp_attr_destroy_all(app);
 	mrp_pdu_queue(app);
 	spin_unlock_bh(&app->lock);
net/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_NETFILTER)	+= netfilter/
 obj-$(CONFIG_INET)		+= ipv4/
 obj-$(CONFIG_TLS)		+= tls/
 obj-$(CONFIG_XFRM)		+= xfrm/
-obj-$(CONFIG_UNIX)		+= unix/
+obj-$(CONFIG_UNIX_SCM)		+= unix/
 obj-$(CONFIG_NET)		+= ipv6/
 obj-$(CONFIG_PACKET)		+= packet/
 obj-$(CONFIG_NET_KEY)		+= key/
net/can/raw.c
@@ -549,10 +549,18 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 				return -EFAULT;
 		}
 
+		rtnl_lock();
 		lock_sock(sk);
 
-		if (ro->bound && ro->ifindex)
+		if (ro->bound && ro->ifindex) {
 			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+			if (!dev) {
+				if (count > 1)
+					kfree(filter);
+				err = -ENODEV;
+				goto out_fil;
+			}
+		}
 
 		if (ro->bound) {
 			/* (try to) register the new filters */
@@ -591,6 +599,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 			dev_put(dev);
 
 		release_sock(sk);
+		rtnl_unlock();
 
 		break;
 
@@ -603,10 +612,16 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 
 		err_mask &= CAN_ERR_MASK;
 
+		rtnl_lock();
 		lock_sock(sk);
 
-		if (ro->bound && ro->ifindex)
+		if (ro->bound && ro->ifindex) {
 			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
+			if (!dev) {
+				err = -ENODEV;
+				goto out_err;
+			}
+		}
 
 		/* remove current error mask */
 		if (ro->bound) {
@@ -630,6 +645,7 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 			dev_put(dev);
 
 		release_sock(sk);
+		rtnl_unlock();
 
 		break;
||||
|
@ -4763,7 +4763,8 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
|
||||
|
||||
if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
|
||||
pinfo->nr_frags &&
|
||||
!PageHighMem(skb_frag_page(frag0))) {
|
||||
!PageHighMem(skb_frag_page(frag0)) &&
|
||||
(!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
|
||||
NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
|
||||
NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
|
||||
skb_frag_size(frag0),
|
||||
|
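On platforms where NET_IP_ALIGN is nonzero, unaligned IP-header loads can fault or trap, so the GRO frag0 fast path must verify the fragment's payload offset is 4-byte aligned before dereferencing headers in place. The guard condensed into a hypothetical helper (it relies on the skb_frag_off() accessor backported above):

	/* Hypothetical condensed form of the skb_gro_reset_offset() guard. */
	static bool frag0_fast_path_ok(const skb_frag_t *frag0)
	{
		/* NET_IP_ALIGN == 0 means the CPU tolerates unaligned loads;
		 * otherwise require the payload to start on a 4-byte boundary. */
		return !NET_IP_ALIGN || !(skb_frag_off(frag0) & 3);
	}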
net/core/sock.c
@@ -1023,7 +1023,7 @@ set_rcvbuf:
 		if (val < 0)
 			ret = -EINVAL;
 		else
-			sk->sk_ll_usec = val;
+			WRITE_ONCE(sk->sk_ll_usec, val);
 	}
 	break;
 #endif
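This WRITE_ONCE() pairs with the READ_ONCE() added to sk_can_busy_loop() above: sk_ll_usec is written under the socket lock but read locklessly from the busy-poll path, so both sides must be annotated to stop the compiler from tearing, caching, or re-fetching the access. The general pattern, sketched on a hypothetical field:

	/* Hypothetical lockless-reader pattern matching the sk_ll_usec fix. */
	struct conf {
		unsigned int poll_usec;
	};

	static void conf_set(struct conf *c, unsigned int val)
	{
		/* writer side (runs under the owning lock) */
		WRITE_ONCE(c->poll_usec, val);
	}

	static bool conf_poll_enabled(const struct conf *c)
	{
		/* lockless reader: READ_ONCE() forbids load tearing/refetch */
		return READ_ONCE(c->poll_usec) != 0;
	}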
net/llc/af_llc.c
@@ -98,8 +98,16 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
 {
 	u8 rc = LLC_PDU_LEN_U;
 
-	if (addr->sllc_test || addr->sllc_xid)
+	if (addr->sllc_test)
 		rc = LLC_PDU_LEN_U;
+	else if (addr->sllc_xid)
+		/* We need to expand header to sizeof(struct llc_xid_info)
+		 * since llc_pdu_init_as_xid_cmd() sets 4,5,6 bytes of LLC header
+		 * as XID PDU. In llc_ui_sendmsg() we reserved header size and then
+		 * filled all other space with user data. If we won't reserve this
+		 * bytes, llc_pdu_init_as_xid_cmd() will overwrite user data
+		 */
+		rc = LLC_PDU_LEN_U_XID;
 	else if (sk->sk_type == SOCK_STREAM)
 		rc = LLC_PDU_LEN_I;
 	return rc;
net/llc/llc_s_ac.c
@@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
 	struct llc_sap_state_ev *ev = llc_sap_ev(skb);
 	int rc;
 
-	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
+	llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
 			    ev->daddr.lsap, LLC_PDU_CMD);
 	llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
 	rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
net/netfilter/nf_conntrack_core.c
@@ -506,8 +506,13 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
 		return false;
 
 	tstamp = nf_conn_tstamp_find(ct);
-	if (tstamp && tstamp->stop == 0)
+	if (tstamp) {
+		s32 timeout = ct->timeout - nfct_time_stamp;
+
 		tstamp->stop = ktime_get_real_ns();
+		if (timeout < 0)
+			tstamp->stop -= jiffies_to_nsecs(-timeout);
+	}
 
 	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
 				      portid, report) < 0) {
net/netfilter/nft_nat.c
@@ -153,7 +153,9 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 		alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip6);
 		break;
 	default:
-		return -EAFNOSUPPORT;
+		if (tb[NFTA_NAT_REG_ADDR_MIN])
+			return -EAFNOSUPPORT;
+		break;
 	}
 	priv->family = family;
net/sctp/input.c
@@ -1125,7 +1125,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
 	if (unlikely(!af))
 		return NULL;
 
-	if (af->from_addr_param(&paddr, param, peer_port, 0))
+	if (!af->from_addr_param(&paddr, param, peer_port, 0))
 		return NULL;
 
 	return __sctp_lookup_association(net, laddr, &paddr, transportp);
net/sctp/protocol.c
@@ -423,7 +423,8 @@ static enum sctp_scope sctp_v4_scope(union sctp_addr *addr)
 		retval = SCTP_SCOPE_LINK;
 	} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
 		   ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
-		   ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
+		   ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
+		   ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
 		retval = SCTP_SCOPE_PRIVATE;
 	} else {
 		retval = SCTP_SCOPE_GLOBAL;
net/tipc/socket.c
@@ -2001,7 +2001,7 @@ static int tipc_listen(struct socket *sock, int len)
 static int tipc_wait_for_accept(struct socket *sock, long timeo)
 {
 	struct sock *sk = sock->sk;
-	DEFINE_WAIT(wait);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 	int err;
 
 	/* True wake-one mechanism for incoming connections: only
@@ -2010,12 +2010,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
 	 * anymore, the common case will execute the loop only once.
 	 */
 	for (;;) {
-		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
-					  TASK_INTERRUPTIBLE);
 		if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
+			add_wait_queue(sk_sleep(sk), &wait);
 			release_sock(sk);
-			timeo = schedule_timeout(timeo);
+			timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
 			lock_sock(sk);
+			remove_wait_queue(sk_sleep(sk), &wait);
 		}
 		err = 0;
 		if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -2027,7 +2027,6 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
 		if (signal_pending(current))
 			break;
 	}
-	finish_wait(sk_sleep(sk), &wait);
 	return err;
 }
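The old code armed the wait entry with prepare_to_wait_exclusive() and then called release_sock(), which can sleep, while the task state was already TASK_INTERRUPTIBLE. The wait_woken() idiom avoids that: the wakeup is latched in the wait entry itself, so a wakeup arriving between the emptiness check and the sleep is never lost, and no special task state is held across the lock drop. A generic sketch of the idiom, with a hypothetical ready() condition:

	/* Hypothetical wait_woken() idiom, as adopted by tipc_wait_for_accept(). */
	static int wait_for_ready(struct wait_queue_head *wq, bool (*ready)(void),
				  long timeo)
	{
		DEFINE_WAIT_FUNC(wait, woken_wake_function);
		int err = 0;

		for (;;) {
			if (!ready()) {
				add_wait_queue(wq, &wait);
				/* A wakeup between the ready() check and here is
				 * not lost: woken_wake_function() records it in
				 * the wait entry and wait_woken() returns. */
				timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
				remove_wait_queue(wq, &wait);
			}
			if (ready())
				break;
			if (!timeo) {
				err = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				err = -ERESTARTSYS;
				break;
			}
		}
		return err;
	}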
net/unix/Kconfig
@@ -19,6 +19,11 @@ config UNIX
 
 	  Say Y unless you know what you are doing.
 
+config UNIX_SCM
+	bool
+	depends on UNIX
+	default y
+
 config UNIX_DIAG
 	tristate "UNIX: socket monitoring interface"
 	depends on UNIX
net/unix/Makefile
@@ -10,3 +10,5 @@ unix-$(CONFIG_SYSCTL)	+= sysctl_net_unix.o
 
 obj-$(CONFIG_UNIX_DIAG)	+= unix_diag.o
 unix_diag-y		:= diag.o
+
+obj-$(CONFIG_UNIX_SCM)	+= scm.o
net/unix/af_unix.c
@@ -119,6 +119,8 @@
 #include <linux/freezer.h>
 #include <linux/file.h>
 
+#include "scm.h"
+
 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
 EXPORT_SYMBOL_GPL(unix_socket_table);
 DEFINE_SPINLOCK(unix_table_lock);
@@ -1519,65 +1521,51 @@ out:
 	return err;
 }
 
-static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
 {
-	int i;
-
-	scm->fp = UNIXCB(skb).fp;
-	UNIXCB(skb).fp = NULL;
-
-	for (i = scm->fp->count-1; i >= 0; i--)
-		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
-}
-
-static void unix_destruct_scm(struct sk_buff *skb)
-{
-	struct scm_cookie scm;
-	memset(&scm, 0, sizeof(scm));
-	scm.pid  = UNIXCB(skb).pid;
-	if (UNIXCB(skb).fp)
-		unix_detach_fds(&scm, skb);
-
-	/* Alas, it calls VFS */
-	/* So fscking what? fput() had been SMP-safe since the last Summer */
-	scm_destroy(&scm);
-	sock_wfree(skb);
-}
-
-/*
- * The "user->unix_inflight" variable is protected by the garbage
- * collection lock, and we just read it locklessly here. If you go
- * over the limit, there might be a tiny race in actually noticing
- * it across threads. Tough.
- */
-static inline bool too_many_unix_fds(struct task_struct *p)
-{
-	struct user_struct *user = current_user();
-
-	if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
-		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
-	return false;
-}
-
-static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
-{
-	int i;
-
-	if (too_many_unix_fds(current))
-		return -ETOOMANYREFS;
+	scm->fp = scm_fp_dup(UNIXCB(skb).fp);
 
 	/*
-	 * Need to duplicate file references for the sake of garbage
-	 * collection. Otherwise a socket in the fps might become a
-	 * candidate for GC while the skb is not yet queued.
+	 * Garbage collection of unix sockets starts by selecting a set of
+	 * candidate sockets which have reference only from being in flight
+	 * (total_refs == inflight_refs).  This condition is checked once during
+	 * the candidate collection phase, and candidates are marked as such, so
+	 * that non-candidates can later be ignored.  While inflight_refs is
+	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
+	 * is an instantaneous decision.
+	 *
+	 * Once a candidate, however, the socket must not be reinstalled into a
+	 * file descriptor while the garbage collection is in progress.
+	 *
+	 * If the above conditions are met, then the directed graph of
+	 * candidates (*) does not change while unix_gc_lock is held.
+	 *
+	 * Any operations that changes the file count through file descriptors
+	 * (dup, close, sendmsg) does not change the graph since candidates are
+	 * not installed in fds.
+	 *
+	 * Dequeing a candidate via recvmsg would install it into an fd, but
+	 * that takes unix_gc_lock to decrement the inflight count, so it's
+	 * serialized with garbage collection.
+	 *
+	 * MSG_PEEK is special in that it does not change the inflight count,
+	 * yet does install the socket into an fd.  The following lock/unlock
+	 * pair is to ensure serialization with garbage collection.  It must be
+	 * done between incrementing the file count and installing the file into
+	 * an fd.
+	 *
+	 * If garbage collection starts after the barrier provided by the
+	 * lock/unlock, then it will see the elevated refcount and not mark this
+	 * as a candidate.  If a garbage collection is already in progress
+	 * before the file count was incremented, then the lock/unlock pair will
+	 * ensure that garbage collection is finished before progressing to
+	 * installing the fd.
+	 *
+	 * (*) A -> B where B is on the queue of A or B is on the queue of C
+	 * which is on the queue of listening socket A.
 	 */
-	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
-	if (!UNIXCB(skb).fp)
-		return -ENOMEM;
-
-	for (i = scm->fp->count - 1; i >= 0; i--)
-		unix_inflight(scm->fp->user, scm->fp->fp[i]);
-	return 0;
+	spin_lock(&unix_gc_lock);
+	spin_unlock(&unix_gc_lock);
 }
 
 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
@@ -2205,7 +2193,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 		sk_peek_offset_fwd(sk, size);
 
 		if (UNIXCB(skb).fp)
-			scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+			unix_peek_fds(&scm, skb);
 	}
 	err = (flags & MSG_TRUNC) ? skb->len - skip : size;
 
@@ -2446,7 +2434,7 @@ unlock:
 			/* It is questionable, see note in unix_dgram_recvmsg.
 			 */
 			if (UNIXCB(skb).fp)
-				scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+				unix_peek_fds(&scm, skb);
 
 			sk_peek_offset_fwd(sk, chunk);
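The empty spin_lock()/spin_unlock() pair in unix_peek_fds() is a pure serialization barrier against the garbage collector, not data protection: it only guarantees that any GC cycle running when the file count was raised has finished before the descriptor is published. The same idiom in miniature, with hypothetical names:

	/* Hypothetical illustration of the lock/unlock-as-barrier idiom used by
	 * unix_peek_fds(). struct my_obj and install_into_table() are stand-ins.
	 */
	static DEFINE_SPINLOCK(gc_lock);

	static void publish_object(struct my_obj *obj)
	{
		refcount_inc(&obj->refs);	/* a GC starting after this sees it */

		spin_lock(&gc_lock);		/* wait out any in-progress GC ... */
		spin_unlock(&gc_lock);		/* ... before making it reachable */

		install_into_table(obj);	/* hypothetical consumer */
	}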
net/unix/garbage.c
@@ -86,77 +86,13 @@
 #include <net/scm.h>
 #include <net/tcp_states.h>
 
+#include "scm.h"
+
 /* Internal data structures and random procedures: */
 
-static LIST_HEAD(gc_inflight_list);
 static LIST_HEAD(gc_candidates);
-static DEFINE_SPINLOCK(unix_gc_lock);
 static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
 
-unsigned int unix_tot_inflight;
-
-struct sock *unix_get_socket(struct file *filp)
-{
-	struct sock *u_sock = NULL;
-	struct inode *inode = file_inode(filp);
-
-	/* Socket ? */
-	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
-		struct socket *sock = SOCKET_I(inode);
-		struct sock *s = sock->sk;
-
-		/* PF_UNIX ? */
-		if (s && sock->ops && sock->ops->family == PF_UNIX)
-			u_sock = s;
-	}
-	return u_sock;
-}
-
-/* Keep the number of times in flight count for the file
- * descriptor if it is for an AF_UNIX socket.
- */
-
-void unix_inflight(struct user_struct *user, struct file *fp)
-{
-	struct sock *s = unix_get_socket(fp);
-
-	spin_lock(&unix_gc_lock);
-
-	if (s) {
-		struct unix_sock *u = unix_sk(s);
-
-		if (atomic_long_inc_return(&u->inflight) == 1) {
-			BUG_ON(!list_empty(&u->link));
-			list_add_tail(&u->link, &gc_inflight_list);
-		} else {
-			BUG_ON(list_empty(&u->link));
-		}
-		unix_tot_inflight++;
-	}
-	user->unix_inflight++;
-	spin_unlock(&unix_gc_lock);
-}
-
-void unix_notinflight(struct user_struct *user, struct file *fp)
-{
-	struct sock *s = unix_get_socket(fp);
-
-	spin_lock(&unix_gc_lock);
-
-	if (s) {
-		struct unix_sock *u = unix_sk(s);
-
-		BUG_ON(!atomic_long_read(&u->inflight));
-		BUG_ON(list_empty(&u->link));
-
-		if (atomic_long_dec_and_test(&u->inflight))
-			list_del_init(&u->link);
-		unix_tot_inflight--;
-	}
-	user->unix_inflight--;
-	spin_unlock(&unix_gc_lock);
-}
-
 static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
 			  struct sk_buff_head *hitlist)
 {
net/unix/scm.c (new file, 149 lines)
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/socket.h>
+#include <linux/net.h>
+#include <linux/fs.h>
+#include <net/af_unix.h>
+#include <net/scm.h>
+#include <linux/init.h>
+#include <linux/sched/signal.h>
+
+#include "scm.h"
+
+unsigned int unix_tot_inflight;
+EXPORT_SYMBOL(unix_tot_inflight);
+
+LIST_HEAD(gc_inflight_list);
+EXPORT_SYMBOL(gc_inflight_list);
+
+DEFINE_SPINLOCK(unix_gc_lock);
+EXPORT_SYMBOL(unix_gc_lock);
+
+struct sock *unix_get_socket(struct file *filp)
+{
+	struct sock *u_sock = NULL;
+	struct inode *inode = file_inode(filp);
+
+	/* Socket ? */
+	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
+		struct socket *sock = SOCKET_I(inode);
+		struct sock *s = sock->sk;
+
+		/* PF_UNIX ? */
+		if (s && sock->ops && sock->ops->family == PF_UNIX)
+			u_sock = s;
+	}
+	return u_sock;
+}
+EXPORT_SYMBOL(unix_get_socket);
+
+/* Keep the number of times in flight count for the file
+ * descriptor if it is for an AF_UNIX socket.
+ */
+void unix_inflight(struct user_struct *user, struct file *fp)
+{
+	struct sock *s = unix_get_socket(fp);
+
+	spin_lock(&unix_gc_lock);
+
+	if (s) {
+		struct unix_sock *u = unix_sk(s);
+
+		if (atomic_long_inc_return(&u->inflight) == 1) {
+			BUG_ON(!list_empty(&u->link));
+			list_add_tail(&u->link, &gc_inflight_list);
+		} else {
+			BUG_ON(list_empty(&u->link));
+		}
+		unix_tot_inflight++;
+	}
+	user->unix_inflight++;
+	spin_unlock(&unix_gc_lock);
+}
+
+void unix_notinflight(struct user_struct *user, struct file *fp)
+{
+	struct sock *s = unix_get_socket(fp);
+
+	spin_lock(&unix_gc_lock);
+
+	if (s) {
+		struct unix_sock *u = unix_sk(s);
+
+		BUG_ON(!atomic_long_read(&u->inflight));
+		BUG_ON(list_empty(&u->link));
+
+		if (atomic_long_dec_and_test(&u->inflight))
+			list_del_init(&u->link);
+		unix_tot_inflight--;
+	}
+	user->unix_inflight--;
+	spin_unlock(&unix_gc_lock);
+}
+
+/*
+ * The "user->unix_inflight" variable is protected by the garbage
+ * collection lock, and we just read it locklessly here. If you go
+ * over the limit, there might be a tiny race in actually noticing
+ * it across threads. Tough.
+ */
+static inline bool too_many_unix_fds(struct task_struct *p)
+{
+	struct user_struct *user = current_user();
+
+	if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+		return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+	return false;
+}
+
+int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+	int i;
+
+	if (too_many_unix_fds(current))
+		return -ETOOMANYREFS;
+
+	/*
+	 * Need to duplicate file references for the sake of garbage
+	 * collection. Otherwise a socket in the fps might become a
+	 * candidate for GC while the skb is not yet queued.
+	 */
+	UNIXCB(skb).fp = scm_fp_dup(scm->fp);
+	if (!UNIXCB(skb).fp)
+		return -ENOMEM;
+
+	for (i = scm->fp->count - 1; i >= 0; i--)
+		unix_inflight(scm->fp->user, scm->fp->fp[i]);
+	return 0;
+}
+EXPORT_SYMBOL(unix_attach_fds);
+
+void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+	int i;
+
+	scm->fp = UNIXCB(skb).fp;
+	UNIXCB(skb).fp = NULL;
+
+	for (i = scm->fp->count-1; i >= 0; i--)
+		unix_notinflight(scm->fp->user, scm->fp->fp[i]);
+}
+EXPORT_SYMBOL(unix_detach_fds);
+
+void unix_destruct_scm(struct sk_buff *skb)
+{
+	struct scm_cookie scm;
+
+	memset(&scm, 0, sizeof(scm));
+	scm.pid = UNIXCB(skb).pid;
+	if (UNIXCB(skb).fp)
+		unix_detach_fds(&scm, skb);
+
+	/* Alas, it calls VFS */
+	/* So fscking what? fput() had been SMP-safe since the last Summer */
+	scm_destroy(&scm);
+	sock_wfree(skb);
+}
+EXPORT_SYMBOL(unix_destruct_scm);
net/unix/scm.h (new file, 10 lines)
@@ -0,0 +1,10 @@
+#ifndef NET_UNIX_SCM_H
+#define NET_UNIX_SCM_H
+
+extern struct list_head gc_inflight_list;
+extern spinlock_t unix_gc_lock;
+
+int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb);
+void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb);
+
+#endif
net/wireless/scan.c
@@ -1026,16 +1026,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
 		 * be grouped with this beacon for updates ...
 		 */
 		if (!cfg80211_combine_bsses(rdev, new)) {
-			kfree(new);
+			bss_ref_put(rdev, new);
 			goto drop;
 		}
 	}
 
 	if (rdev->bss_entries >= bss_entries_limit &&
 	    !cfg80211_bss_expire_oldest(rdev)) {
-		if (!list_empty(&new->hidden_list))
-			list_del(&new->hidden_list);
-		kfree(new);
+		bss_ref_put(rdev, new);
 		goto drop;
 	}
tools/perf/util/map.c
@@ -216,8 +216,6 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
 			if (type != MAP__FUNCTION)
 				dso__set_loaded(dso, map->type);
 		}
-
-		nsinfo__put(dso->nsinfo);
 		dso->nsinfo = nsi;
 		dso__put(dso);
 	}
tools/testing/selftests/vm/userfaultfd.c
@@ -131,7 +131,7 @@ static void anon_allocate_area(void **alloc_area)
 {
 	*alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
 			   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
-	if (*alloc_area == MAP_FAILED)
+	if (*alloc_area == MAP_FAILED) {
+		fprintf(stderr, "mmap of anonymous memory failed");
+		*alloc_area = NULL;
+	}
 }