Mirror of https://github.com/rd-stuffs/msm-4.14.git (synced 2025-02-20 11:45:48 +08:00)
dmaengine-3.17
1/ Step down as dmaengine maintainer see commit 08223d80df38 "dmaengine
   maintainer update"
2/ Removal of net_dma, as it has been marked 'broken' since 3.13 (commit
   77873803363c "net_dma: mark broken"), without reports of performance
   regression.
3/ Miscellaneous fixes

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJUKDLKAAoJEB7SkWpmfYgC7wwP/iNHqRjf1suMUTBIF3P6Hgbe
VCUwh0IkuujMPDG46WRn6cYzarRxVPLoGaLHLPszgjI6pmGPVv19wqeDOlUxtcmr
0iQWEWv/zqseaAIW+4gj/WYCyMgKil49EUBJKCZCfNmIaad+e0pr8f0uE5yOkHPM
tqWoZERu9A4dlXGr1TjeOZVzdnPrCt92MrLDN6ZZ6tMuJaEc5PauaLxKTeGy5fYj
UB+k1xJQzECbsYfpB+uCVYl5/qPO1rNyuBYS8THCsW+JYmrbbfH2kkF2lo2FaUpO
8Yd50FtzXHKWwAt7BzfIwU2M7x0wRmryrC/xsQi6M+WmVeHYvvHUIpzaA66xRZ5x
fCy3Fu8sEnmnmboAbh2v2c5uTycqRl2xPzbpLAuxglloXIxzi3ckp6ESF/Z4SldH
oxIoEievN7lah3vKgvlHZYcWDzrYr8EKf/EzFe9RqDBQDKtzDzre1H9Uivr387Vm
uFUcGHYG/GXuX47C7EUsMtaSW2UEoR2ytw/HR6CKFPTVXwAzEO6kA9vg0EqL0iIq
2wVLgavlZuwegmaUBgnr+bgVZMvVN7OU7fAIRVe5xNO6itrPKvheSlQthmRiiq9C
uzOu4PS6PexqzHUNPCcJpCsj+lawmCSrE0bxtPzTA/CQInVgWs219V9+W5Gn/0YA
EARN9k6ueX9PZPQrPQLm
=BBBv
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine

Pull dmaengine updates from Dan Williams:
 "Even though this has fixes marked for -stable, given the size and the
  needed conflict resolutions this is 3.18-rc1/merge-window material.

  These patches have been languishing in my tree for a long while. The
  fact that I do not have the time to do proper/prompt maintenance of
  this tree is a primary factor in the decision to step down as
  dmaengine maintainer. That and the fact that the bulk of drivers/dma/
  activity is going through Vinod these days.

  The net_dma removal has not been in -next. It has developed simple
  conflicts against mainline and net-next (for-3.18).

  Continuing thanks to Vinod for staying on top of drivers/dma/.

  Summary:

   1/ Step down as dmaengine maintainer see commit 08223d80df38
      "dmaengine maintainer update"

   2/ Removal of net_dma, as it has been marked 'broken' since 3.13
      (commit 77873803363c "net_dma: mark broken"), without reports of
      performance regression.

   3/ Miscellaneous fixes"

* tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  net: make tcp_cleanup_rbuf private
  net_dma: revert 'copied_early'
  net_dma: simple removal
  dmaengine maintainer update
  dmatest: prevent memory leakage on error path in thread
  ioat: Use time_before_jiffies()
  dmaengine: fix xor sources continuation
  dma: mv_xor: Rename __mv_xor_slot_cleanup() to mv_xor_slot_cleanup()
  dma: mv_xor: Remove all callers of mv_xor_slot_cleanup()
  dma: mv_xor: Remove unneeded mv_xor_clean_completed_slots() call
  ioat: Use pci_enable_msix_exact() instead of pci_enable_msix()
  drivers: dma: Include appropriate header file in dca.c
  drivers: dma: Mark functions as static in dma_v3.c
  dma: mv_xor: Add DMA API error checks
  ioat/dca: Use dev_is_pci() to check whether it is pci device
commit d0cd84817c

Documentation/ABI/removed/net_dma (new file, 8 lines)
@@ -0,0 +1,8 @@
+What:		tcp_dma_copybreak sysctl
+Date:		Removed in kernel v3.13
+Contact:	Dan Williams <dan.j.williams@intel.com>
+Description:
+	Formerly the lower limit, in bytes, of the size of socket reads
+	that will be offloaded to a DMA copy engine. Removed due to
+	coherency issues of the cpu potentially touching the buffers
+	while dma is in flight.
@@ -580,12 +580,6 @@ tcp_workaround_signed_windows - BOOLEAN
 	not receive a window scaling option from them.
 	Default: 0
 
-tcp_dma_copybreak - INTEGER
-	Lower limit, in bytes, of the size of socket reads that will be
-	offloaded to a DMA copy engine, if one is present in the system
-	and CONFIG_NET_DMA is enabled.
-	Default: 4096
-
 tcp_thin_linear_timeouts - BOOLEAN
 	Enable dynamic triggering of linear timeouts for thin streams.
 	If set, a check is performed upon retransmission by timeout to
MAINTAINERS (19 lines changed)
@ -1025,24 +1025,20 @@ F: arch/arm/mach-pxa/colibri-pxa270-income.c
|
||||
|
||||
ARM/INTEL IOP32X ARM ARCHITECTURE
|
||||
M: Lennert Buytenhek <kernel@wantstofly.org>
|
||||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
|
||||
ARM/INTEL IOP33X ARM ARCHITECTURE
|
||||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
S: Orphan
|
||||
|
||||
ARM/INTEL IOP13XX ARM ARCHITECTURE
|
||||
M: Lennert Buytenhek <kernel@wantstofly.org>
|
||||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
|
||||
ARM/INTEL IQ81342EX MACHINE SUPPORT
|
||||
M: Lennert Buytenhek <kernel@wantstofly.org>
|
||||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
|
||||
@ -1067,7 +1063,6 @@ F: drivers/pcmcia/pxa2xx_stargate2.c
|
||||
|
||||
ARM/INTEL XSC3 (MANZANO) ARM CORE
|
||||
M: Lennert Buytenhek <kernel@wantstofly.org>
|
||||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
|
||||
@ -1562,9 +1557,9 @@ F: drivers/platform/x86/asus*.c
|
||||
F: drivers/platform/x86/eeepc*.c
|
||||
|
||||
ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
|
||||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
R: Dan Williams <dan.j.williams@intel.com>
|
||||
W: http://sourceforge.net/projects/xscaleiop
|
||||
S: Maintained
|
||||
S: Odd fixes
|
||||
F: Documentation/crypto/async-tx-api.txt
|
||||
F: crypto/async_tx/
|
||||
F: drivers/dma/
|
||||
@ -2995,13 +2990,11 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
|
||||
|
||||
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
|
||||
M: Vinod Koul <vinod.koul@intel.com>
|
||||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
L: dmaengine@vger.kernel.org
|
||||
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
|
||||
S: Supported
|
||||
S: Maintained
|
||||
F: drivers/dma/
|
||||
F: include/linux/dma*
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx.git
|
||||
T: git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma)
|
||||
|
||||
DME1737 HARDWARE MONITOR DRIVER
|
||||
@ -4754,8 +4747,8 @@ F: arch/x86/kernel/cpu/microcode/core*
|
||||
F: arch/x86/kernel/cpu/microcode/intel*
|
||||
|
||||
INTEL I/OAT DMA DRIVER
|
||||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
M: Dave Jiang <dave.jiang@intel.com>
|
||||
R: Dan Williams <dan.j.williams@intel.com>
|
||||
L: dmaengine@vger.kernel.org
|
||||
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
|
||||
S: Supported
|
||||
@ -4770,7 +4763,7 @@ F: drivers/iommu/intel-iommu.c
|
||||
F: include/linux/intel-iommu.h
|
||||
|
||||
INTEL IOP-ADMA DMA DRIVER
|
||||
M: Dan Williams <dan.j.williams@intel.com>
|
||||
R: Dan Williams <dan.j.williams@intel.com>
|
||||
S: Odd fixes
|
||||
F: drivers/dma/iop-adma.c
|
||||
|
||||
|
@@ -78,8 +78,6 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
 		tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
 					      xor_src_cnt, unmap->len,
 					      dma_flags);
-		src_list[0] = tmp;
-
 
 		if (unlikely(!tx))
 			async_tx_quiesce(&submit->depend_tx);
@@ -92,6 +90,7 @@ do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap,
 						      xor_src_cnt, unmap->len,
 						      dma_flags);
 		}
+		src_list[0] = tmp;
 
 		dma_set_unmap(tx, unmap);
 		async_tx_submit(chan, tx, submit);
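The two hunks above move the `src_list[0] = tmp;` restore from just after the first prep attempt to after the retry loop, so a retried `device_prep_dma_xor()` still sees the continuation destination in slot 0 instead of the caller's original source. A condensed, hedged fragment of the corrected ordering (simplified from do_async_xor(), not a drop-in replacement for it):

```c
	/* when continuing a multi-pass xor, slot 0 temporarily points at
	 * the previous destination; remember the caller's pointer first */
	tmp = src_list[0];
	src_list[0] = dma_dest;

	tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
				      xor_src_cnt, unmap->len, dma_flags);
	while (unlikely(!tx)) {
		/* descriptor ring full: flush pending work and retry with
		 * the SAME src_list, which must still hold dma_dest */
		dma_async_issue_pending(chan);
		tx = dma->device_prep_dma_xor(chan, dma_dest, src_list,
					      xor_src_cnt, unmap->len,
					      dma_flags);
	}
	src_list[0] = tmp;	/* restore only after prep has succeeded */
```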
@ -427,18 +427,6 @@ config DMA_OF
|
||||
comment "DMA Clients"
|
||||
depends on DMA_ENGINE
|
||||
|
||||
config NET_DMA
|
||||
bool "Network: TCP receive copy offload"
|
||||
depends on DMA_ENGINE && NET
|
||||
default (INTEL_IOATDMA || FSL_DMA)
|
||||
depends on BROKEN
|
||||
help
|
||||
This enables the use of DMA engines in the network stack to
|
||||
offload receive copy-to-user operations, freeing CPU cycles.
|
||||
|
||||
Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
|
||||
say N.
|
||||
|
||||
config ASYNC_TX_DMA
|
||||
bool "Async_tx: Offload support for the async_tx api"
|
||||
depends on DMA_ENGINE
|
||||
|
@ -6,7 +6,6 @@ obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
|
||||
obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
|
||||
obj-$(CONFIG_DMA_OF) += of-dma.o
|
||||
|
||||
obj-$(CONFIG_NET_DMA) += iovlock.o
|
||||
obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
|
||||
obj-$(CONFIG_DMATEST) += dmatest.o
|
||||
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
|
||||
|
@ -1081,110 +1081,6 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
|
||||
}
|
||||
EXPORT_SYMBOL(dmaengine_get_unmap_data);
|
||||
|
||||
/**
|
||||
* dma_async_memcpy_pg_to_pg - offloaded copy from page to page
|
||||
* @chan: DMA channel to offload copy to
|
||||
* @dest_pg: destination page
|
||||
* @dest_off: offset in page to copy to
|
||||
* @src_pg: source page
|
||||
* @src_off: offset in page to copy from
|
||||
* @len: length
|
||||
*
|
||||
* Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
|
||||
* address according to the DMA mapping API rules for streaming mappings.
|
||||
* Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
|
||||
* (kernel memory or locked user space pages).
|
||||
*/
|
||||
dma_cookie_t
|
||||
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
|
||||
unsigned int dest_off, struct page *src_pg, unsigned int src_off,
|
||||
size_t len)
|
||||
{
|
||||
struct dma_device *dev = chan->device;
|
||||
struct dma_async_tx_descriptor *tx;
|
||||
struct dmaengine_unmap_data *unmap;
|
||||
dma_cookie_t cookie;
|
||||
unsigned long flags;
|
||||
|
||||
unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
|
||||
if (!unmap)
|
||||
return -ENOMEM;
|
||||
|
||||
unmap->to_cnt = 1;
|
||||
unmap->from_cnt = 1;
|
||||
unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
|
||||
DMA_TO_DEVICE);
|
||||
unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
|
||||
DMA_FROM_DEVICE);
|
||||
unmap->len = len;
|
||||
flags = DMA_CTRL_ACK;
|
||||
tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
|
||||
len, flags);
|
||||
|
||||
if (!tx) {
|
||||
dmaengine_unmap_put(unmap);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dma_set_unmap(tx, unmap);
|
||||
cookie = tx->tx_submit(tx);
|
||||
dmaengine_unmap_put(unmap);
|
||||
|
||||
preempt_disable();
|
||||
__this_cpu_add(chan->local->bytes_transferred, len);
|
||||
__this_cpu_inc(chan->local->memcpy_count);
|
||||
preempt_enable();
|
||||
|
||||
return cookie;
|
||||
}
|
||||
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
|
||||
|
||||
/**
|
||||
* dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
|
||||
* @chan: DMA channel to offload copy to
|
||||
* @dest: destination address (virtual)
|
||||
* @src: source address (virtual)
|
||||
* @len: length
|
||||
*
|
||||
* Both @dest and @src must be mappable to a bus address according to the
|
||||
* DMA mapping API rules for streaming mappings.
|
||||
* Both @dest and @src must stay memory resident (kernel memory or locked
|
||||
* user space pages).
|
||||
*/
|
||||
dma_cookie_t
|
||||
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
|
||||
void *src, size_t len)
|
||||
{
|
||||
return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
|
||||
(unsigned long) dest & ~PAGE_MASK,
|
||||
virt_to_page(src),
|
||||
(unsigned long) src & ~PAGE_MASK, len);
|
||||
}
|
||||
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
|
||||
|
||||
/**
|
||||
* dma_async_memcpy_buf_to_pg - offloaded copy from address to page
|
||||
* @chan: DMA channel to offload copy to
|
||||
* @page: destination page
|
||||
* @offset: offset in page to copy to
|
||||
* @kdata: source address (virtual)
|
||||
* @len: length
|
||||
*
|
||||
* Both @page/@offset and @kdata must be mappable to a bus address according
|
||||
* to the DMA mapping API rules for streaming mappings.
|
||||
* Both @page/@offset and @kdata must stay memory resident (kernel memory or
|
||||
* locked user space pages)
|
||||
*/
|
||||
dma_cookie_t
|
||||
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
|
||||
unsigned int offset, void *kdata, size_t len)
|
||||
{
|
||||
return dma_async_memcpy_pg_to_pg(chan, page, offset,
|
||||
virt_to_page(kdata),
|
||||
(unsigned long) kdata & ~PAGE_MASK, len);
|
||||
}
|
||||
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
|
||||
|
||||
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
|
||||
struct dma_chan *chan)
|
||||
{
|
||||
|
@@ -688,14 +688,14 @@ static int dmatest_func(void *data)
 	runtime = ktime_us_delta(ktime_get(), ktime);
 
 	ret = 0;
+err_dstbuf:
 	for (i = 0; thread->dsts[i]; i++)
 		kfree(thread->dsts[i]);
-err_dstbuf:
 	kfree(thread->dsts);
 err_dsts:
+err_srcbuf:
 	for (i = 0; thread->srcs[i]; i++)
 		kfree(thread->srcs[i]);
-err_srcbuf:
 	kfree(thread->srcs);
 err_srcs:
 	kfree(pq_coefs);
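The label shuffle above fixes the leak: a failed `dsts[i]`/`srcs[i]` allocation used to jump past the loop that frees the entries allocated so far. A generic sketch of the goto-unwind idiom the fix restores (names here are illustrative, not the dmatest symbols):

```c
#include <linux/slab.h>

/* Allocate n buffers; on failure free whatever was already allocated.
 * The error label sits ABOVE the per-buffer free loop on purpose. */
static int alloc_bufs(void ***out, int n, size_t sz)
{
	void **bufs;
	int i;

	bufs = kcalloc(n + 1, sizeof(*bufs), GFP_KERNEL);	/* NULL-terminated */
	if (!bufs)
		return -ENOMEM;

	for (i = 0; i < n; i++) {
		bufs[i] = kmalloc(sz, GFP_KERNEL);
		if (!bufs[i])
			goto err_buf;	/* frees bufs[0..i-1], then the array */
	}
	*out = bufs;
	return 0;

err_buf:
	for (i = 0; bufs[i]; i++)
		kfree(bufs[i]);
	kfree(bufs);
	return -ENOMEM;
}
```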
@ -35,6 +35,7 @@
|
||||
|
||||
#include "dma.h"
|
||||
#include "registers.h"
|
||||
#include "dma_v2.h"
|
||||
|
||||
/*
|
||||
* Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
|
||||
@ -147,7 +148,7 @@ static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
|
||||
u16 id;
|
||||
|
||||
/* This implementation only supports PCI-Express */
|
||||
if (dev->bus != &pci_bus_type)
|
||||
if (!dev_is_pci(dev))
|
||||
return -ENODEV;
|
||||
pdev = to_pci_dev(dev);
|
||||
id = dcaid_from_pcidev(pdev);
|
||||
@ -179,7 +180,7 @@ static int ioat_dca_remove_requester(struct dca_provider *dca,
|
||||
int i;
|
||||
|
||||
/* This implementation only supports PCI-Express */
|
||||
if (dev->bus != &pci_bus_type)
|
||||
if (!dev_is_pci(dev))
|
||||
return -ENODEV;
|
||||
pdev = to_pci_dev(dev);
|
||||
|
||||
@ -320,7 +321,7 @@ static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
|
||||
u16 global_req_table;
|
||||
|
||||
/* This implementation only supports PCI-Express */
|
||||
if (dev->bus != &pci_bus_type)
|
||||
if (!dev_is_pci(dev))
|
||||
return -ENODEV;
|
||||
pdev = to_pci_dev(dev);
|
||||
id = dcaid_from_pcidev(pdev);
|
||||
@ -354,7 +355,7 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca,
|
||||
u16 global_req_table;
|
||||
|
||||
/* This implementation only supports PCI-Express */
|
||||
if (dev->bus != &pci_bus_type)
|
||||
if (!dev_is_pci(dev))
|
||||
return -ENODEV;
|
||||
pdev = to_pci_dev(dev);
|
||||
|
||||
@ -496,7 +497,7 @@ static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
|
||||
u16 global_req_table;
|
||||
|
||||
/* This implementation only supports PCI-Express */
|
||||
if (dev->bus != &pci_bus_type)
|
||||
if (!dev_is_pci(dev))
|
||||
return -ENODEV;
|
||||
pdev = to_pci_dev(dev);
|
||||
id = dcaid_from_pcidev(pdev);
|
||||
@ -530,7 +531,7 @@ static int ioat3_dca_remove_requester(struct dca_provider *dca,
|
||||
u16 global_req_table;
|
||||
|
||||
/* This implementation only supports PCI-Express */
|
||||
if (dev->bus != &pci_bus_type)
|
||||
if (!dev_is_pci(dev))
|
||||
return -ENODEV;
|
||||
pdev = to_pci_dev(dev);
|
||||
|
||||
|
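The ioat/dca hunks above replace the open-coded `dev->bus != &pci_bus_type` test with `dev_is_pci(dev)` in each requester add/remove hook; the two checks are equivalent, the helper just keeps the bus-type symbol out of the driver. A minimal, hypothetical illustration of the pattern:

```c
#include <linux/pci.h>

/* Illustrative only: reject non-PCI devices before calling to_pci_dev(). */
static int example_add_requester(struct device *dev)
{
	struct pci_dev *pdev;

	if (!dev_is_pci(dev))		/* was: if (dev->bus != &pci_bus_type) */
		return -ENODEV;

	pdev = to_pci_dev(dev);		/* safe now that dev is known to be PCI */
	dev_dbg(&pdev->dev, "requester is PCI device %04x:%04x\n",
		pdev->vendor, pdev->device);
	return 0;
}
```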
@@ -947,7 +947,7 @@ msix:
 	for (i = 0; i < msixcnt; i++)
 		device->msix_entries[i].entry = i;
 
-	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
+	err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
 	if (err)
 		goto msi;
 
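`pci_enable_msix_exact()` either grants all requested vectors or returns a negative errno; unlike the old `pci_enable_msix()` it never reports a smaller usable count, so the caller needs no retry loop and can simply fall back to MSI on failure, as the hunk above does. A small hedged sketch of that semantics (hypothetical helper name):

```c
#include <linux/pci.h>

/* Request exactly "count" MSI-X vectors or fail; there is no
 * "only N vectors available" positive return to loop over. */
static int enable_exact_msix(struct pci_dev *pdev,
			     struct msix_entry *entries, int count)
{
	int i;

	for (i = 0; i < count; i++)
		entries[i].entry = i;

	return pci_enable_msix_exact(pdev, entries, count);
}
```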
@ -1222,7 +1222,6 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca)
|
||||
err = ioat_probe(device);
|
||||
if (err)
|
||||
return err;
|
||||
ioat_set_tcp_copy_break(4096);
|
||||
err = ioat_register(device);
|
||||
if (err)
|
||||
return err;
|
||||
|
@ -214,13 +214,6 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
|
||||
#define dump_desc_dbg(c, d) \
|
||||
({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
|
||||
|
||||
static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
|
||||
{
|
||||
#ifdef CONFIG_NET_DMA
|
||||
sysctl_tcp_dma_copybreak = copybreak;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline struct ioat_chan_common *
|
||||
ioat_chan_by_index(struct ioatdma_device *device, int index)
|
||||
{
|
||||
|
@@ -735,7 +735,8 @@ int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
 		 * called under bh_disabled so we need to trigger the timer
 		 * event directly
 		 */
-		if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
+		if (time_is_before_jiffies(chan->timer.expires)
+		    && timer_pending(&chan->timer)) {
 			struct ioatdma_device *device = chan->device;
 
 			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
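The raw `jiffies > expires` comparison misbehaves once the jiffies counter wraps; `time_is_before_jiffies()` expands to the wraparound-safe `time_after(jiffies, ...)` form. A tiny sketch of the safe check:

```c
#include <linux/jiffies.h>

/* Wrap-safe: true once the stored expiry time has already passed. */
static bool deadline_has_passed(unsigned long expires)
{
	/* equivalent to time_after(jiffies, expires); never compare raw values */
	return time_is_before_jiffies(expires);
}
```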
@ -899,7 +900,6 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca)
|
||||
err = ioat_probe(device);
|
||||
if (err)
|
||||
return err;
|
||||
ioat_set_tcp_copy_break(2048);
|
||||
|
||||
list_for_each_entry(c, &dma->channels, device_node) {
|
||||
chan = to_chan_common(c);
|
||||
|
@ -740,7 +740,7 @@ ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
|
||||
return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
|
||||
}
|
||||
|
||||
struct dma_async_tx_descriptor *
|
||||
static struct dma_async_tx_descriptor *
|
||||
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
|
||||
unsigned int src_cnt, size_t len,
|
||||
enum sum_check_flags *result, unsigned long flags)
|
||||
@ -1091,7 +1091,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
|
||||
}
|
||||
}
|
||||
|
||||
struct dma_async_tx_descriptor *
|
||||
static struct dma_async_tx_descriptor *
|
||||
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
|
||||
unsigned int src_cnt, const unsigned char *scf, size_t len,
|
||||
enum sum_check_flags *pqres, unsigned long flags)
|
||||
@ -1133,7 +1133,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
|
||||
flags);
|
||||
}
|
||||
|
||||
struct dma_async_tx_descriptor *
|
||||
static struct dma_async_tx_descriptor *
|
||||
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
|
||||
unsigned int src_cnt, size_t len,
|
||||
enum sum_check_flags *result, unsigned long flags)
|
||||
@ -1655,7 +1655,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
|
||||
err = ioat_probe(device);
|
||||
if (err)
|
||||
return err;
|
||||
ioat_set_tcp_copy_break(262144);
|
||||
|
||||
list_for_each_entry(c, &dma->channels, device_node) {
|
||||
chan = to_chan_common(c);
|
||||
|
@ -1,280 +0,0 @@
|
||||
/*
|
||||
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
|
||||
* Portions based on net/core/datagram.c and copyrighted by their authors.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc., 59
|
||||
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution in the
|
||||
* file called COPYING.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This code allows the net stack to make use of a DMA engine for
|
||||
* skb to iovec copies.
|
||||
*/
|
||||
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/slab.h>
|
||||
#include <net/tcp.h> /* for memcpy_toiovec */
|
||||
#include <asm/io.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
static int num_pages_spanned(struct iovec *iov)
|
||||
{
|
||||
return
|
||||
((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
|
||||
((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
/*
|
||||
* Pin down all the iovec pages needed for len bytes.
|
||||
* Return a struct dma_pinned_list to keep track of pages pinned down.
|
||||
*
|
||||
* We are allocating a single chunk of memory, and then carving it up into
|
||||
* 3 sections, the latter 2 whose size depends on the number of iovecs and the
|
||||
* total number of pages, respectively.
|
||||
*/
|
||||
struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
|
||||
{
|
||||
struct dma_pinned_list *local_list;
|
||||
struct page **pages;
|
||||
int i;
|
||||
int ret;
|
||||
int nr_iovecs = 0;
|
||||
int iovec_len_used = 0;
|
||||
int iovec_pages_used = 0;
|
||||
|
||||
/* don't pin down non-user-based iovecs */
|
||||
if (segment_eq(get_fs(), KERNEL_DS))
|
||||
return NULL;
|
||||
|
||||
/* determine how many iovecs/pages there are, up front */
|
||||
do {
|
||||
iovec_len_used += iov[nr_iovecs].iov_len;
|
||||
iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
|
||||
nr_iovecs++;
|
||||
} while (iovec_len_used < len);
|
||||
|
||||
/* single kmalloc for pinned list, page_list[], and the page arrays */
|
||||
local_list = kmalloc(sizeof(*local_list)
|
||||
+ (nr_iovecs * sizeof (struct dma_page_list))
|
||||
+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
|
||||
if (!local_list)
|
||||
goto out;
|
||||
|
||||
/* list of pages starts right after the page list array */
|
||||
pages = (struct page **) &local_list->page_list[nr_iovecs];
|
||||
|
||||
local_list->nr_iovecs = 0;
|
||||
|
||||
for (i = 0; i < nr_iovecs; i++) {
|
||||
struct dma_page_list *page_list = &local_list->page_list[i];
|
||||
|
||||
len -= iov[i].iov_len;
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
|
||||
goto unpin;
|
||||
|
||||
page_list->nr_pages = num_pages_spanned(&iov[i]);
|
||||
page_list->base_address = iov[i].iov_base;
|
||||
|
||||
page_list->pages = pages;
|
||||
pages += page_list->nr_pages;
|
||||
|
||||
/* pin pages down */
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
ret = get_user_pages(
|
||||
current,
|
||||
current->mm,
|
||||
(unsigned long) iov[i].iov_base,
|
||||
page_list->nr_pages,
|
||||
1, /* write */
|
||||
0, /* force */
|
||||
page_list->pages,
|
||||
NULL);
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
|
||||
if (ret != page_list->nr_pages)
|
||||
goto unpin;
|
||||
|
||||
local_list->nr_iovecs = i + 1;
|
||||
}
|
||||
|
||||
return local_list;
|
||||
|
||||
unpin:
|
||||
dma_unpin_iovec_pages(local_list);
|
||||
out:
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
if (!pinned_list)
|
||||
return;
|
||||
|
||||
for (i = 0; i < pinned_list->nr_iovecs; i++) {
|
||||
struct dma_page_list *page_list = &pinned_list->page_list[i];
|
||||
for (j = 0; j < page_list->nr_pages; j++) {
|
||||
set_page_dirty_lock(page_list->pages[j]);
|
||||
page_cache_release(page_list->pages[j]);
|
||||
}
|
||||
}
|
||||
|
||||
kfree(pinned_list);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* We have already pinned down the pages we will be using in the iovecs.
|
||||
* Each entry in iov array has corresponding entry in pinned_list->page_list.
|
||||
* Using array indexing to keep iov[] and page_list[] in sync.
|
||||
* Initial elements in iov array's iov->iov_len will be 0 if already copied into
|
||||
* by another call.
|
||||
* iov array length remaining guaranteed to be bigger than len.
|
||||
*/
|
||||
dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
|
||||
struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len)
|
||||
{
|
||||
int iov_byte_offset;
|
||||
int copy;
|
||||
dma_cookie_t dma_cookie = 0;
|
||||
int iovec_idx;
|
||||
int page_idx;
|
||||
|
||||
if (!chan)
|
||||
return memcpy_toiovec(iov, kdata, len);
|
||||
|
||||
iovec_idx = 0;
|
||||
while (iovec_idx < pinned_list->nr_iovecs) {
|
||||
struct dma_page_list *page_list;
|
||||
|
||||
/* skip already used-up iovecs */
|
||||
while (!iov[iovec_idx].iov_len)
|
||||
iovec_idx++;
|
||||
|
||||
page_list = &pinned_list->page_list[iovec_idx];
|
||||
|
||||
iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
|
||||
page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
|
||||
- ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
|
||||
|
||||
/* break up copies to not cross page boundary */
|
||||
while (iov[iovec_idx].iov_len) {
|
||||
copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
|
||||
copy = min_t(int, copy, iov[iovec_idx].iov_len);
|
||||
|
||||
dma_cookie = dma_async_memcpy_buf_to_pg(chan,
|
||||
page_list->pages[page_idx],
|
||||
iov_byte_offset,
|
||||
kdata,
|
||||
copy);
|
||||
/* poll for a descriptor slot */
|
||||
if (unlikely(dma_cookie < 0)) {
|
||||
dma_async_issue_pending(chan);
|
||||
continue;
|
||||
}
|
||||
|
||||
len -= copy;
|
||||
iov[iovec_idx].iov_len -= copy;
|
||||
iov[iovec_idx].iov_base += copy;
|
||||
|
||||
if (!len)
|
||||
return dma_cookie;
|
||||
|
||||
kdata += copy;
|
||||
iov_byte_offset = 0;
|
||||
page_idx++;
|
||||
}
|
||||
iovec_idx++;
|
||||
}
|
||||
|
||||
/* really bad if we ever run out of iovecs */
|
||||
BUG();
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
|
||||
struct dma_pinned_list *pinned_list, struct page *page,
|
||||
unsigned int offset, size_t len)
|
||||
{
|
||||
int iov_byte_offset;
|
||||
int copy;
|
||||
dma_cookie_t dma_cookie = 0;
|
||||
int iovec_idx;
|
||||
int page_idx;
|
||||
int err;
|
||||
|
||||
/* this needs as-yet-unimplemented buf-to-buff, so punt. */
|
||||
/* TODO: use dma for this */
|
||||
if (!chan || !pinned_list) {
|
||||
u8 *vaddr = kmap(page);
|
||||
err = memcpy_toiovec(iov, vaddr + offset, len);
|
||||
kunmap(page);
|
||||
return err;
|
||||
}
|
||||
|
||||
iovec_idx = 0;
|
||||
while (iovec_idx < pinned_list->nr_iovecs) {
|
||||
struct dma_page_list *page_list;
|
||||
|
||||
/* skip already used-up iovecs */
|
||||
while (!iov[iovec_idx].iov_len)
|
||||
iovec_idx++;
|
||||
|
||||
page_list = &pinned_list->page_list[iovec_idx];
|
||||
|
||||
iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
|
||||
page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
|
||||
- ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
|
||||
|
||||
/* break up copies to not cross page boundary */
|
||||
while (iov[iovec_idx].iov_len) {
|
||||
copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
|
||||
copy = min_t(int, copy, iov[iovec_idx].iov_len);
|
||||
|
||||
dma_cookie = dma_async_memcpy_pg_to_pg(chan,
|
||||
page_list->pages[page_idx],
|
||||
iov_byte_offset,
|
||||
page,
|
||||
offset,
|
||||
copy);
|
||||
/* poll for a descriptor slot */
|
||||
if (unlikely(dma_cookie < 0)) {
|
||||
dma_async_issue_pending(chan);
|
||||
continue;
|
||||
}
|
||||
|
||||
len -= copy;
|
||||
iov[iovec_idx].iov_len -= copy;
|
||||
iov[iovec_idx].iov_base += copy;
|
||||
|
||||
if (!len)
|
||||
return dma_cookie;
|
||||
|
||||
offset += copy;
|
||||
iov_byte_offset = 0;
|
||||
page_idx++;
|
||||
}
|
||||
iovec_idx++;
|
||||
}
|
||||
|
||||
/* really bad if we ever run out of iovecs */
|
||||
BUG();
|
||||
return -EFAULT;
|
||||
}
|
@ -310,7 +310,8 @@ mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
|
||||
/* This function must be called with the mv_xor_chan spinlock held */
|
||||
static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
|
||||
{
|
||||
struct mv_xor_desc_slot *iter, *_iter;
|
||||
dma_cookie_t cookie = 0;
|
||||
@ -366,18 +367,13 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
|
||||
mv_chan->dmachan.completed_cookie = cookie;
|
||||
}
|
||||
|
||||
static void
|
||||
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
|
||||
{
|
||||
spin_lock_bh(&mv_chan->lock);
|
||||
__mv_xor_slot_cleanup(mv_chan);
|
||||
spin_unlock_bh(&mv_chan->lock);
|
||||
}
|
||||
|
||||
static void mv_xor_tasklet(unsigned long data)
|
||||
{
|
||||
struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
|
||||
|
||||
spin_lock_bh(&chan->lock);
|
||||
mv_xor_slot_cleanup(chan);
|
||||
spin_unlock_bh(&chan->lock);
|
||||
}
|
||||
|
||||
static struct mv_xor_desc_slot *
|
||||
@ -656,9 +652,10 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
|
||||
struct mv_xor_desc_slot *iter, *_iter;
|
||||
int in_use_descs = 0;
|
||||
|
||||
spin_lock_bh(&mv_chan->lock);
|
||||
|
||||
mv_xor_slot_cleanup(mv_chan);
|
||||
|
||||
spin_lock_bh(&mv_chan->lock);
|
||||
list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
|
||||
chain_node) {
|
||||
in_use_descs++;
|
||||
@ -700,11 +697,12 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
|
||||
enum dma_status ret;
|
||||
|
||||
ret = dma_cookie_status(chan, cookie, txstate);
|
||||
if (ret == DMA_COMPLETE) {
|
||||
mv_xor_clean_completed_slots(mv_chan);
|
||||
if (ret == DMA_COMPLETE)
|
||||
return ret;
|
||||
}
|
||||
|
||||
spin_lock_bh(&mv_chan->lock);
|
||||
mv_xor_slot_cleanup(mv_chan);
|
||||
spin_unlock_bh(&mv_chan->lock);
|
||||
|
||||
return dma_cookie_status(chan, cookie, txstate);
|
||||
}
|
||||
@ -782,7 +780,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
|
||||
|
||||
static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
|
||||
{
|
||||
int i;
|
||||
int i, ret;
|
||||
void *src, *dest;
|
||||
dma_addr_t src_dma, dest_dma;
|
||||
struct dma_chan *dma_chan;
|
||||
@ -819,19 +817,44 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
|
||||
|
||||
src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
|
||||
PAGE_SIZE, DMA_TO_DEVICE);
|
||||
unmap->to_cnt = 1;
|
||||
unmap->addr[0] = src_dma;
|
||||
|
||||
ret = dma_mapping_error(dma_chan->device->dev, src_dma);
|
||||
if (ret) {
|
||||
err = -ENOMEM;
|
||||
goto free_resources;
|
||||
}
|
||||
unmap->to_cnt = 1;
|
||||
|
||||
dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
|
||||
PAGE_SIZE, DMA_FROM_DEVICE);
|
||||
unmap->from_cnt = 1;
|
||||
unmap->addr[1] = dest_dma;
|
||||
|
||||
ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
|
||||
if (ret) {
|
||||
err = -ENOMEM;
|
||||
goto free_resources;
|
||||
}
|
||||
unmap->from_cnt = 1;
|
||||
unmap->len = PAGE_SIZE;
|
||||
|
||||
tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
|
||||
PAGE_SIZE, 0);
|
||||
if (!tx) {
|
||||
dev_err(dma_chan->device->dev,
|
||||
"Self-test cannot prepare operation, disabling\n");
|
||||
err = -ENODEV;
|
||||
goto free_resources;
|
||||
}
|
||||
|
||||
cookie = mv_xor_tx_submit(tx);
|
||||
if (dma_submit_error(cookie)) {
|
||||
dev_err(dma_chan->device->dev,
|
||||
"Self-test submit error, disabling\n");
|
||||
err = -ENODEV;
|
||||
goto free_resources;
|
||||
}
|
||||
|
||||
mv_xor_issue_pending(dma_chan);
|
||||
async_tx_ack(tx);
|
||||
msleep(1);
|
||||
@ -866,7 +889,7 @@ out:
|
||||
static int
|
||||
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
|
||||
{
|
||||
int i, src_idx;
|
||||
int i, src_idx, ret;
|
||||
struct page *dest;
|
||||
struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
|
||||
dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
|
||||
@ -929,19 +952,42 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
|
||||
unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
|
||||
0, PAGE_SIZE, DMA_TO_DEVICE);
|
||||
dma_srcs[i] = unmap->addr[i];
|
||||
ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
|
||||
if (ret) {
|
||||
err = -ENOMEM;
|
||||
goto free_resources;
|
||||
}
|
||||
unmap->to_cnt++;
|
||||
}
|
||||
|
||||
unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
|
||||
DMA_FROM_DEVICE);
|
||||
dest_dma = unmap->addr[src_count];
|
||||
ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
|
||||
if (ret) {
|
||||
err = -ENOMEM;
|
||||
goto free_resources;
|
||||
}
|
||||
unmap->from_cnt = 1;
|
||||
unmap->len = PAGE_SIZE;
|
||||
|
||||
tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
|
||||
src_count, PAGE_SIZE, 0);
|
||||
if (!tx) {
|
||||
dev_err(dma_chan->device->dev,
|
||||
"Self-test cannot prepare operation, disabling\n");
|
||||
err = -ENODEV;
|
||||
goto free_resources;
|
||||
}
|
||||
|
||||
cookie = mv_xor_tx_submit(tx);
|
||||
if (dma_submit_error(cookie)) {
|
||||
dev_err(dma_chan->device->dev,
|
||||
"Self-test submit error, disabling\n");
|
||||
err = -ENODEV;
|
||||
goto free_resources;
|
||||
}
|
||||
|
||||
mv_xor_issue_pending(dma_chan);
|
||||
async_tx_ack(tx);
|
||||
msleep(8);
|
||||
|
@ -900,18 +900,6 @@ static inline void dmaengine_put(void)
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_NET_DMA
|
||||
#define net_dmaengine_get() dmaengine_get()
|
||||
#define net_dmaengine_put() dmaengine_put()
|
||||
#else
|
||||
static inline void net_dmaengine_get(void)
|
||||
{
|
||||
}
|
||||
static inline void net_dmaengine_put(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ASYNC_TX_DMA
|
||||
#define async_dmaengine_get() dmaengine_get()
|
||||
#define async_dmaengine_put() dmaengine_put()
|
||||
@ -933,16 +921,8 @@ async_dma_find_channel(enum dma_transaction_type type)
|
||||
return NULL;
|
||||
}
|
||||
#endif /* CONFIG_ASYNC_TX_DMA */
|
||||
|
||||
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
|
||||
void *dest, void *src, size_t len);
|
||||
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
|
||||
struct page *page, unsigned int offset, void *kdata, size_t len);
|
||||
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
|
||||
struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
|
||||
unsigned int src_off, size_t len);
|
||||
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
|
||||
struct dma_chan *chan);
|
||||
struct dma_chan *chan);
|
||||
|
||||
static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
|
||||
{
|
||||
|
@ -28,7 +28,6 @@
|
||||
#include <linux/textsearch.h>
|
||||
#include <net/checksum.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/netdev_features.h>
|
||||
@ -581,11 +580,8 @@ struct sk_buff {
|
||||
/* 2/4 bit hole (depending on ndisc_nodetype presence) */
|
||||
kmemcheck_bitfield_end(flags2);
|
||||
|
||||
#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
|
||||
union {
|
||||
unsigned int napi_id;
|
||||
dma_cookie_t dma_cookie;
|
||||
};
|
||||
#ifdef CONFIG_NET_RX_BUSY_POLL
|
||||
unsigned int napi_id;
|
||||
#endif
|
||||
#ifdef CONFIG_NETWORK_SECMARK
|
||||
__u32 secmark;
|
||||
|
@ -19,7 +19,6 @@
|
||||
|
||||
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <net/sock.h>
|
||||
#include <net/inet_connection_sock.h>
|
||||
#include <net/inet_timewait_sock.h>
|
||||
@ -166,13 +165,6 @@ struct tcp_sock {
|
||||
struct iovec *iov;
|
||||
int memory;
|
||||
int len;
|
||||
#ifdef CONFIG_NET_DMA
|
||||
/* members for async copy */
|
||||
struct dma_chan *dma_chan;
|
||||
int wakeup;
|
||||
struct dma_pinned_list *pinned_list;
|
||||
dma_cookie_t dma_cookie;
|
||||
#endif
|
||||
} ucopy;
|
||||
|
||||
u32 snd_wl1; /* Sequence for window update */
|
||||
|
@ -1,32 +0,0 @@
|
||||
/*
|
||||
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc., 59
|
||||
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution in the
|
||||
* file called COPYING.
|
||||
*/
|
||||
#ifndef NETDMA_H
|
||||
#define NETDMA_H
|
||||
#ifdef CONFIG_NET_DMA
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/skbuff.h>
|
||||
|
||||
int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
|
||||
struct sk_buff *skb, int offset, struct iovec *to,
|
||||
size_t len, struct dma_pinned_list *pinned_list);
|
||||
|
||||
#endif /* CONFIG_NET_DMA */
|
||||
#endif /* NETDMA_H */
|
@ -233,7 +233,6 @@ struct cg_proto;
|
||||
* @sk_receive_queue: incoming packets
|
||||
* @sk_wmem_alloc: transmit queue bytes committed
|
||||
* @sk_write_queue: Packet sending queue
|
||||
* @sk_async_wait_queue: DMA copied packets
|
||||
* @sk_omem_alloc: "o" is "option" or "other"
|
||||
* @sk_wmem_queued: persistent queue size
|
||||
* @sk_forward_alloc: space allocated forward
|
||||
@ -362,10 +361,6 @@ struct sock {
|
||||
struct sk_filter __rcu *sk_filter;
|
||||
struct socket_wq __rcu *sk_wq;
|
||||
|
||||
#ifdef CONFIG_NET_DMA
|
||||
struct sk_buff_head sk_async_wait_queue;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_XFRM
|
||||
struct xfrm_policy *sk_policy[2];
|
||||
#endif
|
||||
@ -2206,27 +2201,15 @@ void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
|
||||
* sk_eat_skb - Release a skb if it is no longer needed
|
||||
* @sk: socket to eat this skb from
|
||||
* @skb: socket buffer to eat
|
||||
* @copied_early: flag indicating whether DMA operations copied this data early
|
||||
*
|
||||
* This routine must be called with interrupts disabled or with the socket
|
||||
* locked so that the sk_buff queue operation is ok.
|
||||
*/
|
||||
#ifdef CONFIG_NET_DMA
|
||||
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
|
||||
{
|
||||
__skb_unlink(skb, &sk->sk_receive_queue);
|
||||
if (!copied_early)
|
||||
__kfree_skb(skb);
|
||||
else
|
||||
__skb_queue_tail(&sk->sk_async_wait_queue, skb);
|
||||
}
|
||||
#else
|
||||
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early)
|
||||
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
__skb_unlink(skb, &sk->sk_receive_queue);
|
||||
__kfree_skb(skb);
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline
|
||||
struct net *sock_net(const struct sock *sk)
|
||||
|
@ -27,7 +27,6 @@
|
||||
#include <linux/cache.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/cryptohash.h>
|
||||
#include <linux/kref.h>
|
||||
@ -262,7 +261,6 @@ extern int sysctl_tcp_adv_win_scale;
|
||||
extern int sysctl_tcp_tw_reuse;
|
||||
extern int sysctl_tcp_frto;
|
||||
extern int sysctl_tcp_low_latency;
|
||||
extern int sysctl_tcp_dma_copybreak;
|
||||
extern int sysctl_tcp_nometrics_save;
|
||||
extern int sysctl_tcp_moderate_rcvbuf;
|
||||
extern int sysctl_tcp_tso_win_divisor;
|
||||
@ -368,7 +366,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
|
||||
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
|
||||
const struct tcphdr *th, unsigned int len);
|
||||
void tcp_rcv_space_adjust(struct sock *sk);
|
||||
void tcp_cleanup_rbuf(struct sock *sk, int copied);
|
||||
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
|
||||
void tcp_twsk_destructor(struct sock *sk);
|
||||
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
|
||||
@ -1031,12 +1028,6 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
|
||||
tp->ucopy.len = 0;
|
||||
tp->ucopy.memory = 0;
|
||||
skb_queue_head_init(&tp->ucopy.prequeue);
|
||||
#ifdef CONFIG_NET_DMA
|
||||
tp->ucopy.dma_chan = NULL;
|
||||
tp->ucopy.wakeup = 0;
|
||||
tp->ucopy.pinned_list = NULL;
|
||||
tp->ucopy.dma_cookie = 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
|
||||
|
@ -390,7 +390,6 @@ static const struct bin_table bin_net_ipv4_table[] = {
|
||||
{ CTL_INT, NET_TCP_MTU_PROBING, "tcp_mtu_probing" },
|
||||
{ CTL_INT, NET_TCP_BASE_MSS, "tcp_base_mss" },
|
||||
{ CTL_INT, NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" },
|
||||
{ CTL_INT, NET_TCP_DMA_COPYBREAK, "tcp_dma_copybreak" },
|
||||
{ CTL_INT, NET_TCP_SLOW_START_AFTER_IDLE, "tcp_slow_start_after_idle" },
|
||||
{ CTL_INT, NET_CIPSOV4_CACHE_ENABLE, "cipso_cache_enable" },
|
||||
{ CTL_INT, NET_CIPSOV4_CACHE_BUCKET_SIZE, "cipso_cache_bucket_size" },
|
||||
|
@ -16,7 +16,6 @@ obj-y += net-sysfs.o
|
||||
obj-$(CONFIG_PROC_FS) += net-procfs.o
|
||||
obj-$(CONFIG_NET_PKTGEN) += pktgen.o
|
||||
obj-$(CONFIG_NETPOLL) += netpoll.o
|
||||
obj-$(CONFIG_NET_DMA) += user_dma.o
|
||||
obj-$(CONFIG_FIB_RULES) += fib_rules.o
|
||||
obj-$(CONFIG_TRACEPOINTS) += net-traces.o
|
||||
obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
|
||||
|
@ -1284,7 +1284,6 @@ static int __dev_open(struct net_device *dev)
|
||||
clear_bit(__LINK_STATE_START, &dev->state);
|
||||
else {
|
||||
dev->flags |= IFF_UP;
|
||||
net_dmaengine_get();
|
||||
dev_set_rx_mode(dev);
|
||||
dev_activate(dev);
|
||||
add_device_randomness(dev->dev_addr, dev->addr_len);
|
||||
@ -1363,7 +1362,6 @@ static int __dev_close_many(struct list_head *head)
|
||||
ops->ndo_stop(dev);
|
||||
|
||||
dev->flags &= ~IFF_UP;
|
||||
net_dmaengine_put();
|
||||
netpoll_poll_enable(dev);
|
||||
}
|
||||
|
||||
@ -4505,14 +4503,6 @@ static void net_rx_action(struct softirq_action *h)
|
||||
out:
|
||||
net_rps_action_and_irq_enable(sd);
|
||||
|
||||
#ifdef CONFIG_NET_DMA
|
||||
/*
|
||||
* There may not be any more sk_buffs coming right now, so push
|
||||
* any pending DMA copies to hardware
|
||||
*/
|
||||
dma_issue_pending_all();
|
||||
#endif
|
||||
|
||||
return;
|
||||
|
||||
softnet_break:
|
||||
|
@ -1489,9 +1489,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
|
||||
atomic_set(&newsk->sk_omem_alloc, 0);
|
||||
skb_queue_head_init(&newsk->sk_receive_queue);
|
||||
skb_queue_head_init(&newsk->sk_write_queue);
|
||||
#ifdef CONFIG_NET_DMA
|
||||
skb_queue_head_init(&newsk->sk_async_wait_queue);
|
||||
#endif
|
||||
|
||||
spin_lock_init(&newsk->sk_dst_lock);
|
||||
rwlock_init(&newsk->sk_callback_lock);
|
||||
@ -2308,9 +2305,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
|
||||
skb_queue_head_init(&sk->sk_receive_queue);
|
||||
skb_queue_head_init(&sk->sk_write_queue);
|
||||
skb_queue_head_init(&sk->sk_error_queue);
|
||||
#ifdef CONFIG_NET_DMA
|
||||
skb_queue_head_init(&sk->sk_async_wait_queue);
|
||||
#endif
|
||||
|
||||
sk->sk_send_head = NULL;
|
||||
|
||||
|
@ -1,131 +0,0 @@
|
||||
/*
|
||||
* Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
|
||||
* Portions based on net/core/datagram.c and copyrighted by their authors.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
||||
* more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License along with
|
||||
* this program; if not, write to the Free Software Foundation, Inc., 59
|
||||
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution in the
|
||||
* file called COPYING.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This code allows the net stack to make use of a DMA engine for
|
||||
* skb to iovec copies.
|
||||
*/
|
||||
|
||||
#include <linux/dmaengine.h>
|
||||
#include <linux/socket.h>
|
||||
#include <linux/export.h>
|
||||
#include <net/tcp.h>
|
||||
#include <net/netdma.h>
|
||||
|
||||
#define NET_DMA_DEFAULT_COPYBREAK 4096
|
||||
|
||||
int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK;
|
||||
EXPORT_SYMBOL(sysctl_tcp_dma_copybreak);
|
||||
|
||||
/**
|
||||
* dma_skb_copy_datagram_iovec - Copy a datagram to an iovec.
|
||||
* @skb - buffer to copy
|
||||
* @offset - offset in the buffer to start copying from
|
||||
* @iovec - io vector to copy to
|
||||
* @len - amount of data to copy from buffer to iovec
|
||||
* @pinned_list - locked iovec buffer data
|
||||
*
|
||||
* Note: the iovec is modified during the copy.
|
||||
*/
|
||||
int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
|
||||
struct sk_buff *skb, int offset, struct iovec *to,
|
||||
size_t len, struct dma_pinned_list *pinned_list)
|
||||
{
|
||||
int start = skb_headlen(skb);
|
||||
int i, copy = start - offset;
|
||||
struct sk_buff *frag_iter;
|
||||
dma_cookie_t cookie = 0;
|
||||
|
||||
/* Copy header. */
|
||||
if (copy > 0) {
|
||||
if (copy > len)
|
||||
copy = len;
|
||||
cookie = dma_memcpy_to_iovec(chan, to, pinned_list,
|
||||
skb->data + offset, copy);
|
||||
if (cookie < 0)
|
||||
goto fault;
|
||||
len -= copy;
|
||||
if (len == 0)
|
||||
goto end;
|
||||
offset += copy;
|
||||
}
|
||||
|
||||
/* Copy paged appendix. Hmm... why does this look so complicated? */
|
||||
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
|
||||
int end;
|
||||
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
|
||||
|
||||
WARN_ON(start > offset + len);
|
||||
|
||||
end = start + skb_frag_size(frag);
|
||||
copy = end - offset;
|
||||
if (copy > 0) {
|
||||
struct page *page = skb_frag_page(frag);
|
||||
|
||||
if (copy > len)
|
||||
copy = len;
|
||||
|
||||
cookie = dma_memcpy_pg_to_iovec(chan, to, pinned_list, page,
|
||||
frag->page_offset + offset - start, copy);
|
||||
if (cookie < 0)
|
||||
goto fault;
|
||||
len -= copy;
|
||||
if (len == 0)
|
||||
goto end;
|
||||
offset += copy;
|
||||
}
|
||||
start = end;
|
||||
}
|
||||
|
||||
skb_walk_frags(skb, frag_iter) {
|
||||
int end;
|
||||
|
||||
WARN_ON(start > offset + len);
|
||||
|
||||
end = start + frag_iter->len;
|
||||
copy = end - offset;
|
||||
if (copy > 0) {
|
||||
if (copy > len)
|
||||
copy = len;
|
||||
cookie = dma_skb_copy_datagram_iovec(chan, frag_iter,
|
||||
offset - start,
|
||||
to, copy,
|
||||
pinned_list);
|
||||
if (cookie < 0)
|
||||
goto fault;
|
||||
len -= copy;
|
||||
if (len == 0)
|
||||
goto end;
|
||||
offset += copy;
|
||||
}
|
||||
start = end;
|
||||
}
|
||||
|
||||
end:
|
||||
if (!len) {
|
||||
skb->dma_cookie = cookie;
|
||||
return cookie;
|
||||
}
|
||||
|
||||
fault:
|
||||
return -EFAULT;
|
||||
}
|
@ -848,7 +848,7 @@ int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
|
||||
default:
|
||||
dccp_pr_debug("packet_type=%s\n",
|
||||
dccp_packet_name(dh->dccph_type));
|
||||
sk_eat_skb(sk, skb, false);
|
||||
sk_eat_skb(sk, skb);
|
||||
}
|
||||
verify_sock_status:
|
||||
if (sock_flag(sk, SOCK_DONE)) {
|
||||
@ -905,7 +905,7 @@ verify_sock_status:
|
||||
len = skb->len;
|
||||
found_fin_ok:
|
||||
if (!(flags & MSG_PEEK))
|
||||
sk_eat_skb(sk, skb, false);
|
||||
sk_eat_skb(sk, skb);
|
||||
break;
|
||||
} while (1);
|
||||
out:
|
||||
|
@ -628,15 +628,6 @@ static struct ctl_table ipv4_table[] = {
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec
|
||||
},
|
||||
#ifdef CONFIG_NET_DMA
|
||||
{
|
||||
.procname = "tcp_dma_copybreak",
|
||||
.data = &sysctl_tcp_dma_copybreak,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec
|
||||
},
|
||||
#endif
|
||||
{
|
||||
.procname = "tcp_slow_start_after_idle",
|
||||
.data = &sysctl_tcp_slow_start_after_idle,
|
||||
|
net/ipv4/tcp.c (149 lines changed)
@ -274,7 +274,6 @@
|
||||
#include <net/tcp.h>
|
||||
#include <net/xfrm.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/netdma.h>
|
||||
#include <net/sock.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
@ -1394,7 +1393,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
|
||||
* calculation of whether or not we must ACK for the sake of
|
||||
* a window update.
|
||||
*/
|
||||
void tcp_cleanup_rbuf(struct sock *sk, int copied)
|
||||
static void tcp_cleanup_rbuf(struct sock *sk, int copied)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
bool time_to_ack = false;
|
||||
@ -1470,39 +1469,6 @@ static void tcp_prequeue_process(struct sock *sk)
|
||||
tp->ucopy.memory = 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET_DMA
|
||||
static void tcp_service_net_dma(struct sock *sk, bool wait)
|
||||
{
|
||||
dma_cookie_t done, used;
|
||||
dma_cookie_t last_issued;
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
|
||||
if (!tp->ucopy.dma_chan)
|
||||
return;
|
||||
|
||||
last_issued = tp->ucopy.dma_cookie;
|
||||
dma_async_issue_pending(tp->ucopy.dma_chan);
|
||||
|
||||
do {
|
||||
if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
|
||||
last_issued, &done,
|
||||
&used) == DMA_COMPLETE) {
|
||||
/* Safe to free early-copied skbs now */
|
||||
__skb_queue_purge(&sk->sk_async_wait_queue);
|
||||
break;
|
||||
} else {
|
||||
struct sk_buff *skb;
|
||||
while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
|
||||
(dma_async_is_complete(skb->dma_cookie, done,
|
||||
used) == DMA_COMPLETE)) {
|
||||
__skb_dequeue(&sk->sk_async_wait_queue);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
}
|
||||
} while (wait);
|
||||
}
|
||||
#endif
|
||||
|
||||
static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
@ -1520,7 +1486,7 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
|
||||
* splitted a fat GRO packet, while we released socket lock
|
||||
* in skb_splice_bits()
|
||||
*/
|
||||
sk_eat_skb(sk, skb, false);
|
||||
sk_eat_skb(sk, skb);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
@ -1586,11 +1552,11 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
|
||||
continue;
|
||||
}
|
||||
if (tcp_hdr(skb)->fin) {
|
||||
sk_eat_skb(sk, skb, false);
|
||||
sk_eat_skb(sk, skb);
|
||||
++seq;
|
||||
break;
|
||||
}
|
||||
sk_eat_skb(sk, skb, false);
|
||||
sk_eat_skb(sk, skb);
|
||||
if (!desc->count)
|
||||
break;
|
||||
tp->copied_seq = seq;
|
||||
@ -1628,7 +1594,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
|
||||
int target; /* Read at least this many bytes */
|
||||
long timeo;
|
||||
struct task_struct *user_recv = NULL;
|
||||
bool copied_early = false;
|
||||
struct sk_buff *skb;
|
||||
u32 urg_hole = 0;
|
||||
|
||||
@ -1674,28 +1639,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
|
||||
|
||||
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
|
||||
|
||||
#ifdef CONFIG_NET_DMA
|
||||
tp->ucopy.dma_chan = NULL;
|
||||
preempt_disable();
|
||||
skb = skb_peek_tail(&sk->sk_receive_queue);
|
||||
{
|
||||
int available = 0;
|
||||
|
||||
if (skb)
|
||||
available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
|
||||
if ((available < target) &&
|
||||
(len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
|
||||
!sysctl_tcp_low_latency &&
|
||||
net_dma_find_channel()) {
|
||||
preempt_enable();
|
||||
tp->ucopy.pinned_list =
|
||||
dma_pin_iovec_pages(msg->msg_iov, len);
|
||||
} else {
|
||||
preempt_enable();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
do {
|
||||
u32 offset;
|
||||
|
||||
@ -1826,16 +1769,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
|
||||
/* __ Set realtime policy in scheduler __ */
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET_DMA
|
||||
if (tp->ucopy.dma_chan) {
|
||||
if (tp->rcv_wnd == 0 &&
|
||||
!skb_queue_empty(&sk->sk_async_wait_queue)) {
|
||||
tcp_service_net_dma(sk, true);
|
||||
tcp_cleanup_rbuf(sk, copied);
|
||||
} else
|
||||
dma_async_issue_pending(tp->ucopy.dma_chan);
|
||||
}
|
||||
#endif
|
||||
if (copied >= target) {
|
||||
/* Do not sleep, just process backlog. */
|
||||
release_sock(sk);
|
||||
@ -1843,11 +1776,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
|
||||
} else
|
||||
sk_wait_data(sk, &timeo);
|
||||
|
||||
#ifdef CONFIG_NET_DMA
|
||||
tcp_service_net_dma(sk, false); /* Don't block */
|
||||
tp->ucopy.wakeup = 0;
|
||||
#endif
|
||||
|
||||
if (user_recv) {
|
||||
int chunk;
|
||||
|
||||
@ -1905,43 +1833,13 @@ do_prequeue:
|
||||
}
|
||||
|
||||
if (!(flags & MSG_TRUNC)) {
|
||||
#ifdef CONFIG_NET_DMA
|
||||
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
|
||||
tp->ucopy.dma_chan = net_dma_find_channel();
|
||||
|
||||
if (tp->ucopy.dma_chan) {
|
||||
tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
|
||||
tp->ucopy.dma_chan, skb, offset,
|
||||
msg->msg_iov, used,
|
||||
tp->ucopy.pinned_list);
|
||||
|
||||
if (tp->ucopy.dma_cookie < 0) {
|
||||
|
||||
pr_alert("%s: dma_cookie < 0\n",
|
||||
__func__);
|
||||
|
||||
/* Exception. Bailout! */
|
||||
if (!copied)
|
||||
copied = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
||||
dma_async_issue_pending(tp->ucopy.dma_chan);
|
||||
|
||||
if ((offset + used) == skb->len)
|
||||
copied_early = true;
|
||||
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
err = skb_copy_datagram_iovec(skb, offset,
|
||||
msg->msg_iov, used);
|
||||
if (err) {
|
||||
/* Exception. Bailout! */
|
||||
if (!copied)
|
||||
copied = -EFAULT;
|
||||
break;
|
||||
}
|
||||
err = skb_copy_datagram_iovec(skb, offset,
|
||||
msg->msg_iov, used);
|
||||
if (err) {
|
||||
/* Exception. Bailout! */
|
||||
if (!copied)
|
||||
copied = -EFAULT;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1961,19 +1859,15 @@ skip_copy:
|
||||
|
||||
if (tcp_hdr(skb)->fin)
|
||||
goto found_fin_ok;
|
||||
if (!(flags & MSG_PEEK)) {
|
||||
sk_eat_skb(sk, skb, copied_early);
|
||||
copied_early = false;
|
||||
}
|
||||
if (!(flags & MSG_PEEK))
|
||||
sk_eat_skb(sk, skb);
|
||||
continue;
|
||||
|
||||
found_fin_ok:
|
||||
/* Process the FIN. */
|
||||
++*seq;
|
||||
if (!(flags & MSG_PEEK)) {
|
||||
sk_eat_skb(sk, skb, copied_early);
|
||||
copied_early = false;
|
||||
}
|
||||
if (!(flags & MSG_PEEK))
|
||||
sk_eat_skb(sk, skb);
|
||||
break;
|
||||
} while (len > 0);
|
||||
|
||||
@ -1996,16 +1890,6 @@ skip_copy:
|
||||
tp->ucopy.len = 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET_DMA
|
||||
tcp_service_net_dma(sk, true); /* Wait for queue to drain */
|
||||
tp->ucopy.dma_chan = NULL;
|
||||
|
||||
if (tp->ucopy.pinned_list) {
|
||||
dma_unpin_iovec_pages(tp->ucopy.pinned_list);
|
||||
tp->ucopy.pinned_list = NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* According to UNIX98, msg_name/msg_namelen are ignored
|
||||
* on connected socket. I was just happy when found this 8) --ANK
|
||||
*/
|
||||
@ -2349,9 +2233,6 @@ int tcp_disconnect(struct sock *sk, int flags)
|
||||
__skb_queue_purge(&sk->sk_receive_queue);
|
||||
tcp_write_queue_purge(sk);
|
||||
__skb_queue_purge(&tp->out_of_order_queue);
|
||||
#ifdef CONFIG_NET_DMA
|
||||
__skb_queue_purge(&sk->sk_async_wait_queue);
|
||||
#endif
|
||||
|
||||
inet->inet_dport = 0;
|
||||
|
||||
|
@ -73,7 +73,6 @@
|
||||
#include <net/inet_common.h>
|
||||
#include <linux/ipsec.h>
|
||||
#include <asm/unaligned.h>
|
||||
#include <net/netdma.h>
|
||||
#include <linux/errqueue.h>
|
||||
|
||||
int sysctl_tcp_timestamps __read_mostly = 1;
|
||||
@ -4951,53 +4950,6 @@ static inline bool tcp_checksum_complete_user(struct sock *sk,
|
||||
__tcp_checksum_complete_user(sk, skb);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET_DMA
|
||||
static bool tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
|
||||
int hlen)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
int chunk = skb->len - hlen;
|
||||
int dma_cookie;
|
||||
bool copied_early = false;
|
||||
|
||||
if (tp->ucopy.wakeup)
|
||||
return false;
|
||||
|
||||
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
|
||||
tp->ucopy.dma_chan = net_dma_find_channel();
|
||||
|
||||
if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
|
||||
|
||||
dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
|
||||
skb, hlen,
|
||||
tp->ucopy.iov, chunk,
|
||||
tp->ucopy.pinned_list);
|
||||
|
||||
if (dma_cookie < 0)
|
||||
goto out;
|
||||
|
||||
tp->ucopy.dma_cookie = dma_cookie;
|
||||
copied_early = true;
|
||||
|
||||
tp->ucopy.len -= chunk;
|
||||
tp->copied_seq += chunk;
|
||||
tcp_rcv_space_adjust(sk);
|
||||
|
||||
if ((tp->ucopy.len == 0) ||
|
||||
(tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
|
||||
(atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
|
||||
tp->ucopy.wakeup = 1;
|
||||
sk->sk_data_ready(sk);
|
||||
}
|
||||
} else if (chunk > 0) {
|
||||
tp->ucopy.wakeup = 1;
|
||||
sk->sk_data_ready(sk);
|
||||
}
|
||||
out:
|
||||
return copied_early;
|
||||
}
|
||||
#endif /* CONFIG_NET_DMA */
|
||||
|
||||
/* Does PAWS and seqno based validation of an incoming segment, flags will
|
||||
* play significant role here.
|
||||
*/
|
||||
@ -5177,27 +5129,15 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
|
||||
}
|
||||
} else {
|
||||
int eaten = 0;
|
||||
int copied_early = 0;
|
||||
bool fragstolen = false;
|
||||
|
||||
if (tp->copied_seq == tp->rcv_nxt &&
|
||||
len - tcp_header_len <= tp->ucopy.len) {
|
||||
#ifdef CONFIG_NET_DMA
|
||||
if (tp->ucopy.task == current &&
|
||||
sock_owned_by_user(sk) &&
|
||||
tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
|
||||
copied_early = 1;
|
||||
eaten = 1;
|
||||
}
|
||||
#endif
|
||||
if (tp->ucopy.task == current &&
|
||||
sock_owned_by_user(sk) && !copied_early) {
|
||||
__set_current_state(TASK_RUNNING);
|
||||
if (tp->ucopy.task == current &&
|
||||
tp->copied_seq == tp->rcv_nxt &&
|
||||
len - tcp_header_len <= tp->ucopy.len &&
|
||||
sock_owned_by_user(sk)) {
|
||||
__set_current_state(TASK_RUNNING);
|
||||
|
||||
if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
|
||||
eaten = 1;
|
||||
}
|
||||
if (eaten) {
|
||||
if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
|
||||
/* Predicted packet is in window by definition.
|
||||
* seq == rcv_nxt and rcv_wup <= rcv_nxt.
|
||||
* Hence, check seq<=rcv_wup reduces to:
|
||||
@ -5213,9 +5153,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
|
||||
__skb_pull(skb, tcp_header_len);
|
||||
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
|
||||
eaten = 1;
|
||||
}
|
||||
if (copied_early)
|
||||
tcp_cleanup_rbuf(sk, skb->len);
|
||||
}
|
||||
if (!eaten) {
|
||||
if (tcp_checksum_complete_user(sk, skb))
|
||||
@ -5252,14 +5191,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
|
||||
goto no_ack;
|
||||
}
|
||||
|
||||
if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
|
||||
__tcp_ack_snd_check(sk, 0);
|
||||
__tcp_ack_snd_check(sk, 0);
|
||||
no_ack:
|
||||
#ifdef CONFIG_NET_DMA
|
||||
if (copied_early)
|
||||
__skb_queue_tail(&sk->sk_async_wait_queue, skb);
|
||||
else
|
||||
#endif
|
||||
if (eaten)
|
||||
kfree_skb_partial(skb, fragstolen);
|
||||
sk->sk_data_ready(sk);
|
||||
|
@ -72,7 +72,6 @@
|
||||
#include <net/inet_common.h>
|
||||
#include <net/timewait_sock.h>
|
||||
#include <net/xfrm.h>
|
||||
#include <net/netdma.h>
|
||||
#include <net/secure_seq.h>
|
||||
#include <net/tcp_memcontrol.h>
|
||||
#include <net/busy_poll.h>
|
||||
@ -1670,18 +1669,8 @@ process:
|
||||
bh_lock_sock_nested(sk);
|
||||
ret = 0;
|
||||
if (!sock_owned_by_user(sk)) {
|
||||
#ifdef CONFIG_NET_DMA
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
|
||||
tp->ucopy.dma_chan = net_dma_find_channel();
|
||||
if (tp->ucopy.dma_chan)
|
||||
if (!tcp_prequeue(sk, skb))
|
||||
ret = tcp_v4_do_rcv(sk, skb);
|
||||
else
|
||||
#endif
|
||||
{
|
||||
if (!tcp_prequeue(sk, skb))
|
||||
ret = tcp_v4_do_rcv(sk, skb);
|
||||
}
|
||||
} else if (unlikely(sk_add_backlog(sk, skb,
|
||||
sk->sk_rcvbuf + sk->sk_sndbuf))) {
|
||||
bh_unlock_sock(sk);
|
||||
@ -1841,11 +1830,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_NET_DMA
|
||||
/* Cleans up our sk_async_wait_queue */
|
||||
__skb_queue_purge(&sk->sk_async_wait_queue);
|
||||
#endif
|
||||
|
||||
/* Clean prequeue, it must be empty really */
|
||||
__skb_queue_purge(&tp->ucopy.prequeue);
|
||||
|
||||
|
@ -59,7 +59,6 @@
|
||||
#include <net/snmp.h>
|
||||
#include <net/dsfield.h>
|
||||
#include <net/timewait_sock.h>
|
||||
#include <net/netdma.h>
|
||||
#include <net/inet_common.h>
|
||||
#include <net/secure_seq.h>
|
||||
#include <net/tcp_memcontrol.h>
|
||||
@ -1446,18 +1445,8 @@ process:
|
||||
bh_lock_sock_nested(sk);
|
||||
ret = 0;
|
||||
if (!sock_owned_by_user(sk)) {
|
||||
#ifdef CONFIG_NET_DMA
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
|
||||
tp->ucopy.dma_chan = net_dma_find_channel();
|
||||
if (tp->ucopy.dma_chan)
|
||||
if (!tcp_prequeue(sk, skb))
|
||||
ret = tcp_v6_do_rcv(sk, skb);
|
||||
else
|
||||
#endif
|
||||
{
|
||||
if (!tcp_prequeue(sk, skb))
|
||||
ret = tcp_v6_do_rcv(sk, skb);
|
||||
}
|
||||
} else if (unlikely(sk_add_backlog(sk, skb,
|
||||
sk->sk_rcvbuf + sk->sk_sndbuf))) {
|
||||
bh_unlock_sock(sk);
|
||||
|
@ -839,7 +839,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
|
||||
|
||||
if (!(flags & MSG_PEEK)) {
|
||||
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
|
||||
sk_eat_skb(sk, skb, false);
|
||||
sk_eat_skb(sk, skb);
|
||||
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
|
||||
*seq = 0;
|
||||
}
|
||||
@ -861,10 +861,10 @@ copy_uaddr:
|
||||
llc_cmsg_rcv(msg, skb);
|
||||
|
||||
if (!(flags & MSG_PEEK)) {
|
||||
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
|
||||
sk_eat_skb(sk, skb, false);
|
||||
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
|
||||
*seq = 0;
|
||||
spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
|
||||
sk_eat_skb(sk, skb);
|
||||
spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
|
||||
*seq = 0;
|
||||
}
|
||||
|
||||
goto out;
|
||||
|