Mirror of https://github.com/rd-stuffs/msm-4.14.git
arm64/iommu: handle atomic pool addresses in ->get_sgtable and ->mmap
Atomic DMA pool allocations are remapped into the vmalloc area by default, so get_sgtable and mmap on addresses from this pool currently return an error. Fix that.

Change-Id: I2a5f4f5447ffc6fa6ee3baee05a496110de40b3c
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
parent b136fd5965
commit 9d091f0f5a
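Background: an atomic pool address is a vmalloc address, but neither of the old special cases applied to it. It is not in the linear map, and DMA_ATTR_FORCE_CONTIGUOUS is not set for it, so both handlers fell through to the WARN_ON(!area || !area->pages) check and returned -ENXIO. The patch restructures both functions around a single fallback chain: use area->pages when present, translate linear-map addresses directly, and otherwise treat the vmalloc address as a physically contiguous remap. A condensed sketch of that chain follows (the helper name cpu_addr_to_pfn is illustrative only, not part of this patch):

#include <linux/mm.h>
#include <linux/vmalloc.h>

static unsigned long cpu_addr_to_pfn(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	/* Scattered IOMMU allocation: no single pfn, use area->pages. */
	if (area && area->pages)
		return 0;

	/* Linear-map address: translate directly. */
	if (!is_vmalloc_addr(cpu_addr))
		return page_to_pfn(virt_to_page(cpu_addr));

	/*
	 * Remapped but physically contiguous: covers both
	 * DMA_ATTR_FORCE_CONTIGUOUS and atomic pool allocations.
	 */
	return vmalloc_to_pfn(cpu_addr);
}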
@@ -843,6 +843,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 {
 	struct vm_struct *area;
 	int ret;
+	unsigned long pfn = 0;
 
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					is_dma_coherent(dev, attrs));
@@ -850,25 +851,23 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
-	if (!is_vmalloc_addr(cpu_addr)) {
-		unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
-		return __swiotlb_mmap_pfn(vma, pfn, size);
-	}
-
-	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-		/*
-		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
-		 * hence in the vmalloc space.
-		 */
-		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
-		return __swiotlb_mmap_pfn(vma, pfn, size);
-	}
-
 	area = find_vm_area(cpu_addr);
-	if (WARN_ON(!area || !area->pages))
-		return -ENXIO;
-
-	return iommu_dma_mmap(area->pages, size, vma);
+	if (area && area->pages)
+		return iommu_dma_mmap(area->pages, size, vma);
+	else if (!is_vmalloc_addr(cpu_addr))
+		pfn = page_to_pfn(virt_to_page(cpu_addr));
+	else if (is_vmalloc_addr(cpu_addr))
+		/*
+		 * DMA_ATTR_FORCE_CONTIGUOUS and atomic pool allocations are
+		 * always remapped, hence in the vmalloc space.
+		 */
+		pfn = vmalloc_to_pfn(cpu_addr);
+
+	if (pfn)
+		return __swiotlb_mmap_pfn(vma, pfn, size);
+
+	return -ENXIO;
 }
 
 static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
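For reference, the path above is what a driver exercises when it forwards its mmap file operation to the DMA API. A hedged usage sketch follows; my_dev, my_vaddr, my_dma_handle, and my_size are assumed placeholders, not names from this patch:

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

static struct device *my_dev;	/* assumed: saved at probe time */
static void *my_vaddr;		/* assumed: from dma_alloc_attrs() */
static dma_addr_t my_dma_handle;
static size_t my_size;

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	/*
	 * If my_vaddr was allocated in atomic context it can point into
	 * the atomic pool's vmalloc remap; before this patch the call
	 * below returned -ENXIO on arm64 devices behind an IOMMU.
	 */
	return dma_mmap_attrs(my_dev, vma, my_vaddr, my_dma_handle,
			      my_size, 0);
}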
@@ -876,27 +875,24 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 			       size_t size, unsigned long attrs)
 {
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct page *page = NULL;
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
-	if (!is_vmalloc_addr(cpu_addr)) {
-		struct page *page = virt_to_page(cpu_addr);
-		return __swiotlb_get_sgtable_page(sgt, page, size);
-	}
-
-	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+	if (area && area->pages)
+		return sg_alloc_table_from_pages(sgt, area->pages, count, 0,
+						 size, GFP_KERNEL);
+	else if (!is_vmalloc_addr(cpu_addr))
+		page = virt_to_page(cpu_addr);
+	else if (is_vmalloc_addr(cpu_addr))
 		/*
-		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
-		 * hence in the vmalloc space.
+		 * DMA_ATTR_FORCE_CONTIGUOUS and atomic pool allocations
+		 * are always remapped, hence in the vmalloc space.
 		 */
-		struct page *page = vmalloc_to_page(cpu_addr);
+		page = vmalloc_to_page(cpu_addr);
 
+	if (page)
 		return __swiotlb_get_sgtable_page(sgt, page, size);
-	}
-
-	if (WARN_ON(!area || !area->pages))
-		return -ENXIO;
-
-	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
-					 GFP_KERNEL);
+
+	return -ENXIO;
 }
 
 static void __iommu_sync_single_for_cpu(struct device *dev,
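When the fallback resolves a single page, the resulting sg_table carries one entry spanning the whole buffer. A simplified model of that result, patterned on __swiotlb_get_sgtable_page (a sketch under that assumption, not the verbatim kernel helper):

#include <linux/mm.h>
#include <linux/scatterlist.h>

static int contiguous_buf_to_sgtable(struct sg_table *sgt,
				     struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		/* One entry covering the whole contiguous allocation. */
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}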