From 0f02cfcedc92090e2966188b64f467ae2a100aba Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf
Date: Thu, 28 Jan 2021 17:07:00 -0800
Subject: [PATCH] ion: Rewrite to improve clarity and performance

The ION driver suffers from massive code bloat caused by excessive
debug features, as well as the poor lock usage that comes with them.
Multiple locks in ION exist only to keep the debug features
thread-safe, which hurts ION's performance when doing its actual job.

Numerous code paths in ION hold mutexes for no reason, or hold them
for longer than necessary. This causes not only unwanted lock
contention, but also long delays when a contended mutex leaves the
calling thread preempted for a while. Lock usage like this recurs
throughout ION and degrades performance across the board. On top of
that, one big mutex is used almost everywhere, adding unnecessary
lock overhead. That big mutex is now replaced with multiple
fine-grained locks, improving performance.

Additionally, dup_sg_table is called very frequently and lies on the
rendering path for the display. Speed it up by copying scatterlists
in page-sized chunks rather than iterating over them one entry at a
time. Note that sg_alloc_table already zeroes out `table`, so there's
no need to have the memory allocator zero it out as well.

This also adds a lock-less caching system for DMA attachments and
their respective sg_table copies. It significantly reduces overhead
for code that frequently maps and unmaps DMA buffers, and speeds up
cache maintenance because iterating through the list of buffer
attachments is now lock-free. This is safe because attaching to and
accessing a single ION buffer are never interleaved.

Overall, just rewrite ION entirely to fix its deficiencies. This
optimizes ION for excellent performance and discards its debug cruft.

Signed-off-by: Sultan Alsawaf
Change-Id: I0a21435be1eb409cfe140eec8da507cc35f060dd
Signed-off-by: Forenche
---
 drivers/staging/android/ion/Kconfig           |   12 -
 drivers/staging/android/ion/Makefile          |    2 +-
 drivers/staging/android/ion/ion-ioctl.c       |  138 --
 drivers/staging/android/ion/ion.c             | 1518 ++++-------------
 drivers/staging/android/ion/ion.h             |  133 +-
 .../staging/android/ion/ion_carveout_heap.c   |    8 -
 drivers/staging/android/ion/ion_cma_heap.c    |    9 -
 .../staging/android/ion/ion_cma_secure_heap.c |   18 -
 drivers/staging/android/ion/ion_heap.c        |  143 +-
 drivers/staging/android/ion/ion_kernel.h      |    9 +-
 drivers/staging/android/ion/ion_page_pool.c   |   20 +-
 drivers/staging/android/ion/ion_system_heap.c |  101 --
 .../android/ion/ion_system_secure_heap.c      |    8 -
 include/trace/events/ion.h                    |  180 --
 14 files changed, 425 insertions(+), 1874 deletions(-)
 delete mode 100644 drivers/staging/android/ion/ion-ioctl.c
 delete mode 100644 include/trace/events/ion.h

diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index 563e4a82a73a..8dfb485a9ad9 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -44,18 +44,6 @@ config ION_CMA_HEAP
 	  by the Contiguous Memory Allocator (CMA). If your system has these
 	  regions, you should say Y here.
 
-config ION_FORCE_DMA_SYNC
-	bool "Force ION to always DMA sync buffer memory"
-	depends on ION
-	help
-	  Force ION to DMA sync buffer memory when it is allocated and to
-	  always DMA sync the buffer memory on calls to begin/end cpu
-	  access. This makes ION DMA sync behavior similar to that of the
-	  older version of ION.
- We generally don't want to enable this config as it breaks the - cache maintenance model. - If you're not sure say N here. - config ION_DEFER_FREE_NO_SCHED_IDLE bool "Increases the priority of ION defer free thead" depends on ION diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile index dc638f080d7f..4224f9ab0d2e 100644 --- a/drivers/staging/android/ion/Makefile +++ b/drivers/staging/android/ion/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o \ +obj-$(CONFIG_ION) += ion.o ion_heap.o \ ion_page_pool.o ion_system_heap.o \ ion_carveout_heap.o ion_chunk_heap.o \ ion_system_secure_heap.o ion_cma_heap.o \ diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c deleted file mode 100644 index 83cbcc0dcd8c..000000000000 --- a/drivers/staging/android/ion/ion-ioctl.c +++ /dev/null @@ -1,138 +0,0 @@ -/* - * - * Copyright (C) 2011 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include -#include -#include -#include - -#include "ion.h" -#include "ion_system_secure_heap.h" - -union ion_ioctl_arg { - struct ion_allocation_data allocation; - struct ion_heap_query query; - struct ion_prefetch_data prefetch_data; -}; - -static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg) -{ - int ret = 0; - - switch (cmd) { - case ION_IOC_HEAP_QUERY: - ret = arg->query.reserved0 != 0; - ret |= arg->query.reserved1 != 0; - ret |= arg->query.reserved2 != 0; - break; - default: - break; - } - - return ret ? -EINVAL : 0; -} - -/* fix up the cases where the ioctl direction bits are incorrect */ -static unsigned int ion_ioctl_dir(unsigned int cmd) -{ - switch (cmd) { - default: - return _IOC_DIR(cmd); - } -} - -long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - int ret = 0; - unsigned int dir; - union ion_ioctl_arg data; - - dir = ion_ioctl_dir(cmd); - - if (_IOC_SIZE(cmd) > sizeof(data)) - return -EINVAL; - - /* - * The copy_from_user is unconditional here for both read and write - * to do the validate. 
If there is no write for the ioctl, the - * buffer is cleared - */ - if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) - return -EFAULT; - - ret = validate_ioctl_arg(cmd, &data); - if (ret) { - pr_warn_once("%s: ioctl validate failed\n", __func__); - return ret; - } - - if (!(dir & _IOC_WRITE)) - memset(&data, 0, sizeof(data)); - - switch (cmd) { - case ION_IOC_ALLOC: - { - int fd; - - fd = ion_alloc_fd(data.allocation.len, - data.allocation.heap_id_mask, - data.allocation.flags); - if (fd < 0) - return fd; - - data.allocation.fd = fd; - - break; - } - case ION_IOC_HEAP_QUERY: - ret = ion_query_heaps(&data.query); - break; - case ION_IOC_PREFETCH: - { - int ret; - - ret = ion_walk_heaps(data.prefetch_data.heap_id, - (enum ion_heap_type) - ION_HEAP_TYPE_SYSTEM_SECURE, - (void *)&data.prefetch_data, - ion_system_secure_heap_prefetch); - if (ret) - return ret; - break; - } - case ION_IOC_DRAIN: - { - int ret; - - ret = ion_walk_heaps(data.prefetch_data.heap_id, - (enum ion_heap_type) - ION_HEAP_TYPE_SYSTEM_SECURE, - (void *)&data.prefetch_data, - ion_system_secure_heap_drain); - - if (ret) - return ret; - break; - } - default: - return -ENOTTY; - } - - if (dir & _IOC_READ) { - if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) - return -EFAULT; - } - return ret; -} diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index dc4bf614b0fd..c03a893f0f62 100644 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -1,540 +1,204 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * - * drivers/staging/android/ion/ion.c - * * Copyright (C) 2011 Google, Inc. - * Copyright (c) 2011-2020, The Linux Foundation. All rights reserved. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * + * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. + * Copyright (C) 2019-2021 Sultan Alsawaf . */ -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include -#include #include -#include #include -#include -#include -#include -#include -#include -#include -#include -#define CREATE_TRACE_POINTS -#include -#include - -#include "ion.h" #include "ion_secure_util.h" +#include "ion_system_secure_heap.h" -static struct ion_device *internal_dev; -static atomic_long_t total_heap_bytes; +struct ion_dma_buf_attachment { + struct ion_dma_buf_attachment *next; + struct device *dev; + struct sg_table table; + struct list_head list; + struct rw_semaphore map_rwsem; + bool dma_mapped; +}; -int ion_walk_heaps(int heap_id, enum ion_heap_type type, void *data, - int (*f)(struct ion_heap *heap, void *data)) -{ - int ret_val = 0; - struct ion_heap *heap; - struct ion_device *dev = internal_dev; - /* - * traverse the list of heaps available in this system - * and find the heap that is specified. 
- */ - down_write(&dev->lock); - plist_for_each_entry(heap, &dev->heaps, node) { - if (ION_HEAP(heap->id) != heap_id || - type != heap->type) - continue; - ret_val = f(heap, data); - break; +static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +static const struct file_operations ion_fops = { + .unlocked_ioctl = ion_ioctl, + .compat_ioctl = ion_ioctl +}; + +static struct ion_device ion_dev = { + .heaps = PLIST_HEAD_INIT(ion_dev.heaps), + .heap_rwsem = __RWSEM_INITIALIZER(ion_dev.heap_rwsem), + .dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "ion", + .fops = &ion_fops } - up_write(&dev->lock); - return ret_val; -} -EXPORT_SYMBOL(ion_walk_heaps); +}; -bool ion_buffer_cached(struct ion_buffer *buffer) +static void ion_buffer_free_work(struct work_struct *work) { - return !!(buffer->flags & ION_FLAG_CACHED); -} + struct ion_buffer *buffer = container_of(work, typeof(*buffer), free); + struct ion_dma_buf_attachment *a, *next; + struct ion_heap *heap = buffer->heap; -/* this function should only be called while dev->lock is held */ -static void ion_buffer_add(struct ion_device *dev, - struct ion_buffer *buffer) -{ - struct rb_node **p = &dev->buffers.rb_node; - struct rb_node *parent = NULL; - struct ion_buffer *entry; - - while (*p) { - parent = *p; - entry = rb_entry(parent, struct ion_buffer, node); - - if (buffer < entry) { - p = &(*p)->rb_left; - } else if (buffer > entry) { - p = &(*p)->rb_right; - } else { - pr_err("%s: buffer already found.", __func__); - BUG(); - } + msm_dma_buf_freed(&buffer->iommu_data); + for (a = buffer->attachments; a; a = next) { + next = a->next; + sg_free_table(&a->table); + kfree(a); } - - rb_link_node(&buffer->node, parent, p); - rb_insert_color(&buffer->node, &dev->buffers); + if (buffer->kmap_refcount) + heap->ops->unmap_kernel(heap, buffer); + heap->ops->free(buffer); + kfree(buffer); } -/* this function should only be called while dev->lock is held */ -static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, - struct ion_device *dev, - unsigned long len, - unsigned long flags) +static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, size_t len, + unsigned int flags) { struct ion_buffer *buffer; - struct sg_table *table; int ret; - buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + buffer = kmalloc(sizeof(*buffer), GFP_KERNEL); if (!buffer) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&buffer->iommu_data.map_list); - mutex_init(&buffer->iommu_data.lock); - buffer->heap = heap; - buffer->flags = flags; + *buffer = (typeof(*buffer)){ + .flags = flags, + .heap = heap, + .size = len, + .kmap_lock = __MUTEX_INITIALIZER(buffer->kmap_lock), + .free = __WORK_INITIALIZER(buffer->free, ion_buffer_free_work), + .map_freelist = LIST_HEAD_INIT(buffer->map_freelist), + .freelist_lock = __SPIN_LOCK_INITIALIZER(buffer->freelist_lock), + .iommu_data = { + .map_list = LIST_HEAD_INIT(buffer->iommu_data.map_list), + .lock = __MUTEX_INITIALIZER(buffer->iommu_data.lock) + } + }; ret = heap->ops->allocate(heap, buffer, len, flags); - if (ret) { - if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) - goto err2; + if (ret == -EINTR || !(heap->flags & ION_HEAP_FLAG_DEFER_FREE)) + goto free_buffer; - if (ret == -EINTR) - goto err2; - - ion_heap_freelist_drain(heap, 0); - ret = heap->ops->allocate(heap, buffer, len, flags); - if (ret) - goto err2; + drain_workqueue(heap->wq); + if (heap->ops->allocate(heap, buffer, len, flags)) + goto free_buffer; } - if (buffer->sg_table == NULL) { - WARN_ONCE(1, "This heap needs to set the sgtable"); - ret = 
-EINVAL; - goto err1; - } - - table = buffer->sg_table; - buffer->dev = dev; - buffer->size = len; - - buffer->dev = dev; - buffer->size = len; - INIT_LIST_HEAD(&buffer->attachments); - INIT_LIST_HEAD(&buffer->vmas); - mutex_init(&buffer->lock); - - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) { - int i; - struct scatterlist *sg; - - /* - * this will set up dma addresses for the sglist -- it is not - * technically correct as per the dma api -- a specific - * device isn't really taking ownership here. However, in - * practice on our systems the only dma_address space is - * physical addresses. - */ - for_each_sg(table->sgl, sg, table->nents, i) { - sg_dma_address(sg) = sg_phys(sg); - sg_dma_len(sg) = sg->length; - } - } - - mutex_lock(&dev->buffer_lock); - ion_buffer_add(dev, buffer); - mutex_unlock(&dev->buffer_lock); - atomic_long_add(len, &heap->total_allocated); - atomic_long_add(len, &total_heap_bytes); return buffer; -err1: - heap->ops->free(buffer); -err2: +free_buffer: kfree(buffer); return ERR_PTR(ret); } -void ion_buffer_destroy(struct ion_buffer *buffer) -{ - if (buffer->kmap_cnt > 0) { - pr_warn_ratelimited("ION client likely missing a call to dma_buf_kunmap or dma_buf_vunmap\n"); - buffer->heap->ops->unmap_kernel(buffer->heap, buffer); - } - buffer->heap->ops->free(buffer); - kfree(buffer); -} - -static void _ion_buffer_destroy(struct ion_buffer *buffer) -{ - struct ion_heap *heap = buffer->heap; - struct ion_device *dev = buffer->dev; - - msm_dma_buf_freed(&buffer->iommu_data); - - mutex_lock(&dev->buffer_lock); - rb_erase(&buffer->node, &dev->buffers); - mutex_unlock(&dev->buffer_lock); - atomic_long_sub(buffer->size, &total_heap_bytes); - - atomic_long_sub(buffer->size, &buffer->heap->total_allocated); - if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) - ion_heap_freelist_add(heap, buffer); - else - ion_buffer_destroy(buffer); -} - -static void *ion_buffer_kmap_get(struct ion_buffer *buffer) -{ - void *vaddr; - - if (buffer->kmap_cnt) { - buffer->kmap_cnt++; - return buffer->vaddr; - } - vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); - if (WARN_ONCE(vaddr == NULL, - "heap->ops->map_kernel should return ERR_PTR on error")) - return ERR_PTR(-EINVAL); - if (IS_ERR(vaddr)) - return vaddr; - buffer->vaddr = vaddr; - buffer->kmap_cnt++; - return vaddr; -} - -static void ion_buffer_kmap_put(struct ion_buffer *buffer) -{ - if (buffer->kmap_cnt == 0) { - pr_warn_ratelimited("ION client likely missing a call to dma_buf_kmap or dma_buf_vmap, pid:%d\n", - current->pid); - return; - } - - buffer->kmap_cnt--; - if (!buffer->kmap_cnt) { - buffer->heap->ops->unmap_kernel(buffer->heap, buffer); - buffer->vaddr = NULL; - } -} - -static struct sg_table *dup_sg_table(struct sg_table *table) -{ - struct sg_table *new_table; - int ret, i; - struct scatterlist *sg, *new_sg; - - new_table = kzalloc(sizeof(*new_table), GFP_KERNEL); - if (!new_table) - return ERR_PTR(-ENOMEM); - - ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL); - if (ret) { - kfree(new_table); - return ERR_PTR(-ENOMEM); - } - - new_sg = new_table->sgl; - for_each_sg(table->sgl, sg, table->nents, i) { - memcpy(new_sg, sg, sizeof(*sg)); - sg_dma_address(new_sg) = 0; - sg_dma_len(new_sg) = 0; - new_sg = sg_next(new_sg); - } - - return new_table; -} - -static void free_duped_table(struct sg_table *table) -{ - sg_free_table(table); - kfree(table); -} - -struct ion_dma_buf_attachment { - struct device *dev; - struct sg_table *table; - struct list_head list; - bool dma_mapped; -}; - -static int ion_dma_buf_attach(struct 
dma_buf *dmabuf, struct device *dev, - struct dma_buf_attachment *attachment) -{ - struct ion_dma_buf_attachment *a; - struct sg_table *table; - struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), - iommu_data); - - a = kzalloc(sizeof(*a), GFP_KERNEL); - if (!a) - return -ENOMEM; - - table = dup_sg_table(buffer->sg_table); - if (IS_ERR(table)) { - kfree(a); - return -ENOMEM; - } - - a->table = table; - a->dev = dev; - a->dma_mapped = false; - INIT_LIST_HEAD(&a->list); - - attachment->priv = a; - - mutex_lock(&buffer->lock); - list_add(&a->list, &buffer->attachments); - mutex_unlock(&buffer->lock); - - return 0; -} - -static void ion_dma_buf_detatch(struct dma_buf *dmabuf, - struct dma_buf_attachment *attachment) -{ - struct ion_dma_buf_attachment *a = attachment->priv; - struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), - iommu_data); - - mutex_lock(&buffer->lock); - list_del(&a->list); - mutex_unlock(&buffer->lock); - free_duped_table(a->table); - - kfree(a); -} - - static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, - enum dma_data_direction direction) + enum dma_data_direction dir) { + struct dma_buf *dmabuf = attachment->dmabuf; + struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), + iommu_data); struct ion_dma_buf_attachment *a = attachment->priv; - struct sg_table *table; - int count, map_attrs; - struct ion_buffer *buffer = container_of(attachment->dmabuf->priv, - typeof(*buffer), iommu_data); + int count, map_attrs = attachment->dma_map_attrs; - table = a->table; - - map_attrs = attachment->dma_map_attrs; if (!(buffer->flags & ION_FLAG_CACHED) || !hlos_accessible_buffer(buffer)) map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; - mutex_lock(&buffer->lock); - if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC) - trace_ion_dma_map_cmo_skip(attachment->dev, - attachment->dmabuf->buf_name, - ion_buffer_cached(buffer), - hlos_accessible_buffer(buffer), - attachment->dma_map_attrs, - direction); + down_write(&a->map_rwsem); + if (map_attrs & DMA_ATTR_DELAYED_UNMAP) + count = msm_dma_map_sg_attrs(attachment->dev, a->table.sgl, + a->table.nents, dir, dmabuf, + map_attrs); else - trace_ion_dma_map_cmo_apply(attachment->dev, - attachment->dmabuf->buf_name, - ion_buffer_cached(buffer), - hlos_accessible_buffer(buffer), - attachment->dma_map_attrs, - direction); + count = dma_map_sg_attrs(attachment->dev, a->table.sgl, + a->table.nents, dir, map_attrs); + if (count) + a->dma_mapped = true; + up_write(&a->map_rwsem); - if (map_attrs & DMA_ATTR_DELAYED_UNMAP) { - count = msm_dma_map_sg_attrs(attachment->dev, table->sgl, - table->nents, direction, - attachment->dmabuf, map_attrs); - } else { - count = dma_map_sg_attrs(attachment->dev, table->sgl, - table->nents, direction, - map_attrs); - } - - if (count <= 0) { - mutex_unlock(&buffer->lock); - return ERR_PTR(-ENOMEM); - } - - a->dma_mapped = true; - mutex_unlock(&buffer->lock); - return table; + return count ? 
&a->table : ERR_PTR(-ENOMEM); } static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table, - enum dma_data_direction direction) + enum dma_data_direction dir) { - int map_attrs; - struct ion_buffer *buffer = container_of(attachment->dmabuf->priv, - typeof(*buffer), iommu_data); + struct dma_buf *dmabuf = attachment->dmabuf; + struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), + iommu_data); struct ion_dma_buf_attachment *a = attachment->priv; + int map_attrs = attachment->dma_map_attrs; - map_attrs = attachment->dma_map_attrs; if (!(buffer->flags & ION_FLAG_CACHED) || !hlos_accessible_buffer(buffer)) map_attrs |= DMA_ATTR_SKIP_CPU_SYNC; - mutex_lock(&buffer->lock); - if (map_attrs & DMA_ATTR_SKIP_CPU_SYNC) - trace_ion_dma_unmap_cmo_skip(attachment->dev, - attachment->dmabuf->buf_name, - ion_buffer_cached(buffer), - hlos_accessible_buffer(buffer), - attachment->dma_map_attrs, - direction); - else - trace_ion_dma_unmap_cmo_apply(attachment->dev, - attachment->dmabuf->buf_name, - ion_buffer_cached(buffer), - hlos_accessible_buffer(buffer), - attachment->dma_map_attrs, - direction); - + down_write(&a->map_rwsem); if (map_attrs & DMA_ATTR_DELAYED_UNMAP) msm_dma_unmap_sg_attrs(attachment->dev, table->sgl, - table->nents, direction, - attachment->dmabuf, - map_attrs); + table->nents, dir, dmabuf, map_attrs); else dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents, - direction, map_attrs); + dir, map_attrs); a->dma_mapped = false; - mutex_unlock(&buffer->lock); + up_write(&a->map_rwsem); } -void ion_pages_sync_for_device(struct device *dev, struct page *page, - size_t size, enum dma_data_direction dir) -{ - struct scatterlist sg; - - sg_init_table(&sg, 1); - sg_set_page(&sg, page, size, 0); - /* - * This is not correct - sg_dma_address needs a dma_addr_t that is valid - * for the targeted device, but this works on the currently targeted - * hardware. 
- */ - sg_dma_address(&sg) = page_to_phys(page); - dma_sync_sg_for_device(dev, &sg, 1, dir); -} - -static void ion_vm_open(struct vm_area_struct *vma) -{ - struct ion_buffer *buffer = vma->vm_private_data; - struct ion_vma_list *vma_list; - - vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL); - if (!vma_list) - return; - vma_list->vma = vma; - mutex_lock(&buffer->lock); - list_add(&vma_list->list, &buffer->vmas); - mutex_unlock(&buffer->lock); -} - -static void ion_vm_close(struct vm_area_struct *vma) -{ - struct ion_buffer *buffer = vma->vm_private_data; - struct ion_vma_list *vma_list, *tmp; - - mutex_lock(&buffer->lock); - list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) { - if (vma_list->vma != vma) - continue; - list_del(&vma_list->list); - kfree(vma_list); - break; - } - mutex_unlock(&buffer->lock); -} - -static const struct vm_operations_struct ion_vma_ops = { - .open = ion_vm_open, - .close = ion_vm_close, -}; - static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) { struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), iommu_data); - int ret = 0; + struct ion_heap *heap = buffer->heap; - if (!buffer->heap->ops->map_user) { - pr_err("%s: this heap does not define a method for mapping to userspace\n", - __func__); + if (!buffer->heap->ops->map_user) return -EINVAL; - } if (!(buffer->flags & ION_FLAG_CACHED)) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); - vma->vm_private_data = buffer; - vma->vm_ops = &ion_vma_ops; - ion_vm_open(vma); - - mutex_lock(&buffer->lock); - /* now map it to userspace */ - ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); - mutex_unlock(&buffer->lock); - - if (ret) - pr_err("%s: failure mapping buffer to userspace\n", - __func__); - - return ret; + return heap->ops->map_user(heap, buffer, vma); } static void ion_dma_buf_release(struct dma_buf *dmabuf) { struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), iommu_data); + struct ion_heap *heap = buffer->heap; - _ion_buffer_destroy(buffer); - kfree(dmabuf->exp_name); + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) + queue_work(heap->wq, &buffer->free); + else + ion_buffer_free_work(&buffer->free); } static void *ion_dma_buf_vmap(struct dma_buf *dmabuf) { struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), iommu_data); - void *vaddr = ERR_PTR(-EINVAL); + struct ion_heap *heap = buffer->heap; + void *vaddr; - if (buffer->heap->ops->map_kernel) { - mutex_lock(&buffer->lock); - vaddr = ion_buffer_kmap_get(buffer); - mutex_unlock(&buffer->lock); + if (!heap->ops->map_kernel) + return ERR_PTR(-ENODEV); + + mutex_lock(&buffer->kmap_lock); + if (buffer->kmap_refcount) { + vaddr = buffer->vaddr; + buffer->kmap_refcount++; } else { - pr_warn_ratelimited("heap %s doesn't support map_kernel\n", - buffer->heap->name); + vaddr = heap->ops->map_kernel(heap, buffer); + if (IS_ERR_OR_NULL(vaddr)) { + vaddr = ERR_PTR(-EINVAL); + } else { + buffer->vaddr = vaddr; + buffer->kmap_refcount++; + } } + mutex_unlock(&buffer->kmap_lock); return vaddr; } @@ -543,22 +207,19 @@ static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) { struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), iommu_data); + struct ion_heap *heap = buffer->heap; - if (buffer->heap->ops->map_kernel) { - mutex_lock(&buffer->lock); - ion_buffer_kmap_put(buffer); - mutex_unlock(&buffer->lock); - } + mutex_lock(&buffer->kmap_lock); + if (!--buffer->kmap_refcount) + heap->ops->unmap_kernel(heap, buffer); + 
mutex_unlock(&buffer->kmap_lock); } static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) { - /* - * TODO: Once clients remove their hacks where they assume kmap(ed) - * addresses are virtually contiguous implement this properly - */ - void *vaddr = ion_dma_buf_vmap(dmabuf); + void *vaddr; + vaddr = ion_dma_buf_vmap(dmabuf); if (IS_ERR(vaddr)) return vaddr; @@ -568,305 +229,163 @@ static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, void *ptr) { - /* - * TODO: Once clients remove their hacks where they assume kmap(ed) - * addresses are virtually contiguous implement this properly - */ - ion_dma_buf_vunmap(dmabuf, ptr); + ion_dma_buf_vunmap(dmabuf, NULL); } -static int ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl, - unsigned int nents, unsigned long offset, - unsigned long length, - enum dma_data_direction dir, bool for_cpu) +static int ion_dup_sg_table(struct sg_table *dst, struct sg_table *src) { - int i; - struct scatterlist *sg; - unsigned int len = 0; - dma_addr_t sg_dma_addr; + unsigned int nents = src->nents; + struct scatterlist *d, *s; - for_each_sg(sgl, sg, nents, i) { - if (sg_dma_len(sg) == 0) - break; + if (sg_alloc_table(dst, nents, GFP_KERNEL)) + return -ENOMEM; - if (i > 0) { - pr_warn_ratelimited( - "Partial cmo only supported with 1 segment\n" - "is dma_set_max_seg_size being set on dev:%s\n", - dev_name(dev)); - return -EINVAL; + for (d = dst->sgl, s = src->sgl; + nents > SG_MAX_SINGLE_ALLOC; nents -= SG_MAX_SINGLE_ALLOC - 1, + d = sg_chain_ptr(&d[SG_MAX_SINGLE_ALLOC - 1]), + s = sg_chain_ptr(&s[SG_MAX_SINGLE_ALLOC - 1])) + memcpy(d, s, (SG_MAX_SINGLE_ALLOC - 1) * sizeof(*d)); + + if (nents) + memcpy(d, s, nents * sizeof(*d)); + + return 0; +} + +static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev, + struct dma_buf_attachment *attachment) +{ + struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), + iommu_data); + struct ion_dma_buf_attachment *a; + + spin_lock(&buffer->freelist_lock); + list_for_each_entry(a, &buffer->map_freelist, list) { + if (a->dev == dev) { + list_del(&a->list); + spin_unlock(&buffer->freelist_lock); + attachment->priv = a; + return 0; + } + } + spin_unlock(&buffer->freelist_lock); + + a = kmalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; + + if (ion_dup_sg_table(&a->table, buffer->sg_table)) { + kfree(a); + return -ENOMEM; + } + + a->dev = dev; + a->dma_mapped = false; + a->map_rwsem = (struct rw_semaphore)__RWSEM_INITIALIZER(a->map_rwsem); + attachment->priv = a; + a->next = buffer->attachments; + buffer->attachments = a; + + return 0; +} + +static void ion_dma_buf_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), + iommu_data); + struct ion_dma_buf_attachment *a = attachment->priv; + + spin_lock(&buffer->freelist_lock); + list_add(&a->list, &buffer->map_freelist); + spin_unlock(&buffer->freelist_lock); +} + +static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction dir) +{ + struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), + iommu_data); + struct ion_dma_buf_attachment *a; + + if (!hlos_accessible_buffer(buffer)) + return -EPERM; + + if (!(buffer->flags & ION_FLAG_CACHED)) + return 0; + + for (a = buffer->attachments; a; a = a->next) { + if (down_read_trylock(&a->map_rwsem)) { + if (a->dma_mapped) + 
dma_sync_sg_for_cpu(a->dev, a->table.sgl, + a->table.nents, dir); + up_read(&a->map_rwsem); } } + return 0; +} + +static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction dir) +{ + struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), + iommu_data); + struct ion_dma_buf_attachment *a; + + if (!hlos_accessible_buffer(buffer)) + return -EPERM; + + if (!(buffer->flags & ION_FLAG_CACHED)) + return 0; + + for (a = buffer->attachments; a; a = a->next) { + if (down_read_trylock(&a->map_rwsem)) { + if (a->dma_mapped) + dma_sync_sg_for_device(a->dev, a->table.sgl, + a->table.nents, dir); + up_read(&a->map_rwsem); + } + } + + return 0; +} + +static void ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl, + unsigned int nents, unsigned long offset, + unsigned long len, enum dma_data_direction dir, + bool for_cpu) +{ + dma_addr_t sg_dma_addr = sg_dma_address(sgl); + unsigned long total = 0; + struct scatterlist *sg; + int i; for_each_sg(sgl, sg, nents, i) { - unsigned int sg_offset, sg_left, size = 0; + unsigned long sg_offset, sg_left, size; - if (i == 0) - sg_dma_addr = sg_dma_address(sg); - - len += sg->length; - if (len <= offset) { + total += sg->length; + if (total <= offset) { sg_dma_addr += sg->length; continue; } - sg_left = len - offset; + sg_left = total - offset; sg_offset = sg->length - sg_left; - - size = (length < sg_left) ? length : sg_left; + size = min(len, sg_left); if (for_cpu) dma_sync_single_range_for_cpu(dev, sg_dma_addr, sg_offset, size, dir); else dma_sync_single_range_for_device(dev, sg_dma_addr, sg_offset, size, dir); + len -= size; + if (!len) + break; offset += size; - length -= size; sg_dma_addr += sg->length; - - if (length == 0) - break; } - - return 0; -} - -static int ion_sgl_sync_mapped(struct device *dev, struct scatterlist *sgl, - unsigned int nents, struct list_head *vmas, - enum dma_data_direction dir, bool for_cpu) -{ - struct ion_vma_list *vma_list; - int ret = 0; - - list_for_each_entry(vma_list, vmas, list) { - struct vm_area_struct *vma = vma_list->vma; - - ret = ion_sgl_sync_range(dev, sgl, nents, - vma->vm_pgoff * PAGE_SIZE, - vma->vm_end - vma->vm_start, dir, - for_cpu); - if (ret) - break; - } - - return ret; -} - -static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction, - bool sync_only_mapped) -{ - struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), - iommu_data); - struct ion_dma_buf_attachment *a; - int ret = 0; - - if (!hlos_accessible_buffer(buffer)) { - trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->buf_name, - ion_buffer_cached(buffer), - false, direction, - sync_only_mapped); - ret = -EPERM; - goto out; - } - - if (!(buffer->flags & ION_FLAG_CACHED)) { - trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->buf_name, - false, true, direction, - sync_only_mapped); - goto out; - } - - mutex_lock(&buffer->lock); - - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) { - struct device *dev = buffer->heap->priv; - struct sg_table *table = buffer->sg_table; - - if (sync_only_mapped) - ret = ion_sgl_sync_mapped(dev, table->sgl, - table->nents, &buffer->vmas, - direction, true); - else - dma_sync_sg_for_cpu(dev, table->sgl, - table->nents, direction); - - if (!ret) - trace_ion_begin_cpu_access_cmo_apply(dev, - dmabuf->buf_name, - true, true, - direction, - sync_only_mapped); - else - trace_ion_begin_cpu_access_cmo_skip(dev, - dmabuf->buf_name, - true, true, - direction, - sync_only_mapped); - mutex_unlock(&buffer->lock); - goto out; - } 
- - list_for_each_entry(a, &buffer->attachments, list) { - int tmp = 0; - - if (!a->dma_mapped) { - trace_ion_begin_cpu_access_notmapped(a->dev, - dmabuf->buf_name, - true, true, - direction, - sync_only_mapped); - continue; - } - - if (sync_only_mapped) - tmp = ion_sgl_sync_mapped(a->dev, a->table->sgl, - a->table->nents, - &buffer->vmas, - direction, true); - else - dma_sync_sg_for_cpu(a->dev, a->table->sgl, - a->table->nents, direction); - - if (!tmp) { - trace_ion_begin_cpu_access_cmo_apply(a->dev, - dmabuf->buf_name, - true, true, - direction, - sync_only_mapped); - } else { - trace_ion_begin_cpu_access_cmo_skip(a->dev, - dmabuf->buf_name, - true, true, - direction, - sync_only_mapped); - ret = tmp; - } - - } - mutex_unlock(&buffer->lock); - -out: - return ret; -} - -static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction, - bool sync_only_mapped) -{ - struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), - iommu_data); - struct ion_dma_buf_attachment *a; - int ret = 0; - - if (!hlos_accessible_buffer(buffer)) { - trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->buf_name, - ion_buffer_cached(buffer), - false, direction, - sync_only_mapped); - ret = -EPERM; - goto out; - } - - if (!(buffer->flags & ION_FLAG_CACHED)) { - trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->buf_name, false, - true, direction, - sync_only_mapped); - goto out; - } - - mutex_lock(&buffer->lock); - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) { - struct device *dev = buffer->heap->priv; - struct sg_table *table = buffer->sg_table; - - if (sync_only_mapped) - ret = ion_sgl_sync_mapped(dev, table->sgl, - table->nents, &buffer->vmas, - direction, false); - else - dma_sync_sg_for_device(dev, table->sgl, - table->nents, direction); - - if (!ret) - trace_ion_end_cpu_access_cmo_apply(dev, - dmabuf->buf_name, - true, true, - direction, - sync_only_mapped); - else - trace_ion_end_cpu_access_cmo_skip(dev, dmabuf->buf_name, - true, true, direction, - sync_only_mapped); - mutex_unlock(&buffer->lock); - goto out; - } - - list_for_each_entry(a, &buffer->attachments, list) { - int tmp = 0; - - if (!a->dma_mapped) { - trace_ion_end_cpu_access_notmapped(a->dev, - dmabuf->buf_name, - true, true, - direction, - sync_only_mapped); - continue; - } - - if (sync_only_mapped) - tmp = ion_sgl_sync_mapped(a->dev, a->table->sgl, - a->table->nents, - &buffer->vmas, direction, - false); - else - dma_sync_sg_for_device(a->dev, a->table->sgl, - a->table->nents, direction); - - if (!tmp) { - trace_ion_end_cpu_access_cmo_apply(a->dev, - dmabuf->buf_name, - true, true, - direction, - sync_only_mapped); - } else { - trace_ion_end_cpu_access_cmo_skip(a->dev, - dmabuf->buf_name, - true, true, direction, - sync_only_mapped); - ret = tmp; - } - } - mutex_unlock(&buffer->lock); - -out: - return ret; -} - -static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction) -{ - return __ion_dma_buf_begin_cpu_access(dmabuf, direction, false); -} - -static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction) -{ - return __ion_dma_buf_end_cpu_access(dmabuf, direction, false); -} - -static int ion_dma_buf_begin_cpu_access_umapped(struct dma_buf *dmabuf, - enum dma_data_direction dir) -{ - return __ion_dma_buf_begin_cpu_access(dmabuf, dir, true); -} - -static int ion_dma_buf_end_cpu_access_umapped(struct dma_buf *dmabuf, - enum dma_data_direction dir) -{ - return __ion_dma_buf_end_cpu_access(dmabuf, dir, true); } static int 
ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf, @@ -879,81 +398,32 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf, struct ion_dma_buf_attachment *a; int ret = 0; - if (!hlos_accessible_buffer(buffer)) { - trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->buf_name, - ion_buffer_cached(buffer), - false, dir, - false); - ret = -EPERM; - goto out; - } + if (!hlos_accessible_buffer(buffer)) + return -EPERM; - if (!(buffer->flags & ION_FLAG_CACHED)) { - trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->buf_name, - false, true, dir, - false); - goto out; - } + if (!(buffer->flags & ION_FLAG_CACHED)) + return 0; - mutex_lock(&buffer->lock); - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) { - struct device *dev = buffer->heap->priv; - struct sg_table *table = buffer->sg_table; - - ret = ion_sgl_sync_range(dev, table->sgl, table->nents, - offset, len, dir, true); - - if (!ret) - trace_ion_begin_cpu_access_cmo_apply(dev, - dmabuf->buf_name, - true, true, dir, - false); - else - trace_ion_begin_cpu_access_cmo_skip(dev, - dmabuf->buf_name, - true, true, dir, - false); - mutex_unlock(&buffer->lock); - goto out; - } - - list_for_each_entry(a, &buffer->attachments, list) { - int tmp = 0; - - if (!a->dma_mapped) { - trace_ion_begin_cpu_access_notmapped(a->dev, - dmabuf->buf_name, - true, true, - dir, - false); + for (a = buffer->attachments; a; a = a->next) { + if (a->table.nents > 1 && sg_next(a->table.sgl)->dma_length) { + ret = -EINVAL; continue; } - tmp = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents, - offset, len, dir, true); - - if (!tmp) { - trace_ion_begin_cpu_access_cmo_apply(a->dev, - dmabuf->buf_name, - true, true, dir, - false); - } else { - trace_ion_begin_cpu_access_cmo_skip(a->dev, - dmabuf->buf_name, - true, true, dir, - false); - ret = tmp; + if (down_read_trylock(&a->map_rwsem)) { + if (a->dma_mapped) + ion_sgl_sync_range(a->dev, a->table.sgl, + a->table.nents, offset, len, + dir, true); + up_read(&a->map_rwsem); } - } - mutex_unlock(&buffer->lock); -out: return ret; } static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf, - enum dma_data_direction direction, + enum dma_data_direction dir, unsigned int offset, unsigned int len) { @@ -962,100 +432,48 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf, struct ion_dma_buf_attachment *a; int ret = 0; - if (!hlos_accessible_buffer(buffer)) { - trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->buf_name, - ion_buffer_cached(buffer), - false, direction, - false); - ret = -EPERM; - goto out; - } + if (!hlos_accessible_buffer(buffer)) + return -EPERM; - if (!(buffer->flags & ION_FLAG_CACHED)) { - trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->buf_name, false, - true, direction, - false); - goto out; - } + if (!(buffer->flags & ION_FLAG_CACHED)) + return 0; - mutex_lock(&buffer->lock); - if (IS_ENABLED(CONFIG_ION_FORCE_DMA_SYNC)) { - struct device *dev = buffer->heap->priv; - struct sg_table *table = buffer->sg_table; - - ret = ion_sgl_sync_range(dev, table->sgl, table->nents, - offset, len, direction, false); - - if (!ret) - trace_ion_end_cpu_access_cmo_apply(dev, - dmabuf->buf_name, - true, true, - direction, false); - else - trace_ion_end_cpu_access_cmo_skip(dev, dmabuf->buf_name, - true, true, - direction, false); - - mutex_unlock(&buffer->lock); - goto out; - } - - list_for_each_entry(a, &buffer->attachments, list) { - int tmp = 0; - - if (!a->dma_mapped) { - trace_ion_end_cpu_access_notmapped(a->dev, - dmabuf->buf_name, - true, true, - direction, - false); + 
for (a = buffer->attachments; a; a = a->next) { + if (a->table.nents > 1 && sg_next(a->table.sgl)->dma_length) { + ret = -EINVAL; continue; } - tmp = ion_sgl_sync_range(a->dev, a->table->sgl, a->table->nents, - offset, len, direction, false); - - if (!tmp) { - trace_ion_end_cpu_access_cmo_apply(a->dev, - dmabuf->buf_name, - true, true, - direction, false); - - } else { - trace_ion_end_cpu_access_cmo_skip(a->dev, - dmabuf->buf_name, - true, true, direction, - false); - ret = tmp; + if (down_read_trylock(&a->map_rwsem)) { + if (a->dma_mapped) + ion_sgl_sync_range(a->dev, a->table.sgl, + a->table.nents, offset, len, + dir, false); + up_read(&a->map_rwsem); } } - mutex_unlock(&buffer->lock); -out: return ret; } -static int ion_dma_buf_get_flags(struct dma_buf *dmabuf, - unsigned long *flags) +static int ion_dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags) { struct ion_buffer *buffer = container_of(dmabuf->priv, typeof(*buffer), iommu_data); - *flags = buffer->flags; + *flags = buffer->flags; return 0; } -static const struct dma_buf_ops dma_buf_ops = { +static const struct dma_buf_ops ion_dma_buf_ops = { .map_dma_buf = ion_map_dma_buf, .unmap_dma_buf = ion_unmap_dma_buf, .mmap = ion_mmap, .release = ion_dma_buf_release, .attach = ion_dma_buf_attach, - .detach = ion_dma_buf_detatch, + .detach = ion_dma_buf_detach, .begin_cpu_access = ion_dma_buf_begin_cpu_access, .end_cpu_access = ion_dma_buf_end_cpu_access, - .begin_cpu_access_umapped = ion_dma_buf_begin_cpu_access_umapped, - .end_cpu_access_umapped = ion_dma_buf_end_cpu_access_umapped, .begin_cpu_access_partial = ion_dma_buf_begin_cpu_access_partial, .end_cpu_access_partial = ion_dma_buf_end_cpu_access_partial, .map_atomic = ion_dma_buf_kmap, @@ -1064,42 +482,31 @@ static const struct dma_buf_ops dma_buf_ops = { .unmap = ion_dma_buf_kunmap, .vmap = ion_dma_buf_vmap, .vunmap = ion_dma_buf_vunmap, - .get_flags = ion_dma_buf_get_flags, + .get_flags = ion_dma_buf_get_flags }; struct dma_buf *ion_alloc_dmabuf(size_t len, unsigned int heap_id_mask, unsigned int flags) { - struct ion_device *dev = internal_dev; + struct ion_device *idev = &ion_dev; + struct dma_buf_export_info exp_info; struct ion_buffer *buffer = NULL; - struct ion_heap *heap; - DEFINE_DMA_BUF_EXPORT_INFO(exp_info); struct dma_buf *dmabuf; - char task_comm[TASK_COMM_LEN]; + struct ion_heap *heap; - pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__, - len, heap_id_mask, flags); - /* - * traverse the list of heaps available in this system in priority - * order. If the heap type is supported by the client, and matches the - * request of the caller allocate from it. 
Repeat until allocate has - * succeeded or all heaps have been tried - */ len = PAGE_ALIGN(len); - if (!len) return ERR_PTR(-EINVAL); - down_read(&dev->lock); - plist_for_each_entry(heap, &dev->heaps, node) { - /* if the caller didn't specify this heap id */ - if (!((1 << heap->id) & heap_id_mask)) - continue; - buffer = ion_buffer_create(heap, dev, len, flags); - if (!IS_ERR(buffer) || PTR_ERR(buffer) == -EINTR) - break; + down_read(&idev->heap_rwsem); + plist_for_each_entry(heap, &idev->heaps, node) { + if (BIT(heap->id) & heap_id_mask) { + buffer = ion_buffer_create(heap, len, flags); + if (!IS_ERR(buffer) || PTR_ERR(buffer) == -EINTR) + break; + } } - up_read(&dev->lock); + up_read(&idev->heap_rwsem); if (!buffer) return ERR_PTR(-ENODEV); @@ -1107,74 +514,28 @@ struct dma_buf *ion_alloc_dmabuf(size_t len, unsigned int heap_id_mask, if (IS_ERR(buffer)) return ERR_CAST(buffer); - get_task_comm(task_comm, current->group_leader); - - exp_info.ops = &dma_buf_ops; - exp_info.size = buffer->size; - exp_info.flags = O_RDWR; - exp_info.priv = &buffer->iommu_data; - exp_info.exp_name = kasprintf(GFP_KERNEL, "%s-%s-%d-%s", KBUILD_MODNAME, - heap->name, current->tgid, task_comm); + exp_info = (typeof(exp_info)){ + .ops = &ion_dma_buf_ops, + .size = buffer->size, + .flags = O_RDWR, + .priv = &buffer->iommu_data + }; dmabuf = dma_buf_export(&exp_info); - if (IS_ERR(dmabuf)) { - _ion_buffer_destroy(buffer); - kfree(exp_info.exp_name); - } + if (IS_ERR(dmabuf)) + ion_buffer_free_work(&buffer->free); return dmabuf; } -struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask, - unsigned int flags) +static int ion_alloc_fd(struct ion_allocation_data *a) { - struct ion_device *dev = internal_dev; - struct ion_heap *heap; - bool type_valid = false; - - pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__, - len, heap_id_mask, flags); - /* - * traverse the list of heaps available in this system in priority - * order. Check the heap type is supported. 
- */ - - down_read(&dev->lock); - plist_for_each_entry(heap, &dev->heaps, node) { - /* if the caller didn't specify this heap id */ - if (!((1 << heap->id) & heap_id_mask)) - continue; - if (heap->type == ION_HEAP_TYPE_SYSTEM || - heap->type == ION_HEAP_TYPE_CARVEOUT || - heap->type == (enum ion_heap_type)ION_HEAP_TYPE_HYP_CMA || - heap->type == - (enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE || - heap->type == (enum ion_heap_type)ION_HEAP_TYPE_DMA) { - type_valid = true; - } else { - pr_warn("%s: heap type not supported, type:%d\n", - __func__, heap->type); - } - break; - } - up_read(&dev->lock); - - if (!type_valid) - return ERR_PTR(-EINVAL); - - return ion_alloc_dmabuf(len, heap_id_mask, flags); -} -EXPORT_SYMBOL(ion_alloc); - -int ion_alloc_fd(size_t len, unsigned int heap_id_mask, unsigned int flags) -{ - int fd; struct dma_buf *dmabuf; + int fd; - dmabuf = ion_alloc_dmabuf(len, heap_id_mask, flags); - if (IS_ERR(dmabuf)) { + dmabuf = ion_alloc_dmabuf(a->len, a->heap_id_mask, a->flags); + if (IS_ERR(dmabuf)) return PTR_ERR(dmabuf); - } fd = dma_buf_fd(dmabuf, O_CLOEXEC); if (fd < 0) @@ -1183,235 +544,94 @@ int ion_alloc_fd(size_t len, unsigned int heap_id_mask, unsigned int flags) return fd; } -int ion_query_heaps(struct ion_heap_query *query) +void ion_device_add_heap(struct ion_device *idev, struct ion_heap *heap) { - struct ion_device *dev = internal_dev; - struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps); - int ret = -EINVAL, cnt = 0, max_cnt; + if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) { + heap->wq = alloc_workqueue("%s", WQ_UNBOUND, + WQ_UNBOUND_MAX_ACTIVE, heap->name); + BUG_ON(!heap->wq); + } + + if (heap->ops->shrink) + ion_heap_init_shrinker(heap); + + plist_node_init(&heap->node, -heap->id); + + down_write(&idev->heap_rwsem); + plist_add(&heap->node, &idev->heaps); + up_write(&idev->heap_rwsem); +} + +static int ion_walk_heaps(int heap_id, int type, void *data, + int (*f)(struct ion_heap *heap, void *data)) +{ + struct ion_device *idev = &ion_dev; struct ion_heap *heap; - struct ion_heap_data hdata; + int ret = 0; - memset(&hdata, 0, sizeof(hdata)); - - down_read(&dev->lock); - if (!buffer) { - query->cnt = dev->heap_cnt; - ret = 0; - goto out; - } - - if (query->cnt <= 0) - goto out; - - max_cnt = query->cnt; - - plist_for_each_entry(heap, &dev->heaps, node) { - strlcpy(hdata.name, heap->name, sizeof(hdata.name)); - hdata.name[sizeof(hdata.name) - 1] = '\0'; - hdata.type = heap->type; - hdata.heap_id = heap->id; - - if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) { - ret = -EFAULT; - goto out; - } - - cnt++; - if (cnt >= max_cnt) + down_write(&idev->heap_rwsem); + plist_for_each_entry(heap, &idev->heaps, node) { + if (heap->type == type && ION_HEAP(heap->id) == heap_id) { + ret = f(heap, data); break; + } } + up_write(&idev->heap_rwsem); - query->cnt = cnt; - ret = 0; -out: - up_read(&dev->lock); return ret; } -static const struct file_operations ion_fops = { - .owner = THIS_MODULE, - .unlocked_ioctl = ion_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = ion_ioctl, -#endif -}; - -static int debug_shrink_set(void *data, u64 val) +static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - struct ion_heap *heap = data; - struct shrink_control sc; - int objs; + union { + struct ion_allocation_data allocation; + struct ion_prefetch_data prefetch_data; + } data; + int fd, *output; - sc.gfp_mask = GFP_HIGHUSER; - sc.nr_to_scan = val; + if (_IOC_SIZE(cmd) > sizeof(data)) + return -EINVAL; - if (!val) { - objs = 
heap->shrinker.count_objects(&heap->shrinker, &sc); - sc.nr_to_scan = objs; + if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) + return -EFAULT; + + switch (cmd) { + case ION_IOC_ALLOC: + fd = ion_alloc_fd(&data.allocation); + if (fd < 0) + return fd; + + output = &fd; + arg += offsetof(struct ion_allocation_data, fd); + break; + case ION_IOC_PREFETCH: + return ion_walk_heaps(data.prefetch_data.heap_id, + ION_HEAP_TYPE_SYSTEM_SECURE, + &data.prefetch_data, + ion_system_secure_heap_prefetch); + case ION_IOC_DRAIN: + return ion_walk_heaps(data.prefetch_data.heap_id, + ION_HEAP_TYPE_SYSTEM_SECURE, + &data.prefetch_data, + ion_system_secure_heap_drain); + default: + return -ENOTTY; } - heap->shrinker.scan_objects(&heap->shrinker, &sc); - return 0; -} - -static int debug_shrink_get(void *data, u64 *val) -{ - struct ion_heap *heap = data; - struct shrink_control sc; - int objs; - - sc.gfp_mask = GFP_HIGHUSER; - sc.nr_to_scan = 0; - - objs = heap->shrinker.count_objects(&heap->shrinker, &sc); - *val = objs; - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get, - debug_shrink_set, "%llu\n"); - -void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) -{ - struct dentry *debug_file; - - if (!heap->ops->allocate || !heap->ops->free) - pr_err("%s: can not add heap with invalid ops struct.\n", - __func__); - - spin_lock_init(&heap->free_lock); - heap->free_list_size = 0; - - if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) - ion_heap_init_deferred_free(heap); - - if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) - ion_heap_init_shrinker(heap); - - heap->dev = dev; - down_write(&dev->lock); - /* - * use negative heap->id to reverse the priority -- when traversing - * the list later attempt higher id numbers first - */ - plist_node_init(&heap->node, -heap->id); - plist_add(&heap->node, &dev->heaps); - - if (heap->shrinker.count_objects && heap->shrinker.scan_objects) { - char debug_name[64]; - - snprintf(debug_name, 64, "%s_shrink", heap->name); - debug_file = debugfs_create_file( - debug_name, 0644, dev->debug_root, heap, - &debug_shrink_fops); - if (!debug_file) { - char buf[256], *path; - - path = dentry_path(dev->debug_root, buf, 256); - pr_err("Failed to create heap shrinker debugfs at %s/%s\n", - path, debug_name); - } - } - - dev->heap_cnt++; - up_write(&dev->lock); -} -EXPORT_SYMBOL(ion_device_add_heap); - -static ssize_t -total_heaps_kb_show(struct kobject *kobj, struct kobj_attribute *attr, - char *buf) -{ - u64 size_in_bytes = atomic_long_read(&total_heap_bytes); - - return sprintf(buf, "%llu\n", div_u64(size_in_bytes, 1024)); -} - -static ssize_t -total_pools_kb_show(struct kobject *kobj, struct kobj_attribute *attr, - char *buf) -{ - u64 size_in_bytes = ion_page_pool_nr_pages() * PAGE_SIZE; - - return sprintf(buf, "%llu\n", div_u64(size_in_bytes, 1024)); -} - -static struct kobj_attribute total_heaps_kb_attr = - __ATTR_RO(total_heaps_kb); - -static struct kobj_attribute total_pools_kb_attr = - __ATTR_RO(total_pools_kb); - -static struct attribute *ion_device_attrs[] = { - &total_heaps_kb_attr.attr, - &total_pools_kb_attr.attr, - NULL, -}; - -ATTRIBUTE_GROUPS(ion_device); - -static int ion_init_sysfs(void) -{ - struct kobject *ion_kobj; - int ret; - - ion_kobj = kobject_create_and_add("ion", kernel_kobj); - if (!ion_kobj) - return -ENOMEM; - - ret = sysfs_create_groups(ion_kobj, ion_device_groups); - if (ret) { - kobject_put(ion_kobj); - return ret; - } + if (copy_to_user((void __user *)arg, output, sizeof(*output))) + return 
-EFAULT; return 0; } struct ion_device *ion_device_create(void) { - struct ion_device *idev; + struct ion_device *idev = &ion_dev; int ret; - idev = kzalloc(sizeof(*idev), GFP_KERNEL); - if (!idev) - return ERR_PTR(-ENOMEM); - - idev->dev.minor = MISC_DYNAMIC_MINOR; - idev->dev.name = "ion"; - idev->dev.fops = &ion_fops; - idev->dev.parent = NULL; ret = misc_register(&idev->dev); - if (ret) { - pr_err("ion: failed to register misc device.\n"); - goto err_reg; - } + if (ret) + return ERR_PTR(ret); - ret = ion_init_sysfs(); - if (ret) { - pr_err("ion: failed to add sysfs attributes.\n"); - goto err_sysfs; - } - - idev->debug_root = debugfs_create_dir("ion", NULL); - if (!idev->debug_root) { - pr_err("ion: failed to create debugfs root directory.\n"); - goto debugfs_done; - } - -debugfs_done: - - idev->buffers = RB_ROOT; - mutex_init(&idev->buffer_lock); - init_rwsem(&idev->lock); - plist_head_init(&idev->heaps); - internal_dev = idev; return idev; - -err_sysfs: - misc_deregister(&idev->dev); -err_reg: - kfree(idev); - return ERR_PTR(ret); } -EXPORT_SYMBOL(ion_device_create); diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h index b6cc7b4710ec..a1577a6ab178 100644 --- a/drivers/staging/android/ion/ion.h +++ b/drivers/staging/android/ion/ion.h @@ -121,24 +121,21 @@ struct ion_vma_list { * @sg_table: the sg table for the buffer if dmap_cnt is not zero * @vmas: list of vma's mapping this buffer */ +struct ion_dma_buf_attachment; struct ion_buffer { - union { - struct rb_node node; - struct list_head list; - }; - struct ion_device *dev; struct ion_heap *heap; - unsigned long flags; - unsigned long private_flags; - size_t size; - void *priv_virt; - /* Protect ion buffer */ - struct mutex lock; - int kmap_cnt; - void *vaddr; struct sg_table *sg_table; - struct list_head attachments; - struct list_head vmas; + struct mutex kmap_lock; + struct work_struct free; + struct ion_dma_buf_attachment *attachments; + struct list_head map_freelist; + spinlock_t freelist_lock; + void *priv_virt; + void *vaddr; + unsigned int flags; + unsigned int private_flags; + size_t size; + int kmap_refcount; struct msm_iommu_data iommu_data; }; @@ -153,13 +150,8 @@ void ion_buffer_destroy(struct ion_buffer *buffer); */ struct ion_device { struct miscdevice dev; - struct rb_root buffers; - /* buffer_lock used for adding and removing buffers */ - struct mutex buffer_lock; - struct rw_semaphore lock; struct plist_head heaps; - struct dentry *debug_root; - int heap_cnt; + struct rw_semaphore heap_rwsem; }; /* refer to include/linux/pm.h */ @@ -240,7 +232,6 @@ struct ion_heap_ops { */ struct ion_heap { struct plist_node node; - struct ion_device *dev; enum ion_heap_type type; struct ion_heap_ops *ops; unsigned long flags; @@ -248,15 +239,7 @@ struct ion_heap { const char *name; struct shrinker shrinker; void *priv; - struct list_head free_list; - size_t free_list_size; - /* Protect the free list */ - spinlock_t free_lock; - wait_queue_head_t waitqueue; - struct task_struct *task; - atomic_long_t total_allocated; - - int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *); + struct workqueue_struct *wq; }; /** @@ -265,7 +248,10 @@ struct ion_heap { * * indicates whether this ion buffer is cached */ -bool ion_buffer_cached(struct ion_buffer *buffer); +static inline bool ion_buffer_cached(struct ion_buffer *buffer) +{ + return buffer->flags & ION_FLAG_CACHED; +} /** * ion_buffer_fault_user_mappings - fault in user mappings of this buffer @@ -288,7 +274,7 @@ struct ion_device 
*ion_device_create(void); * @dev: the device * @heap: the heap to add */ -void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); +void ion_device_add_heap(struct ion_device *idev, struct ion_heap *heap); /** * some helpers for common operations on buffers using the sg_table @@ -301,8 +287,6 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, int ion_heap_buffer_zero(struct ion_buffer *buffer); int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); -int ion_alloc_fd(size_t len, unsigned int heap_id_mask, unsigned int flags); - /** * ion_heap_init_shrinker * @heap: the heap @@ -313,65 +297,6 @@ int ion_alloc_fd(size_t len, unsigned int heap_id_mask, unsigned int flags); */ void ion_heap_init_shrinker(struct ion_heap *heap); -/** - * ion_heap_init_deferred_free -- initialize deferred free functionality - * @heap: the heap - * - * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will - * be called to setup deferred frees. Calls to free the buffer will - * return immediately and the actual free will occur some time later - */ -int ion_heap_init_deferred_free(struct ion_heap *heap); - -/** - * ion_heap_freelist_add - add a buffer to the deferred free list - * @heap: the heap - * @buffer: the buffer - * - * Adds an item to the deferred freelist. - */ -void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer); - -/** - * ion_heap_freelist_drain - drain the deferred free list - * @heap: the heap - * @size: amount of memory to drain in bytes - * - * Drains the indicated amount of memory from the deferred freelist immediately. - * Returns the total amount freed. The total freed may be higher depending - * on the size of the items in the list, or lower if there is insufficient - * total memory on the freelist. - */ -size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size); - -/** - * ion_heap_freelist_shrink - drain the deferred free - * list, skipping any heap-specific - * pooling or caching mechanisms - * - * @heap: the heap - * @size: amount of memory to drain in bytes - * - * Drains the indicated amount of memory from the deferred freelist immediately. - * Returns the total amount freed. The total freed may be higher depending - * on the size of the items in the list, or lower if there is insufficient - * total memory on the freelist. - * - * Unlike with @ion_heap_freelist_drain, don't put any pages back into - * page pools or otherwise cache the pages. Everything must be - * genuinely free'd back to the system. If you're free'ing from a - * shrinker you probably want to use this. Note that this relies on - * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE - * flag. - */ -size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size); - -/** - * ion_heap_freelist_size - returns the size of the freelist in bytes - * @heap: the heap - */ -size_t ion_heap_freelist_size(struct ion_heap *heap); - /** * functions for creating and destroying the built in ion heaps. 
* architectures can add their own custom architecture specific @@ -447,7 +372,7 @@ struct ion_page_pool { struct list_head high_items; struct list_head low_items; /* Protect the pool */ - struct mutex mutex; + spinlock_t lock; gfp_t gfp_mask; unsigned int order; struct plist_node list; @@ -490,14 +415,16 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, * @size: size in bytes of region to be flushed * @dir: direction of dma transfer */ -void ion_pages_sync_for_device(struct device *dev, struct page *page, - size_t size, enum dma_data_direction dir); +static inline void ion_pages_sync_for_device(struct device *dev, + struct page *page, size_t size, + enum dma_data_direction dir) +{ + struct scatterlist sg; -int ion_walk_heaps(int heap_id, enum ion_heap_type type, void *data, - int (*f)(struct ion_heap *heap, void *data)); - -long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); - -int ion_query_heaps(struct ion_heap_query *query); + sg_init_table(&sg, 1); + sg_set_page(&sg, page, size, 0); + sg_dma_address(&sg) = page_to_phys(page); + dma_sync_sg_for_device(dev, &sg, 1, dir); +} #endif /* _ION_H */ diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c index 19c8ad23cf06..d31fc0aaf123 100644 --- a/drivers/staging/android/ion/ion_carveout_heap.c +++ b/drivers/staging/android/ion/ion_carveout_heap.c @@ -246,14 +246,6 @@ static void ion_sc_heap_free(struct ion_buffer *buffer) static int ion_secure_carveout_pm_freeze(struct ion_heap *heap) { - long sz; - - sz = atomic_long_read(&heap->total_allocated); - if (sz) { - pr_err("%s: %lx bytes won't be saved across hibernation. Aborting.", - __func__, sz); - return -EINVAL; - } return 0; } diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index 07aab5346ee0..a157a683413e 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -278,15 +278,6 @@ static int ion_secure_cma_map_user(struct ion_heap *mapper, static int ion_secure_cma_pm_freeze(struct ion_heap *heap) { - long sz; - - sz = atomic_long_read(&heap->total_allocated); - if (sz) { - pr_err("%s: %lx bytes won't be saved across hibernation. 
Aborting.", - __func__, sz); - return -EINVAL; - } - return 0; } diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c index 8d609eca7153..a3e30d9bf647 100644 --- a/drivers/staging/android/ion/ion_cma_secure_heap.c +++ b/drivers/staging/android/ion/ion_cma_secure_heap.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -140,9 +139,6 @@ static int ion_secure_cma_add_to_pool( int ret = 0; struct ion_cma_alloc_chunk *chunk; - trace_ion_secure_cma_add_to_pool_start(len, - atomic_read(&sheap->total_pool_size), - prefetch); mutex_lock(&sheap->chunk_lock); chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); @@ -177,10 +173,6 @@ out_free: out: mutex_unlock(&sheap->chunk_lock); - trace_ion_secure_cma_add_to_pool_end(len, - atomic_read(&sheap->total_pool_size), - prefetch); - return ret; } @@ -265,7 +257,6 @@ int ion_secure_cma_prefetch(struct ion_heap *heap, void *data) len = diff; sheap->last_alloc = len; - trace_ion_prefetching(sheap->last_alloc); schedule_work(&sheap->work); return 0; @@ -359,8 +350,6 @@ __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr) struct list_head *entry, *_n; unsigned long drained_size = 0, skipped_size = 0; - trace_ion_secure_cma_shrink_pool_start(drained_size, skipped_size); - list_for_each_safe(entry, _n, &sheap->chunks) { struct ion_cma_alloc_chunk *chunk = container_of(entry, struct ion_cma_alloc_chunk, entry); @@ -377,7 +366,6 @@ __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr) } } - trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size); return drained_size; } @@ -695,14 +683,12 @@ static int ion_secure_cma_allocate(struct ion_heap *heap, __func__, heap->name); return -ENOMEM; } - trace_ion_secure_cma_allocate_start(heap->name, len, flags); if (!allow_non_contig) buf = __ion_secure_cma_allocate(heap, buffer, len, flags); else buf = __ion_secure_cma_allocate_non_contig(heap, buffer, len, flags); - trace_ion_secure_cma_allocate_end(heap->name, len, flags); if (buf) { int ret; @@ -711,11 +697,7 @@ static int ion_secure_cma_allocate(struct ion_heap *heap, __func__); ret = 1; } else { - trace_ion_cp_secure_buffer_start(heap->name, len, - flags); ret = msm_secure_table(buf->table); - trace_ion_cp_secure_buffer_end(heap->name, len, - flags); } if (ret) { struct ion_cma_secure_heap *sheap = diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index c682f1ea745c..fd8e4fba1ecc 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -160,153 +160,26 @@ int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot) return ion_heap_sglist_zero(&sg, 1, pgprot); } -void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer) -{ - spin_lock(&heap->free_lock); - list_add(&buffer->list, &heap->free_list); - heap->free_list_size += buffer->size; - spin_unlock(&heap->free_lock); - wake_up(&heap->waitqueue); -} - -size_t ion_heap_freelist_size(struct ion_heap *heap) -{ - size_t size; - - spin_lock(&heap->free_lock); - size = heap->free_list_size; - spin_unlock(&heap->free_lock); - - return size; -} - -static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size, - bool skip_pools) -{ - struct ion_buffer *buffer; - size_t total_drained = 0; - - if (ion_heap_freelist_size(heap) == 0) - return 0; - - spin_lock(&heap->free_lock); - if (size == 0) - size = heap->free_list_size; - - while (!list_empty(&heap->free_list)) { - if 
(total_drained >= size) - break; - buffer = list_first_entry(&heap->free_list, struct ion_buffer, - list); - list_del(&buffer->list); - heap->free_list_size -= buffer->size; - if (skip_pools) - buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE; - total_drained += buffer->size; - spin_unlock(&heap->free_lock); - ion_buffer_destroy(buffer); - spin_lock(&heap->free_lock); - } - spin_unlock(&heap->free_lock); - - return total_drained; -} - -size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size) -{ - return _ion_heap_freelist_drain(heap, size, false); -} - -size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size) -{ - return _ion_heap_freelist_drain(heap, size, true); -} - -static int ion_heap_deferred_free(void *data) -{ - struct ion_heap *heap = data; - - while (true) { - struct ion_buffer *buffer; - - wait_event_freezable(heap->waitqueue, - ion_heap_freelist_size(heap) > 0); - - spin_lock(&heap->free_lock); - if (list_empty(&heap->free_list)) { - spin_unlock(&heap->free_lock); - continue; - } - buffer = list_first_entry(&heap->free_list, struct ion_buffer, - list); - list_del(&buffer->list); - heap->free_list_size -= buffer->size; - spin_unlock(&heap->free_lock); - ion_buffer_destroy(buffer); - } - - return 0; -} - -int ion_heap_init_deferred_free(struct ion_heap *heap) -{ -#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE - struct sched_param param = { .sched_priority = 0 }; -#endif - INIT_LIST_HEAD(&heap->free_list); - init_waitqueue_head(&heap->waitqueue); - heap->task = kthread_run(ion_heap_deferred_free, heap, - "%s", heap->name); - if (IS_ERR(heap->task)) { - pr_err("%s: creating thread for deferred free failed\n", - __func__); - return PTR_ERR_OR_ZERO(heap->task); - } -#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE - sched_setscheduler(heap->task, SCHED_IDLE, ¶m); -#endif - return 0; -} - static unsigned long ion_heap_shrink_count(struct shrinker *shrinker, struct shrink_control *sc) { - struct ion_heap *heap = container_of(shrinker, struct ion_heap, - shrinker); - int total = 0; + struct ion_heap *heap = container_of(shrinker, typeof(*heap), shrinker); - total = ion_heap_freelist_size(heap) / PAGE_SIZE; if (heap->ops->shrink) - total += heap->ops->shrink(heap, sc->gfp_mask, 0); - return total; + return heap->ops->shrink(heap, sc->gfp_mask, 0); + + return 0; } static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker, struct shrink_control *sc) { - struct ion_heap *heap = container_of(shrinker, struct ion_heap, - shrinker); - int freed = 0; - int to_scan = sc->nr_to_scan; - - if (to_scan == 0) - return 0; - - /* - * shrink the free list first, no point in zeroing the memory if we're - * just going to reclaim it. Also, skip any possible page pooling. - */ - if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) - freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) / - PAGE_SIZE; - - to_scan -= freed; - if (to_scan <= 0) - return freed; + struct ion_heap *heap = container_of(shrinker, typeof(*heap), shrinker); if (heap->ops->shrink) - freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan); - return freed; + return heap->ops->shrink(heap, sc->gfp_mask, sc->nr_to_scan); + + return 0; } void ion_heap_init_shrinker(struct ion_heap *heap) diff --git a/drivers/staging/android/ion/ion_kernel.h b/drivers/staging/android/ion/ion_kernel.h index a4c84666546e..bfafbeee98a8 100644 --- a/drivers/staging/android/ion/ion_kernel.h +++ b/drivers/staging/android/ion/ion_kernel.h @@ -22,8 +22,13 @@ * Allocates an ion buffer. * Use IS_ERR on returned pointer to check for success. 
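Since ion_heap_shrink_count() and ion_heap_shrink_scan() now collapse to a single ops->shrink() call, the count/scan split rests entirely on the callback's convention: nr_to_scan == 0 only asks how many pages could be reclaimed, while a non-zero value frees up to that many pages and returns the number actually freed. A sketch of a heap-side shrink callback honoring that contract (example_heap and its single pool are illustrative, and it assumes ion_page_pool_shrink() keeps returning a count when passed zero):

struct example_heap {
	struct ion_heap heap;
	struct ion_page_pool *pool;	/* illustrative single pool */
};

static int example_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
			       int nr_to_scan)
{
	struct example_heap *eh = container_of(heap, typeof(*eh), heap);

	/*
	 * nr_to_scan == 0 comes from ion_heap_shrink_count() and means
	 * "report reclaimable pages"; a non-zero value frees up to that
	 * many pages and returns how many were freed.
	 */
	return ion_page_pool_shrink(eh->pool, gfp_mask, nr_to_scan);
}

With the freelist gone, anything a heap wants the shrinker to reclaim has to be reachable from this callback alone.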
*/ -struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask, - unsigned int flags); +struct dma_buf *ion_alloc_dmabuf(size_t len, unsigned int heap_id_mask, + unsigned int flags); +static inline struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask, + unsigned int flags) +{ + return ion_alloc_dmabuf(len, heap_id_mask, flags); +} #else diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index 82e5007b45b4..bf6554dddf11 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -48,7 +48,7 @@ static void ion_page_pool_free_pages(struct ion_page_pool *pool, static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) { - mutex_lock(&pool->mutex); + spin_lock(&pool->lock); if (PageHighMem(page)) { list_add_tail(&page->lru, &pool->high_items); pool->high_count++; @@ -60,7 +60,7 @@ static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) nr_total_pages += 1 << pool->order; mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE, 1 << pool->order); - mutex_unlock(&pool->mutex); + spin_unlock(&pool->lock); return 0; } @@ -94,12 +94,12 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool) if (fatal_signal_pending(current)) return ERR_PTR(-EINTR); - if (*from_pool && mutex_trylock(&pool->mutex)) { + if (*from_pool && spin_trylock(&pool->lock)) { if (pool->high_count) page = ion_page_pool_remove(pool, true); else if (pool->low_count) page = ion_page_pool_remove(pool, false); - mutex_unlock(&pool->mutex); + spin_unlock(&pool->lock); } if (!page) { page = ion_page_pool_alloc_pages(pool); @@ -121,12 +121,12 @@ struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool) if (!pool) return ERR_PTR(-EINVAL); - if (mutex_trylock(&pool->mutex)) { + if (spin_trylock(&pool->lock)) { if (pool->high_count) page = ion_page_pool_remove(pool, true); else if (pool->low_count) page = ion_page_pool_remove(pool, false); - mutex_unlock(&pool->mutex); + spin_unlock(&pool->lock); } if (!page) @@ -185,16 +185,16 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, while (freed < nr_to_scan) { struct page *page; - mutex_lock(&pool->mutex); + spin_lock(&pool->lock); if (pool->low_count) { page = ion_page_pool_remove(pool, false); } else if (high && pool->high_count) { page = ion_page_pool_remove(pool, true); } else { - mutex_unlock(&pool->mutex); + spin_unlock(&pool->lock); break; } - mutex_unlock(&pool->mutex); + spin_unlock(&pool->lock); ion_page_pool_free_pages(pool, page); freed += (1 << pool->order); } @@ -215,7 +215,7 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order, INIT_LIST_HEAD(&pool->high_items); pool->gfp_mask = gfp_mask; pool->order = order; - mutex_init(&pool->mutex); + spin_lock_init(&pool->lock); plist_node_init(&pool->list, order); if (cached) pool->cached = true; diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index af1ea09141ab..48d9230dc45a 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -513,106 +513,6 @@ static struct ion_heap_ops system_heap_ops = { .shrink = ion_system_heap_shrink, }; -static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s, - void *unused) -{ - - struct ion_system_heap *sys_heap = container_of( - heap, struct ion_system_heap, heap); - bool use_seq = s; - unsigned long uncached_total = 0; - 
unsigned long cached_total = 0; - unsigned long secure_total = 0; - struct ion_page_pool *pool; - int i, j; - - for (i = 0; i < NUM_ORDERS; i++) { - pool = sys_heap->uncached_pools[i]; - if (use_seq) { - seq_printf(s, - "%d order %u highmem pages in uncached pool = %lu total\n", - pool->high_count, pool->order, - (1 << pool->order) * PAGE_SIZE * - pool->high_count); - seq_printf(s, - "%d order %u lowmem pages in uncached pool = %lu total\n", - pool->low_count, pool->order, - (1 << pool->order) * PAGE_SIZE * - pool->low_count); - } - - uncached_total += (1 << pool->order) * PAGE_SIZE * - pool->high_count; - uncached_total += (1 << pool->order) * PAGE_SIZE * - pool->low_count; - } - - for (i = 0; i < NUM_ORDERS; i++) { - pool = sys_heap->cached_pools[i]; - if (use_seq) { - seq_printf(s, - "%d order %u highmem pages in cached pool = %lu total\n", - pool->high_count, pool->order, - (1 << pool->order) * PAGE_SIZE * - pool->high_count); - seq_printf(s, - "%d order %u lowmem pages in cached pool = %lu total\n", - pool->low_count, pool->order, - (1 << pool->order) * PAGE_SIZE * - pool->low_count); - } - - cached_total += (1 << pool->order) * PAGE_SIZE * - pool->high_count; - cached_total += (1 << pool->order) * PAGE_SIZE * - pool->low_count; - } - - for (i = 0; i < NUM_ORDERS; i++) { - for (j = 0; j < VMID_LAST; j++) { - if (!is_secure_vmid_valid(j)) - continue; - pool = sys_heap->secure_pools[j][i]; - - if (use_seq) { - seq_printf(s, - "VMID %d: %d order %u highmem pages in secure pool = %lu total\n", - j, pool->high_count, pool->order, - (1 << pool->order) * PAGE_SIZE * - pool->high_count); - seq_printf(s, - "VMID %d: %d order %u lowmem pages in secure pool = %lu total\n", - j, pool->low_count, pool->order, - (1 << pool->order) * PAGE_SIZE * - pool->low_count); - } - - secure_total += (1 << pool->order) * PAGE_SIZE * - pool->high_count; - secure_total += (1 << pool->order) * PAGE_SIZE * - pool->low_count; - } - } - - if (use_seq) { - seq_puts(s, "--------------------------------------------\n"); - seq_printf(s, "uncached pool = %lu cached pool = %lu secure pool = %lu\n", - uncached_total, cached_total, secure_total); - seq_printf(s, "pool total (uncached + cached + secure) = %lu\n", - uncached_total + cached_total + secure_total); - seq_puts(s, "--------------------------------------------\n"); - } else { - pr_info("-------------------------------------------------\n"); - pr_info("uncached pool = %lu cached pool = %lu secure pool = %lu\n", - uncached_total, cached_total, secure_total); - pr_info("pool total (uncached + cached + secure) = %lu\n", - uncached_total + cached_total + secure_total); - pr_info("-------------------------------------------------\n"); - } - - return 0; -} - static void ion_system_heap_destroy_pools(struct ion_page_pool **pools) { int i; @@ -678,7 +578,6 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data) mutex_init(&heap->split_page_mutex); - heap->heap.debug_show = ion_system_heap_debug_show; return &heap->heap; destroy_uncached_pools: diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c index 64c3c14e78c2..da8973501ae4 100644 --- a/drivers/staging/android/ion/ion_system_secure_heap.c +++ b/drivers/staging/android/ion/ion_system_secure_heap.c @@ -356,20 +356,12 @@ static int ion_system_secure_heap_pm_freeze(struct ion_heap *heap) { struct ion_system_secure_heap *secure_heap; unsigned long count; - long sz; struct shrink_control sc = { .gfp_mask = GFP_HIGHUSER, }; secure_heap = 
container_of(heap, struct ion_system_secure_heap, heap); - sz = atomic_long_read(&heap->total_allocated); - if (sz) { - pr_err("%s: %lx bytes won't be saved across hibernation. Aborting.", - __func__, sz); - return -EINVAL; - } - /* Since userspace is frozen, no more requests will be queued */ cancel_delayed_work_sync(&secure_heap->prefetch_work); diff --git a/include/trace/events/ion.h b/include/trace/events/ion.h deleted file mode 100644 index ef71f8bf8ce7..000000000000 --- a/include/trace/events/ion.h +++ /dev/null @@ -1,180 +0,0 @@ -/* Copyright (c) 2018 The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM ion - -#if !defined(_TRACE_ION_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_ION_H - -#include -#include - -#define DEV_NAME_NONE "None" - -DECLARE_EVENT_CLASS(ion_dma_map_cmo_class, - - TP_PROTO(const struct device *dev, const char *name, - bool cached, bool hlos_accessible, unsigned long map_attrs, - enum dma_data_direction dir), - - TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir), - - TP_STRUCT__entry( - __string(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE) - __string(name, name) - __field(bool, cached) - __field(bool, hlos_accessible) - __field(unsigned long, map_attrs) - __field(enum dma_data_direction, dir) - ), - - TP_fast_assign( - __assign_str(dev_name, dev ? 
dev_name(dev) : DEV_NAME_NONE); - __assign_str(name, name); - __entry->cached = cached; - __entry->hlos_accessible = hlos_accessible; - __entry->map_attrs = map_attrs; - __entry->dir = dir; - ), - - TP_printk("dev=%s name=%s cached=%d access=%d map_attrs=0x%lx dir=%d", - __get_str(dev_name), - __get_str(name), - __entry->cached, - __entry->hlos_accessible, - __entry->map_attrs, - __entry->dir) -); - -DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_map_cmo_apply, - - TP_PROTO(const struct device *dev, const char *name, - bool cached, bool hlos_accessible, unsigned long map_attrs, - enum dma_data_direction dir), - - TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir) -); - -DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_map_cmo_skip, - - TP_PROTO(const struct device *dev, const char *name, - bool cached, bool hlos_accessible, unsigned long map_attrs, - enum dma_data_direction dir), - - TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir) -); - -DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_unmap_cmo_apply, - - TP_PROTO(const struct device *dev, const char *name, - bool cached, bool hlos_accessible, unsigned long map_attrs, - enum dma_data_direction dir), - - TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir) -); - -DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_unmap_cmo_skip, - - TP_PROTO(const struct device *dev, const char *name, - bool cached, bool hlos_accessible, unsigned long map_attrs, - enum dma_data_direction dir), - - TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir) -); - -DECLARE_EVENT_CLASS(ion_access_cmo_class, - - TP_PROTO(const struct device *dev, const char *name, - bool cached, bool hlos_accessible, enum dma_data_direction dir, - bool only_mapped), - - TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped), - - TP_STRUCT__entry( - __string(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE) - __string(name, name) - __field(bool, cached) - __field(bool, hlos_accessible) - __field(enum dma_data_direction, dir) - __field(bool, only_mapped) - ), - - TP_fast_assign( - __assign_str(dev_name, dev ? 
dev_name(dev) : DEV_NAME_NONE); - __assign_str(name, name); - __entry->cached = cached; - __entry->hlos_accessible = hlos_accessible; - __entry->dir = dir; - __entry->only_mapped = only_mapped; - ), - - TP_printk("dev=%s name=%s cached=%d access=%d dir=%d, only_mapped=%d", - __get_str(dev_name), - __get_str(name), - __entry->cached, - __entry->hlos_accessible, - __entry->dir, - __entry->only_mapped) -); - -DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_cmo_apply, - TP_PROTO(const struct device *dev, const char *name, - bool cached, bool hlos_accessible, enum dma_data_direction dir, - bool only_mapped), - - TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped) -); - -DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_cmo_skip, - TP_PROTO(const struct device *dev, const char *name, - bool cached, bool hlos_accessible, enum dma_data_direction dir, - bool only_mapped), - - TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped) -); - -DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_notmapped, - TP_PROTO(const struct device *dev, const char *name, - bool cached, bool hlos_accessible, enum dma_data_direction dir, - bool only_mapped), - - TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped) -); - -DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_cmo_apply, - TP_PROTO(const struct device *dev, const char *name, - bool cached, bool hlos_accessible, enum dma_data_direction dir, - bool only_mapped), - - TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped) -); - -DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_cmo_skip, - TP_PROTO(const struct device *dev, const char *name, - bool cached, bool hlos_accessible, enum dma_data_direction dir, - bool only_mapped), - - TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped) -); - -DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_notmapped, - TP_PROTO(const struct device *dev, const char *name, - bool cached, bool hlos_accessible, enum dma_data_direction dir, - bool only_mapped), - - TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped) -); -#endif /* _TRACE_ION_H */ - -#include -
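For in-kernel clients, the allocation entry point kept in ion_kernel.h stays a thin wrapper around ion_alloc_dmabuf(), so the IS_ERR() convention noted in its comment still applies. A small usage sketch; the caller, its heap_id_mask parameter, the ION_FLAG_CACHED flag choice and the error message are illustrative, not part of the patch:

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/kernel.h>

#include "ion_kernel.h"

static struct dma_buf *example_ion_alloc(size_t len, unsigned int heap_id_mask)
{
	struct dma_buf *dbuf;

	/* ion_alloc() simply forwards to ion_alloc_dmabuf() */
	dbuf = ion_alloc(len, heap_id_mask, ION_FLAG_CACHED);
	if (IS_ERR(dbuf))
		pr_err("example: ion_alloc(%zu) failed: %ld\n",
		       len, PTR_ERR(dbuf));

	return dbuf;
}

The returned dma_buf is dropped with dma_buf_put() as usual once the caller is done with it.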