ion: Rewrite to improve clarity and performance

The ION driver suffers from massive code bloat caused by its excessive
debug features, and from the poor lock usage that comes with them.
Multiple locks in ION exist only to make the debug features thread-safe,
which hurts ION's performance at its actual job.

Numerous code paths in ION take mutexes they do not need, or hold them
for longer than necessary. This results not only in unwanted lock
contention, but also in long stalls when a contended mutex puts the
calling thread to sleep. Lock usage throughout ION follows this pattern,
which hurts performance across the board. Furthermore, one big mutex is
used almost everywhere, degrading performance through unnecessary lock
overhead.

Instead of one big mutex, multiple fine-grained locks are now used, each
protecting only the state that actually needs it, which improves
performance.
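
As a rough, hypothetical sketch of what the finer granularity buys (the
kmap_lock, kmap_refcount, vaddr and heap fields match the new ion_buffer
layout in the ion.h hunk below, but the function body is illustrative,
not the literal patch): kernel-mapping state is serialized by its own
small mutex, so mmap, DMA mapping and freeing no longer contend on the
same lock.

	#include <linux/err.h>
	#include <linux/mutex.h>

	#include "ion.h"	/* struct ion_buffer, struct ion_heap */

	/* Illustrative only: the kmap path takes a mutex that protects nothing
	 * but the kernel-mapping refcount and vaddr, instead of a buffer-wide
	 * lock shared with every other operation on the buffer. */
	static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
	{
		void *vaddr;

		mutex_lock(&buffer->kmap_lock);
		if (buffer->kmap_refcount) {
			vaddr = buffer->vaddr;
			buffer->kmap_refcount++;
		} else {
			vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
			if (!IS_ERR_OR_NULL(vaddr)) {
				buffer->vaddr = vaddr;
				buffer->kmap_refcount = 1;
			}
		}
		mutex_unlock(&buffer->kmap_lock);

		return vaddr;
	}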

Additionally, dup_sg_table is called very frequently and sits in the
display rendering path. Speed it up by copying scatterlists in
page-sized chunks rather than iterating over entries one at a time. Note
that sg_alloc_table already zeroes out `table`, so there is no need to
request zeroed memory from the allocator (plain kmalloc suffices).
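
A minimal sketch of the chunked copy, assuming the standard
sg_alloc_table() layout in which scatterlist entries live in page-sized
arrays of SG_MAX_SINGLE_ALLOC entries linked by a trailing chain entry
(the dup_sg_table() in the actual patch may differ in detail):

	#include <linux/err.h>
	#include <linux/scatterlist.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	static struct sg_table *dup_sg_table(struct sg_table *table)
	{
		unsigned int nents = table->nents;
		struct sg_table *new_table;
		struct scatterlist *d, *s;

		/* kmalloc is enough here: sg_alloc_table() memsets *new_table */
		new_table = kmalloc(sizeof(*new_table), GFP_KERNEL);
		if (!new_table)
			return ERR_PTR(-ENOMEM);

		if (sg_alloc_table(new_table, nents, GFP_KERNEL)) {
			kfree(new_table);
			return ERR_PTR(-ENOMEM);
		}

		/* Copy whole page-sized chunks of entries at once. The last slot
		 * of every full chunk is a chain pointer and must not be
		 * overwritten. */
		d = new_table->sgl;
		s = table->sgl;
		while (nents > SG_MAX_SINGLE_ALLOC) {
			memcpy(d, s, (SG_MAX_SINGLE_ALLOC - 1) * sizeof(*d));
			nents -= SG_MAX_SINGLE_ALLOC - 1;
			d = sg_chain_ptr(&d[SG_MAX_SINGLE_ALLOC - 1]);
			s = sg_chain_ptr(&s[SG_MAX_SINGLE_ALLOC - 1]);
		}
		memcpy(d, s, nents * sizeof(*d));

		return new_table;
	}

Both tables are allocated with the same nents, so their chunk layout
matches and the end marker set by sg_alloc_table() lands in the same
slot that the final memcpy fills.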

This also adds a lock-less caching system for DMA attachments and their
respective sg_table copies. It significantly reduces overhead for code
that frequently maps and unmaps DMA buffers, and it speeds up cache
maintenance because the list of buffer attachments can now be walked
without taking a lock. This is safe because attaching to and accessing a
single ION buffer are never interleaved.
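
For illustration, a hypothetical shape of that attachment cache (the
struct members and helper names below are assumptions; only the
attachments pointer itself appears in the ion.h hunk further down):
attachments hang off a plain singly-linked list with no lock, and cache
maintenance simply walks it.

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	#include "ion.h"	/* struct ion_buffer */

	/* Hypothetical layout of one cached attachment. */
	struct ion_dma_buf_attachment {
		struct ion_dma_buf_attachment *next;
		struct device *dev;
		struct sg_table *table;	/* cached dup of the buffer's sg_table */
		bool dma_mapped;
	};

	static void ion_buffer_add_attachment(struct ion_buffer *buffer,
					      struct ion_dma_buf_attachment *a)
	{
		/* Plain pointer push, no lock: dma-buf attach/detach never runs
		 * concurrently with map/unmap or CPU access for this buffer. */
		a->next = buffer->attachments;
		buffer->attachments = a;
	}

	static void ion_buffer_sync_for_cpu(struct ion_buffer *buffer,
					    enum dma_data_direction dir)
	{
		struct ion_dma_buf_attachment *a;

		/* Lock-free walk of the attachment list for cache maintenance. */
		for (a = buffer->attachments; a; a = a->next) {
			if (a->dma_mapped)
				dma_sync_sg_for_cpu(a->dev, a->table->sgl,
						    a->table->nents, dir);
		}
	}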

Overall, just rewrite ION entirely to fix its deficiencies. This
optimizes ION for excellent performance and discards its debug cruft.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Change-Id: I0a21435be1eb409cfe140eec8da507cc35f060dd
Signed-off-by: Forenche <prahul2003@gmail.com>
Sultan Alsawaf, 2021-01-28 17:07:00 -08:00, committed by Forenche
commit 0f02cfcedc (parent cde6ea8201)
14 changed files with 425 additions and 1874 deletions

View File

@ -44,18 +44,6 @@ config ION_CMA_HEAP
by the Contiguous Memory Allocator (CMA). If your system has these
regions, you should say Y here.
config ION_FORCE_DMA_SYNC
bool "Force ION to always DMA sync buffer memory"
depends on ION
help
Force ION to DMA sync buffer memory when it is allocated and to
always DMA sync the buffer memory on calls to begin/end cpu
access. This makes ION DMA sync behavior similar to that of the
older version of ION.
We generally don't want to enable this config as it breaks the
cache maintenance model.
If you're not sure say N here.
config ION_DEFER_FREE_NO_SCHED_IDLE
bool "Increases the priority of ION defer free thead"
depends on ION

View File

@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o \
obj-$(CONFIG_ION) += ion.o ion_heap.o \
ion_page_pool.o ion_system_heap.o \
ion_carveout_heap.o ion_chunk_heap.o \
ion_system_secure_heap.o ion_cma_heap.o \

View File

@ -1,138 +0,0 @@
/*
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include "ion.h"
#include "ion_system_secure_heap.h"
union ion_ioctl_arg {
struct ion_allocation_data allocation;
struct ion_heap_query query;
struct ion_prefetch_data prefetch_data;
};
static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
{
int ret = 0;
switch (cmd) {
case ION_IOC_HEAP_QUERY:
ret = arg->query.reserved0 != 0;
ret |= arg->query.reserved1 != 0;
ret |= arg->query.reserved2 != 0;
break;
default:
break;
}
return ret ? -EINVAL : 0;
}
/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
switch (cmd) {
default:
return _IOC_DIR(cmd);
}
}
long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret = 0;
unsigned int dir;
union ion_ioctl_arg data;
dir = ion_ioctl_dir(cmd);
if (_IOC_SIZE(cmd) > sizeof(data))
return -EINVAL;
/*
* The copy_from_user is unconditional here for both read and write
* to do the validate. If there is no write for the ioctl, the
* buffer is cleared
*/
if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
return -EFAULT;
ret = validate_ioctl_arg(cmd, &data);
if (ret) {
pr_warn_once("%s: ioctl validate failed\n", __func__);
return ret;
}
if (!(dir & _IOC_WRITE))
memset(&data, 0, sizeof(data));
switch (cmd) {
case ION_IOC_ALLOC:
{
int fd;
fd = ion_alloc_fd(data.allocation.len,
data.allocation.heap_id_mask,
data.allocation.flags);
if (fd < 0)
return fd;
data.allocation.fd = fd;
break;
}
case ION_IOC_HEAP_QUERY:
ret = ion_query_heaps(&data.query);
break;
case ION_IOC_PREFETCH:
{
int ret;
ret = ion_walk_heaps(data.prefetch_data.heap_id,
(enum ion_heap_type)
ION_HEAP_TYPE_SYSTEM_SECURE,
(void *)&data.prefetch_data,
ion_system_secure_heap_prefetch);
if (ret)
return ret;
break;
}
case ION_IOC_DRAIN:
{
int ret;
ret = ion_walk_heaps(data.prefetch_data.heap_id,
(enum ion_heap_type)
ION_HEAP_TYPE_SYSTEM_SECURE,
(void *)&data.prefetch_data,
ion_system_secure_heap_drain);
if (ret)
return ret;
break;
}
default:
return -ENOTTY;
}
if (dir & _IOC_READ) {
if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
return -EFAULT;
}
return ret;
}

File diff suppressed because it is too large

View File

@ -121,24 +121,21 @@ struct ion_vma_list {
* @sg_table: the sg table for the buffer if dmap_cnt is not zero
* @vmas: list of vma's mapping this buffer
*/
struct ion_dma_buf_attachment;
struct ion_buffer {
union {
struct rb_node node;
struct list_head list;
};
struct ion_device *dev;
struct ion_heap *heap;
unsigned long flags;
unsigned long private_flags;
size_t size;
void *priv_virt;
/* Protect ion buffer */
struct mutex lock;
int kmap_cnt;
void *vaddr;
struct sg_table *sg_table;
struct list_head attachments;
struct list_head vmas;
struct mutex kmap_lock;
struct work_struct free;
struct ion_dma_buf_attachment *attachments;
struct list_head map_freelist;
spinlock_t freelist_lock;
void *priv_virt;
void *vaddr;
unsigned int flags;
unsigned int private_flags;
size_t size;
int kmap_refcount;
struct msm_iommu_data iommu_data;
};
@ -153,13 +150,8 @@ void ion_buffer_destroy(struct ion_buffer *buffer);
*/
struct ion_device {
struct miscdevice dev;
struct rb_root buffers;
/* buffer_lock used for adding and removing buffers */
struct mutex buffer_lock;
struct rw_semaphore lock;
struct plist_head heaps;
struct dentry *debug_root;
int heap_cnt;
struct rw_semaphore heap_rwsem;
};
/* refer to include/linux/pm.h */
@ -240,7 +232,6 @@ struct ion_heap_ops {
*/
struct ion_heap {
struct plist_node node;
struct ion_device *dev;
enum ion_heap_type type;
struct ion_heap_ops *ops;
unsigned long flags;
@ -248,15 +239,7 @@ struct ion_heap {
const char *name;
struct shrinker shrinker;
void *priv;
struct list_head free_list;
size_t free_list_size;
/* Protect the free list */
spinlock_t free_lock;
wait_queue_head_t waitqueue;
struct task_struct *task;
atomic_long_t total_allocated;
int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
struct workqueue_struct *wq;
};
/**
@ -265,7 +248,10 @@ struct ion_heap {
*
* indicates whether this ion buffer is cached
*/
bool ion_buffer_cached(struct ion_buffer *buffer);
static inline bool ion_buffer_cached(struct ion_buffer *buffer)
{
return buffer->flags & ION_FLAG_CACHED;
}
/**
* ion_buffer_fault_user_mappings - fault in user mappings of this buffer
@ -288,7 +274,7 @@ struct ion_device *ion_device_create(void);
* @dev: the device
* @heap: the heap to add
*/
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
void ion_device_add_heap(struct ion_device *idev, struct ion_heap *heap);
/**
* some helpers for common operations on buffers using the sg_table
@ -301,8 +287,6 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
int ion_heap_buffer_zero(struct ion_buffer *buffer);
int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
int ion_alloc_fd(size_t len, unsigned int heap_id_mask, unsigned int flags);
/**
* ion_heap_init_shrinker
* @heap: the heap
@ -313,65 +297,6 @@ int ion_alloc_fd(size_t len, unsigned int heap_id_mask, unsigned int flags);
*/
void ion_heap_init_shrinker(struct ion_heap *heap);
/**
* ion_heap_init_deferred_free -- initialize deferred free functionality
* @heap: the heap
*
* If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
* be called to setup deferred frees. Calls to free the buffer will
* return immediately and the actual free will occur some time later
*/
int ion_heap_init_deferred_free(struct ion_heap *heap);
/**
* ion_heap_freelist_add - add a buffer to the deferred free list
* @heap: the heap
* @buffer: the buffer
*
* Adds an item to the deferred freelist.
*/
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
/**
* ion_heap_freelist_drain - drain the deferred free list
* @heap: the heap
* @size: amount of memory to drain in bytes
*
* Drains the indicated amount of memory from the deferred freelist immediately.
* Returns the total amount freed. The total freed may be higher depending
* on the size of the items in the list, or lower if there is insufficient
* total memory on the freelist.
*/
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
/**
* ion_heap_freelist_shrink - drain the deferred free
* list, skipping any heap-specific
* pooling or caching mechanisms
*
* @heap: the heap
* @size: amount of memory to drain in bytes
*
* Drains the indicated amount of memory from the deferred freelist immediately.
* Returns the total amount freed. The total freed may be higher depending
* on the size of the items in the list, or lower if there is insufficient
* total memory on the freelist.
*
* Unlike with @ion_heap_freelist_drain, don't put any pages back into
* page pools or otherwise cache the pages. Everything must be
* genuinely free'd back to the system. If you're free'ing from a
* shrinker you probably want to use this. Note that this relies on
* the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
* flag.
*/
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size);
/**
* ion_heap_freelist_size - returns the size of the freelist in bytes
* @heap: the heap
*/
size_t ion_heap_freelist_size(struct ion_heap *heap);
/**
* functions for creating and destroying the built in ion heaps.
* architectures can add their own custom architecture specific
@ -447,7 +372,7 @@ struct ion_page_pool {
struct list_head high_items;
struct list_head low_items;
/* Protect the pool */
struct mutex mutex;
spinlock_t lock;
gfp_t gfp_mask;
unsigned int order;
struct plist_node list;
@ -490,14 +415,16 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
* @size: size in bytes of region to be flushed
* @dir: direction of dma transfer
*/
void ion_pages_sync_for_device(struct device *dev, struct page *page,
size_t size, enum dma_data_direction dir);
static inline void ion_pages_sync_for_device(struct device *dev,
struct page *page, size_t size,
enum dma_data_direction dir)
{
struct scatterlist sg;
int ion_walk_heaps(int heap_id, enum ion_heap_type type, void *data,
int (*f)(struct ion_heap *heap, void *data));
long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
int ion_query_heaps(struct ion_heap_query *query);
sg_init_table(&sg, 1);
sg_set_page(&sg, page, size, 0);
sg_dma_address(&sg) = page_to_phys(page);
dma_sync_sg_for_device(dev, &sg, 1, dir);
}
#endif /* _ION_H */

View File

@ -246,14 +246,6 @@ static void ion_sc_heap_free(struct ion_buffer *buffer)
static int ion_secure_carveout_pm_freeze(struct ion_heap *heap)
{
long sz;
sz = atomic_long_read(&heap->total_allocated);
if (sz) {
pr_err("%s: %lx bytes won't be saved across hibernation. Aborting.",
__func__, sz);
return -EINVAL;
}
return 0;
}

View File

@ -278,15 +278,6 @@ static int ion_secure_cma_map_user(struct ion_heap *mapper,
static int ion_secure_cma_pm_freeze(struct ion_heap *heap)
{
long sz;
sz = atomic_long_read(&heap->total_allocated);
if (sz) {
pr_err("%s: %lx bytes won't be saved across hibernation. Aborting.",
__func__, sz);
return -EINVAL;
}
return 0;
}

View File

@ -23,7 +23,6 @@
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
#include <trace/events/kmem.h>
#include <soc/qcom/secure_buffer.h>
#include <asm/cacheflush.h>
@ -140,9 +139,6 @@ static int ion_secure_cma_add_to_pool(
int ret = 0;
struct ion_cma_alloc_chunk *chunk;
trace_ion_secure_cma_add_to_pool_start(len,
atomic_read(&sheap->total_pool_size),
prefetch);
mutex_lock(&sheap->chunk_lock);
chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
@ -177,10 +173,6 @@ out_free:
out:
mutex_unlock(&sheap->chunk_lock);
trace_ion_secure_cma_add_to_pool_end(len,
atomic_read(&sheap->total_pool_size),
prefetch);
return ret;
}
@ -265,7 +257,6 @@ int ion_secure_cma_prefetch(struct ion_heap *heap, void *data)
len = diff;
sheap->last_alloc = len;
trace_ion_prefetching(sheap->last_alloc);
schedule_work(&sheap->work);
return 0;
@ -359,8 +350,6 @@ __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
struct list_head *entry, *_n;
unsigned long drained_size = 0, skipped_size = 0;
trace_ion_secure_cma_shrink_pool_start(drained_size, skipped_size);
list_for_each_safe(entry, _n, &sheap->chunks) {
struct ion_cma_alloc_chunk *chunk = container_of(entry,
struct ion_cma_alloc_chunk, entry);
@ -377,7 +366,6 @@ __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
}
}
trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
return drained_size;
}
@ -695,14 +683,12 @@ static int ion_secure_cma_allocate(struct ion_heap *heap,
__func__, heap->name);
return -ENOMEM;
}
trace_ion_secure_cma_allocate_start(heap->name, len, flags);
if (!allow_non_contig)
buf = __ion_secure_cma_allocate(heap, buffer, len,
flags);
else
buf = __ion_secure_cma_allocate_non_contig(heap, buffer, len,
flags);
trace_ion_secure_cma_allocate_end(heap->name, len, flags);
if (buf) {
int ret;
@ -711,11 +697,7 @@ static int ion_secure_cma_allocate(struct ion_heap *heap,
__func__);
ret = 1;
} else {
trace_ion_cp_secure_buffer_start(heap->name, len,
flags);
ret = msm_secure_table(buf->table);
trace_ion_cp_secure_buffer_end(heap->name, len,
flags);
}
if (ret) {
struct ion_cma_secure_heap *sheap =

View File

@ -160,153 +160,26 @@ int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
return ion_heap_sglist_zero(&sg, 1, pgprot);
}
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
spin_lock(&heap->free_lock);
list_add(&buffer->list, &heap->free_list);
heap->free_list_size += buffer->size;
spin_unlock(&heap->free_lock);
wake_up(&heap->waitqueue);
}
size_t ion_heap_freelist_size(struct ion_heap *heap)
{
size_t size;
spin_lock(&heap->free_lock);
size = heap->free_list_size;
spin_unlock(&heap->free_lock);
return size;
}
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
bool skip_pools)
{
struct ion_buffer *buffer;
size_t total_drained = 0;
if (ion_heap_freelist_size(heap) == 0)
return 0;
spin_lock(&heap->free_lock);
if (size == 0)
size = heap->free_list_size;
while (!list_empty(&heap->free_list)) {
if (total_drained >= size)
break;
buffer = list_first_entry(&heap->free_list, struct ion_buffer,
list);
list_del(&buffer->list);
heap->free_list_size -= buffer->size;
if (skip_pools)
buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
total_drained += buffer->size;
spin_unlock(&heap->free_lock);
ion_buffer_destroy(buffer);
spin_lock(&heap->free_lock);
}
spin_unlock(&heap->free_lock);
return total_drained;
}
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
return _ion_heap_freelist_drain(heap, size, false);
}
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
return _ion_heap_freelist_drain(heap, size, true);
}
static int ion_heap_deferred_free(void *data)
{
struct ion_heap *heap = data;
while (true) {
struct ion_buffer *buffer;
wait_event_freezable(heap->waitqueue,
ion_heap_freelist_size(heap) > 0);
spin_lock(&heap->free_lock);
if (list_empty(&heap->free_list)) {
spin_unlock(&heap->free_lock);
continue;
}
buffer = list_first_entry(&heap->free_list, struct ion_buffer,
list);
list_del(&buffer->list);
heap->free_list_size -= buffer->size;
spin_unlock(&heap->free_lock);
ion_buffer_destroy(buffer);
}
return 0;
}
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE
struct sched_param param = { .sched_priority = 0 };
#endif
INIT_LIST_HEAD(&heap->free_list);
init_waitqueue_head(&heap->waitqueue);
heap->task = kthread_run(ion_heap_deferred_free, heap,
"%s", heap->name);
if (IS_ERR(heap->task)) {
pr_err("%s: creating thread for deferred free failed\n",
__func__);
return PTR_ERR_OR_ZERO(heap->task);
}
#ifndef CONFIG_ION_DEFER_FREE_NO_SCHED_IDLE
sched_setscheduler(heap->task, SCHED_IDLE, &param);
#endif
return 0;
}
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
struct shrink_control *sc)
{
struct ion_heap *heap = container_of(shrinker, struct ion_heap,
shrinker);
int total = 0;
struct ion_heap *heap = container_of(shrinker, typeof(*heap), shrinker);
total = ion_heap_freelist_size(heap) / PAGE_SIZE;
if (heap->ops->shrink)
total += heap->ops->shrink(heap, sc->gfp_mask, 0);
return total;
return heap->ops->shrink(heap, sc->gfp_mask, 0);
return 0;
}
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
struct shrink_control *sc)
{
struct ion_heap *heap = container_of(shrinker, struct ion_heap,
shrinker);
int freed = 0;
int to_scan = sc->nr_to_scan;
if (to_scan == 0)
return 0;
/*
* shrink the free list first, no point in zeroing the memory if we're
* just going to reclaim it. Also, skip any possible page pooling.
*/
if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
PAGE_SIZE;
to_scan -= freed;
if (to_scan <= 0)
return freed;
struct ion_heap *heap = container_of(shrinker, typeof(*heap), shrinker);
if (heap->ops->shrink)
freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
return freed;
return heap->ops->shrink(heap, sc->gfp_mask, sc->nr_to_scan);
return 0;
}
void ion_heap_init_shrinker(struct ion_heap *heap)

View File

@ -22,8 +22,13 @@
* Allocates an ion buffer.
* Use IS_ERR on returned pointer to check for success.
*/
struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
unsigned int flags);
struct dma_buf *ion_alloc_dmabuf(size_t len, unsigned int heap_id_mask,
unsigned int flags);
static inline struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
unsigned int flags)
{
return ion_alloc_dmabuf(len, heap_id_mask, flags);
}
#else

View File

@ -48,7 +48,7 @@ static void ion_page_pool_free_pages(struct ion_page_pool *pool,
static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
{
mutex_lock(&pool->mutex);
spin_lock(&pool->lock);
if (PageHighMem(page)) {
list_add_tail(&page->lru, &pool->high_items);
pool->high_count++;
@ -60,7 +60,7 @@ static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
nr_total_pages += 1 << pool->order;
mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
1 << pool->order);
mutex_unlock(&pool->mutex);
spin_unlock(&pool->lock);
return 0;
}
@ -94,12 +94,12 @@ struct page *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
if (fatal_signal_pending(current))
return ERR_PTR(-EINTR);
if (*from_pool && mutex_trylock(&pool->mutex)) {
if (*from_pool && spin_trylock(&pool->lock)) {
if (pool->high_count)
page = ion_page_pool_remove(pool, true);
else if (pool->low_count)
page = ion_page_pool_remove(pool, false);
mutex_unlock(&pool->mutex);
spin_unlock(&pool->lock);
}
if (!page) {
page = ion_page_pool_alloc_pages(pool);
@ -121,12 +121,12 @@ struct page *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool)
if (!pool)
return ERR_PTR(-EINVAL);
if (mutex_trylock(&pool->mutex)) {
if (spin_trylock(&pool->lock)) {
if (pool->high_count)
page = ion_page_pool_remove(pool, true);
else if (pool->low_count)
page = ion_page_pool_remove(pool, false);
mutex_unlock(&pool->mutex);
spin_unlock(&pool->lock);
}
if (!page)
@ -185,16 +185,16 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
while (freed < nr_to_scan) {
struct page *page;
mutex_lock(&pool->mutex);
spin_lock(&pool->lock);
if (pool->low_count) {
page = ion_page_pool_remove(pool, false);
} else if (high && pool->high_count) {
page = ion_page_pool_remove(pool, true);
} else {
mutex_unlock(&pool->mutex);
spin_unlock(&pool->lock);
break;
}
mutex_unlock(&pool->mutex);
spin_unlock(&pool->lock);
ion_page_pool_free_pages(pool, page);
freed += (1 << pool->order);
}
@ -215,7 +215,7 @@ struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
INIT_LIST_HEAD(&pool->high_items);
pool->gfp_mask = gfp_mask;
pool->order = order;
mutex_init(&pool->mutex);
spin_lock_init(&pool->lock);
plist_node_init(&pool->list, order);
if (cached)
pool->cached = true;

View File

@ -513,106 +513,6 @@ static struct ion_heap_ops system_heap_ops = {
.shrink = ion_system_heap_shrink,
};
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
void *unused)
{
struct ion_system_heap *sys_heap = container_of(
heap, struct ion_system_heap, heap);
bool use_seq = s;
unsigned long uncached_total = 0;
unsigned long cached_total = 0;
unsigned long secure_total = 0;
struct ion_page_pool *pool;
int i, j;
for (i = 0; i < NUM_ORDERS; i++) {
pool = sys_heap->uncached_pools[i];
if (use_seq) {
seq_printf(s,
"%d order %u highmem pages in uncached pool = %lu total\n",
pool->high_count, pool->order,
(1 << pool->order) * PAGE_SIZE *
pool->high_count);
seq_printf(s,
"%d order %u lowmem pages in uncached pool = %lu total\n",
pool->low_count, pool->order,
(1 << pool->order) * PAGE_SIZE *
pool->low_count);
}
uncached_total += (1 << pool->order) * PAGE_SIZE *
pool->high_count;
uncached_total += (1 << pool->order) * PAGE_SIZE *
pool->low_count;
}
for (i = 0; i < NUM_ORDERS; i++) {
pool = sys_heap->cached_pools[i];
if (use_seq) {
seq_printf(s,
"%d order %u highmem pages in cached pool = %lu total\n",
pool->high_count, pool->order,
(1 << pool->order) * PAGE_SIZE *
pool->high_count);
seq_printf(s,
"%d order %u lowmem pages in cached pool = %lu total\n",
pool->low_count, pool->order,
(1 << pool->order) * PAGE_SIZE *
pool->low_count);
}
cached_total += (1 << pool->order) * PAGE_SIZE *
pool->high_count;
cached_total += (1 << pool->order) * PAGE_SIZE *
pool->low_count;
}
for (i = 0; i < NUM_ORDERS; i++) {
for (j = 0; j < VMID_LAST; j++) {
if (!is_secure_vmid_valid(j))
continue;
pool = sys_heap->secure_pools[j][i];
if (use_seq) {
seq_printf(s,
"VMID %d: %d order %u highmem pages in secure pool = %lu total\n",
j, pool->high_count, pool->order,
(1 << pool->order) * PAGE_SIZE *
pool->high_count);
seq_printf(s,
"VMID %d: %d order %u lowmem pages in secure pool = %lu total\n",
j, pool->low_count, pool->order,
(1 << pool->order) * PAGE_SIZE *
pool->low_count);
}
secure_total += (1 << pool->order) * PAGE_SIZE *
pool->high_count;
secure_total += (1 << pool->order) * PAGE_SIZE *
pool->low_count;
}
}
if (use_seq) {
seq_puts(s, "--------------------------------------------\n");
seq_printf(s, "uncached pool = %lu cached pool = %lu secure pool = %lu\n",
uncached_total, cached_total, secure_total);
seq_printf(s, "pool total (uncached + cached + secure) = %lu\n",
uncached_total + cached_total + secure_total);
seq_puts(s, "--------------------------------------------\n");
} else {
pr_info("-------------------------------------------------\n");
pr_info("uncached pool = %lu cached pool = %lu secure pool = %lu\n",
uncached_total, cached_total, secure_total);
pr_info("pool total (uncached + cached + secure) = %lu\n",
uncached_total + cached_total + secure_total);
pr_info("-------------------------------------------------\n");
}
return 0;
}
static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
{
int i;
@ -678,7 +578,6 @@ struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
mutex_init(&heap->split_page_mutex);
heap->heap.debug_show = ion_system_heap_debug_show;
return &heap->heap;
destroy_uncached_pools:

View File

@ -356,20 +356,12 @@ static int ion_system_secure_heap_pm_freeze(struct ion_heap *heap)
{
struct ion_system_secure_heap *secure_heap;
unsigned long count;
long sz;
struct shrink_control sc = {
.gfp_mask = GFP_HIGHUSER,
};
secure_heap = container_of(heap, struct ion_system_secure_heap, heap);
sz = atomic_long_read(&heap->total_allocated);
if (sz) {
pr_err("%s: %lx bytes won't be saved across hibernation. Aborting.",
__func__, sz);
return -EINVAL;
}
/* Since userspace is frozen, no more requests will be queued */
cancel_delayed_work_sync(&secure_heap->prefetch_work);

View File

@ -1,180 +0,0 @@
/* Copyright (c) 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ion
#if !defined(_TRACE_ION_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_ION_H
#include <linux/types.h>
#include <linux/tracepoint.h>
#define DEV_NAME_NONE "None"
DECLARE_EVENT_CLASS(ion_dma_map_cmo_class,
TP_PROTO(const struct device *dev, const char *name,
bool cached, bool hlos_accessible, unsigned long map_attrs,
enum dma_data_direction dir),
TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir),
TP_STRUCT__entry(
__string(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE)
__string(name, name)
__field(bool, cached)
__field(bool, hlos_accessible)
__field(unsigned long, map_attrs)
__field(enum dma_data_direction, dir)
),
TP_fast_assign(
__assign_str(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE);
__assign_str(name, name);
__entry->cached = cached;
__entry->hlos_accessible = hlos_accessible;
__entry->map_attrs = map_attrs;
__entry->dir = dir;
),
TP_printk("dev=%s name=%s cached=%d access=%d map_attrs=0x%lx dir=%d",
__get_str(dev_name),
__get_str(name),
__entry->cached,
__entry->hlos_accessible,
__entry->map_attrs,
__entry->dir)
);
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_map_cmo_apply,
TP_PROTO(const struct device *dev, const char *name,
bool cached, bool hlos_accessible, unsigned long map_attrs,
enum dma_data_direction dir),
TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir)
);
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_map_cmo_skip,
TP_PROTO(const struct device *dev, const char *name,
bool cached, bool hlos_accessible, unsigned long map_attrs,
enum dma_data_direction dir),
TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir)
);
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_unmap_cmo_apply,
TP_PROTO(const struct device *dev, const char *name,
bool cached, bool hlos_accessible, unsigned long map_attrs,
enum dma_data_direction dir),
TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir)
);
DEFINE_EVENT(ion_dma_map_cmo_class, ion_dma_unmap_cmo_skip,
TP_PROTO(const struct device *dev, const char *name,
bool cached, bool hlos_accessible, unsigned long map_attrs,
enum dma_data_direction dir),
TP_ARGS(dev, name, cached, hlos_accessible, map_attrs, dir)
);
DECLARE_EVENT_CLASS(ion_access_cmo_class,
TP_PROTO(const struct device *dev, const char *name,
bool cached, bool hlos_accessible, enum dma_data_direction dir,
bool only_mapped),
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped),
TP_STRUCT__entry(
__string(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE)
__string(name, name)
__field(bool, cached)
__field(bool, hlos_accessible)
__field(enum dma_data_direction, dir)
__field(bool, only_mapped)
),
TP_fast_assign(
__assign_str(dev_name, dev ? dev_name(dev) : DEV_NAME_NONE);
__assign_str(name, name);
__entry->cached = cached;
__entry->hlos_accessible = hlos_accessible;
__entry->dir = dir;
__entry->only_mapped = only_mapped;
),
TP_printk("dev=%s name=%s cached=%d access=%d dir=%d, only_mapped=%d",
__get_str(dev_name),
__get_str(name),
__entry->cached,
__entry->hlos_accessible,
__entry->dir,
__entry->only_mapped)
);
DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_cmo_apply,
TP_PROTO(const struct device *dev, const char *name,
bool cached, bool hlos_accessible, enum dma_data_direction dir,
bool only_mapped),
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
);
DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_cmo_skip,
TP_PROTO(const struct device *dev, const char *name,
bool cached, bool hlos_accessible, enum dma_data_direction dir,
bool only_mapped),
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
);
DEFINE_EVENT(ion_access_cmo_class, ion_begin_cpu_access_notmapped,
TP_PROTO(const struct device *dev, const char *name,
bool cached, bool hlos_accessible, enum dma_data_direction dir,
bool only_mapped),
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
);
DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_cmo_apply,
TP_PROTO(const struct device *dev, const char *name,
bool cached, bool hlos_accessible, enum dma_data_direction dir,
bool only_mapped),
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
);
DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_cmo_skip,
TP_PROTO(const struct device *dev, const char *name,
bool cached, bool hlos_accessible, enum dma_data_direction dir,
bool only_mapped),
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
);
DEFINE_EVENT(ion_access_cmo_class, ion_end_cpu_access_notmapped,
TP_PROTO(const struct device *dev, const char *name,
bool cached, bool hlos_accessible, enum dma_data_direction dir,
bool only_mapped),
TP_ARGS(dev, name, cached, hlos_accessible, dir, only_mapped)
);
#endif /* _TRACE_ION_H */
#include <trace/define_trace.h>