mirror of https://github.com/rd-stuffs/msm-4.14.git
The shrinker rwsem is problematic because the actual shrinking path must back off when contention appears, causing some or all shrinkers to be skipped. This can be especially bad when shrinkers are frequently registered and unregistered: a high rate of shrinker registrations/unregistrations can effectively DoS the shrinker mechanism, rendering it useless.

We can't use SRCU here because it would unilaterally shift the burden back onto the registration/unregistration paths, which defeats the reason the back-off mechanism was added in the first place. Using fine-grained locks and a rwsem at the per-shrinker level to protect the shrinker list, rather than a global rwsem, balances the burden equally and eliminates the issue. Now, shrinking can happen concurrently with shrinker registrations/unregistrations, without either side being favored over the other.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Signed-off-by: azrim <mirzaspc@gmail.com>
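The mm/vmscan.c half of this change is not shown on this page, so the following is only a rough sketch of the locking scheme the message describes, under assumed names: shrinker_list_lock, do_shrink_slab() and the *_sketch function names are placeholders, not necessarily what the real patch uses. The point is that unregistration write-locks only its own shrinker's del_rwsem, while the reclaim walk read-trylocks each shrinker individually, so a shrinker that is mid-teardown is the only one that gets skipped.

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/shrinker.h>
#include <linux/spinlock.h>

static LIST_HEAD(shrinker_list);
static DEFINE_SPINLOCK(shrinker_list_lock);	/* guards list membership only */

/* Placeholder for the real per-shrinker scan; not shown on this page */
static void do_shrink_slab(struct shrinker *shrinker, gfp_t gfp_mask, int nid);

/* Sketch: tear-down blocks out users of this one shrinker only */
void unregister_shrinker_sketch(struct shrinker *shrinker)
{
	down_write(&shrinker->del_rwsem);
	spin_lock(&shrinker_list_lock);
	list_del(&shrinker->list);
	spin_unlock(&shrinker_list_lock);
	up_write(&shrinker->del_rwsem);
}

/* Sketch: reclaim skips only a shrinker that is currently unregistering */
static void shrink_slab_sketch(gfp_t gfp_mask, int nid)
{
	struct shrinker *shrinker;

	spin_lock(&shrinker_list_lock);
	list_for_each_entry(shrinker, &shrinker_list, list) {
		if (!down_read_trylock(&shrinker->del_rwsem))
			continue;
		/*
		 * Holding del_rwsem keeps this entry on the list, so the
		 * spinlock can be dropped while the shrinker runs and the
		 * walk can resume from this entry afterwards.
		 */
		spin_unlock(&shrinker_list_lock);
		do_shrink_slab(shrinker, gfp_mask, nid);
		spin_lock(&shrinker_list_lock);
		up_read(&shrinker->del_rwsem);
	}
	spin_unlock(&shrinker_list_lock);
}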
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SHRINKER_H
#define _LINUX_SHRINKER_H

#include <linux/rwsem.h>

/*
 * This struct is used to pass information from page reclaim to the shrinkers.
 * We consolidate the values for easier extension later.
 *
 * The 'gfpmask' refers to the allocation we are currently trying to
 * fulfil.
 */
struct shrink_control {
	gfp_t gfp_mask;

	/*
	 * How many objects scan_objects should scan and try to reclaim.
	 * This is reset before every call, so it is safe for callees
	 * to modify.
	 */
	unsigned long nr_to_scan;

	/*
	 * How many objects did scan_objects process?
	 * This defaults to nr_to_scan before every call, but the callee
	 * should track its actual progress.
	 */
	unsigned long nr_scanned;

	/* current node being shrunk (for NUMA aware shrinkers) */
	int nid;

	/* current memcg being shrunk (for memcg aware shrinkers) */
	struct mem_cgroup *memcg;
};

#define SHRINK_STOP (~0UL)
/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * @count_objects should return the number of freeable items in the cache. If
 * there are no objects to free or the number of freeable items cannot be
 * determined, it should return 0. No deadlock checks should be done during the
 * count callback - the shrinker relies on aggregating scan counts that couldn't
 * be executed due to potential deadlocks to be run at a later call when the
 * deadlock condition is no longer pending.
 *
 * @scan_objects will only be called if @count_objects returned a non-zero
 * value for the number of freeable objects. The callout should scan the cache
 * and attempt to free items from the cache. It should then return the number
 * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
 * due to potential deadlocks. If SHRINK_STOP is returned, then no further
 * attempts to call the @scan_objects will be made from the current reclaim
 * context.
 *
 * @flags determine the shrinker abilities, like numa awareness
 */
struct shrinker {
	unsigned long (*count_objects)(struct shrinker *,
				       struct shrink_control *sc);
	unsigned long (*scan_objects)(struct shrinker *,
				      struct shrink_control *sc);

	int seeks;	/* seeks to recreate an obj */
	long batch;	/* reclaim batch size, 0 = default */
	unsigned long flags;

	/* These are for internal use */
	struct list_head list;
	/* objs pending delete, per node */
	atomic_long_t *nr_deferred;
	struct rw_semaphore del_rwsem;
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */

/* Flags */
#define SHRINKER_NUMA_AWARE	(1 << 0)
#define SHRINKER_MEMCG_AWARE	(1 << 1)

extern int register_shrinker(struct shrinker *);
extern void unregister_shrinker(struct shrinker *);
#endif
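Not part of this header, but for context on how the API above is consumed: a cache that wants to receive reclaim pressure fills in struct shrinker and registers it. The example below is hypothetical (the demo_* names, the toy counter, and the module boilerplate are invented for illustration); it just follows the count/scan contract spelled out in the comment block above.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/shrinker.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_cached_objects;	/* size of the toy cache */

/* Report how many objects could be freed; 0 means nothing to do */
static unsigned long demo_count_objects(struct shrinker *s,
					struct shrink_control *sc)
{
	return READ_ONCE(demo_cached_objects);
}

/* Free up to sc->nr_to_scan objects and report how many actually went away */
static unsigned long demo_scan_objects(struct shrinker *s,
				       struct shrink_control *sc)
{
	unsigned long freed;

	spin_lock(&demo_lock);
	freed = min(demo_cached_objects, sc->nr_to_scan);
	demo_cached_objects -= freed;
	spin_unlock(&demo_lock);

	/* Return SHRINK_STOP instead if the locks needed here could deadlock */
	return freed;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count_objects,
	.scan_objects	= demo_scan_objects,
	.seeks		= DEFAULT_SEEKS,
};

static int __init demo_init(void)
{
	return register_shrinker(&demo_shrinker);
}

static void __exit demo_exit(void)
{
	unregister_shrinker(&demo_shrinker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Reclaim consults count_objects first and only calls scan_objects when the count was non-zero; with the per-shrinker del_rwsem added by this commit, the unregister_shrinker() call in demo_exit() no longer has to contend with shrinks of unrelated shrinkers.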