mm: Revert some hacks

This reverts commits:
- 0fc9fbd21297173aa822f97fe33a481053cb96ec [mm + sysctl: tune swappiness and make some values read only]
- 94181990a4ea1a20bb8bf443f3fbe500d05901c3 [mm: Import oplus memory management hacks]
- 97bdd381c8292d43e68ff55bd08767db17e62810 [mm: Set swappiness for CONFIG_INCREASE_MAXIMUM_SWAPPINESS=y case]
- fa8d2aa0e20da6b943157f6ab58068bd80d68920 [mm: move variable under a proper #ifdef]
- f9daeaa423b745b2c2c34a6fb5ac6b69daf746c4 [mm: merge Samsung mm hacks]
- 1a460a832c9c6550f5cbe32dca4c15cf89806b57 [mm: Make watermark_scale_factor read-only]
- 963a3bfe3352b45ea21c58d53055689e46d81eeb [mm: Tune parameters for Android]
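
This restores the upstream defaults: the vm sysctls are writable again
(mode 0644), vm_swappiness returns to 60 with the usual 0..100 clamp,
swap_setup() sizes page_cluster from total RAM, swap readahead is built
unconditionally, struct scan_control loses its per-call swappiness
field, and zsmalloc goes back to order-2 zspages.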

Change-Id: I70495ca93a05384a2d7bc2498fd2d56bd9928390
Signed-off-by: Richard Raya <rdxzv.dev@gmail.com>

kernel/sysctl.c

@@ -132,10 +132,6 @@ static unsigned long zero_ul;
static unsigned long one_ul = 1;
static unsigned long long_max = LONG_MAX;
static int one_hundred = 100;
#ifdef CONFIG_OPLUS_MM_HACKS
extern int direct_vm_swappiness;
static int two_hundred = 200;
#endif /* CONFIG_OPLUS_MM_HACKS */
static int one_thousand = 1000;
static int two_hundred_fifty_five = 255;
#ifdef CONFIG_PELT_COMPATIBILITY_LAYER
@@ -146,9 +142,6 @@ static unsigned int __read_mostly sysctl_sched_group_downmigrate_pct = 95;
#ifdef CONFIG_SCHED_WALT
static int two_million = 2000000;
#endif
#ifdef CONFIG_INCREASE_MAXIMUM_SWAPPINESS
static int max_swappiness = 200;
#endif
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
@@ -1592,28 +1585,28 @@ static struct ctl_table vm_table[] = {
.procname = "reap_mem_on_sigkill",
.data = &sysctl_reap_mem_on_sigkill,
.maxlen = sizeof(sysctl_reap_mem_on_sigkill),
.mode = 0444,
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "overcommit_ratio",
.data = &sysctl_overcommit_ratio,
.maxlen = sizeof(sysctl_overcommit_ratio),
.mode = 0444,
.mode = 0644,
.proc_handler = overcommit_ratio_handler,
},
{
.procname = "overcommit_kbytes",
.data = &sysctl_overcommit_kbytes,
.maxlen = sizeof(sysctl_overcommit_kbytes),
.mode = 0444,
.mode = 0644,
.proc_handler = overcommit_kbytes_handler,
},
{
.procname = "page-cluster",
.data = &page_cluster,
.maxlen = sizeof(int),
.mode = 0444,
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
@@ -1621,7 +1614,7 @@ static struct ctl_table vm_table[] = {
.procname = "dirty_background_ratio",
.data = &dirty_background_ratio,
.maxlen = sizeof(dirty_background_ratio),
.mode = 0444,
.mode = 0644,
.proc_handler = dirty_background_ratio_handler,
.extra1 = &zero,
.extra2 = &one_hundred,
@@ -1638,7 +1631,7 @@ static struct ctl_table vm_table[] = {
.procname = "dirty_ratio",
.data = &vm_dirty_ratio,
.maxlen = sizeof(vm_dirty_ratio),
.mode = 0444,
.mode = 0644,
.proc_handler = dirty_ratio_handler,
.extra1 = &zero,
.extra2 = &one_hundred,
@@ -1655,14 +1648,14 @@ static struct ctl_table vm_table[] = {
.procname = "dirty_writeback_centisecs",
.data = &dirty_writeback_interval,
.maxlen = sizeof(dirty_writeback_interval),
.mode = 0444,
.mode = 0644,
.proc_handler = dirty_writeback_centisecs_handler,
},
{
.procname = "dirty_expire_centisecs",
.data = &dirty_expire_interval,
.maxlen = sizeof(dirty_expire_interval),
.mode = 0444,
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
@@ -1670,7 +1663,7 @@ static struct ctl_table vm_table[] = {
.procname = "dirtytime_expire_seconds",
.data = &dirtytime_expire_interval,
.maxlen = sizeof(dirty_expire_interval),
.mode = 0444,
.mode = 0644,
.proc_handler = dirtytime_interval_handler,
.extra1 = &zero,
},
@@ -1683,30 +1676,11 @@ static struct ctl_table vm_table[] = {
.procname = "swappiness",
.data = &vm_swappiness,
.maxlen = sizeof(vm_swappiness),
.mode = 0444,
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
#ifdef CONFIG_OPLUS_MM_HACKS
.extra2 = &two_hundred,
#else
#ifdef CONFIG_INCREASE_MAXIMUM_SWAPPINESS
.extra2 = &max_swappiness,
#else
.extra2 = &one_hundred,
#endif
#endif /* CONFIG_OPLUS_MM_HACKS */
},
#ifdef CONFIG_OPLUS_MM_HACKS
{
.procname = "direct_swappiness",
.data = &direct_vm_swappiness,
.maxlen = sizeof(direct_vm_swappiness),
.mode = 0444,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &two_hundred,
},
#endif /* CONFIG_OPLUS_MM_HACKS */
{
.procname = "want_old_faultaround_pte",
.data = &want_old_faultaround_pte,
@@ -1803,7 +1777,7 @@ static struct ctl_table vm_table[] = {
.procname = "min_free_kbytes",
.data = &min_free_kbytes,
.maxlen = sizeof(min_free_kbytes),
.mode = 0444,
.mode = 0644,
.proc_handler = min_free_kbytes_sysctl_handler,
.extra1 = &zero,
},
@@ -1811,7 +1785,7 @@ static struct ctl_table vm_table[] = {
.procname = "watermark_scale_factor",
.data = &watermark_scale_factor,
.maxlen = sizeof(watermark_scale_factor),
.mode = 0444,
.mode = 0644,
.proc_handler = watermark_scale_factor_sysctl_handler,
.extra1 = &one,
.extra2 = &one_thousand,
@@ -1870,7 +1844,7 @@ static struct ctl_table vm_table[] = {
.procname = "vfs_cache_pressure",
.data = &sysctl_vfs_cache_pressure,
.maxlen = sizeof(sysctl_vfs_cache_pressure),
.mode = 0444,
.mode = 0644,
.proc_handler = proc_dointvec,
.extra1 = &zero,
},
@@ -2034,14 +2008,14 @@ static struct ctl_table vm_table[] = {
.procname = "swap_ratio",
.data = &sysctl_swap_ratio,
.maxlen = sizeof(sysctl_swap_ratio),
.mode = 0444,
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
},
{
.procname = "swap_ratio_enable",
.data = &sysctl_swap_ratio_enable,
.maxlen = sizeof(sysctl_swap_ratio_enable),
.mode = 0444,
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
},
#endif
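
With these entries back at mode 0644, the normal sysctl workflow works
again, and proc_dointvec_minmax enforces the restored 0..100 swappiness
clamp. A minimal userspace sketch (not part of this commit; the paths
and values are illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_sysctl(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;
        ssize_t n = write(fd, val, strlen(val));
        close(fd);
        return n < 0 ? -1 : 0;
}

int main(void)
{
        /* Succeeds (as root) now that the entry is 0644 again. */
        if (write_sysctl("/proc/sys/vm/swappiness", "60") < 0)
                perror("swappiness=60");
        /* Fails: proc_dointvec_minmax rejects values above extra2 (100). */
        if (write_sysctl("/proc/sys/vm/swappiness", "150") < 0)
                perror("swappiness=150");
        return 0;
}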

mm/Kconfig

@@ -650,31 +650,6 @@ config ZSMALLOC_STAT
information to userspace via debugfs.
If unsure, say N.
config DIRECT_RECLAIM_FILE_PAGES_ONLY
bool "Reclaim file pages only on direct reclaim path"
depends on ZSWAP
default n
config INCREASE_MAXIMUM_SWAPPINESS
bool "Allow swappiness to be set up to 200"
depends on ZSWAP
default n
config FIX_INACTIVE_RATIO
bool "Fix active:inactive anon ratio to 1:1"
depends on ZSWAP
default n
config SWAP_ENABLE_READAHEAD
bool "Enable readahead on page swap in"
depends on SWAP
default y
help
When a page fault occurs, adjacent pages of SWAP_CLUSTER_MAX are
also paged in expecting those pages will be used in near future.
This behaviour is good at disk-based system, but not on in-memory
compression (e.g. zram).
config GENERIC_EARLY_IOREMAP
bool
@@ -876,15 +851,6 @@ config FORCE_ALLOC_FROM_DMA_ZONE
If unsure, say "n".
config OPLUS_MM_HACKS
bool "Enable oplus memory management hacks"
default n
help
Improves memory management drastically as these hacks are used in Oplus devices.
Tested on devices made by OEMs like Motorola and Samsung.
If unsure, say "n".
# multi-gen LRU {
config LRU_GEN
bool "Multi-Gen LRU"

mm/swap.c

@@ -1086,14 +1086,15 @@ EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);
*/
void __init swap_setup(void)
{
/* Tweak for Android devices using zram */
page_cluster = 0;
unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
/* Use a smaller cluster for small-memory machines */
if (megs < 16)
page_cluster = 2;
else
page_cluster = 3;
/*
* Right now other parts of the system means that we
* _really_ don't want to cluster much more
*/
#ifdef CONFIG_OPLUS_MM_HACKS
page_cluster = 0;
#endif /* CONFIG_OPLUS_MM_HACKS */
}
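
The restored swap_setup() computes the readahead cluster from total RAM:
2^page_cluster pages per swapin, one notch smaller on sub-16 MiB machines.
A userspace re-statement of the same arithmetic (a sketch, assuming
sysinfo(2) for the RAM figure; not kernel code):

#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
        struct sysinfo si;

        if (sysinfo(&si) != 0)
                return 1;
        unsigned long megs = ((unsigned long long)si.totalram * si.mem_unit) >> 20;
        int page_cluster = (megs < 16) ? 2 : 3;
        printf("%lu MiB RAM -> page_cluster = %d (%d-page clusters)\n",
               megs, page_cluster, 1 << page_cluster);
        return 0;
}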

mm/swap_state.c

@@ -555,7 +555,6 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
return retpage;
}
#ifdef CONFIG_SWAP_ENABLE_READAHEAD
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
unsigned long offset,
int hits,
@@ -616,7 +615,6 @@ static unsigned long swapin_nr_pages(unsigned long offset)
return pages;
}
#endif
/**
* swap_cluster_readahead - swap in pages in hope we need them soon
@@ -643,17 +641,15 @@ static unsigned long swapin_nr_pages(unsigned long offset)
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
unsigned long addr = vmf->address;
bool do_poll = true;
#ifdef CONFIG_SWAP_ENABLE_READAHEAD
struct page *page;
unsigned long entry_offset = swp_offset(entry);
unsigned long offset = entry_offset;
unsigned long start_offset, end_offset;
unsigned long mask;
struct blk_plug plug;
bool page_allocated;
bool do_poll = true, page_allocated;
struct vm_area_struct *vma = vmf->vma;
unsigned long addr = vmf->address;
mask = swapin_nr_pages(offset) - 1;
if (!mask)
@@ -688,7 +684,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
lru_add_drain(); /* Push any new pages onto the LRU now */
skip:
#endif
return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}
@@ -727,7 +722,6 @@ void exit_swap_address_space(unsigned int type)
kvfree(spaces);
}
#ifdef CONFIG_SWAP_ENABLE_READAHEAD
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
unsigned long faddr,
unsigned long lpfn,
@@ -808,15 +802,12 @@ static void swap_ra_info(struct vm_fault *vmf,
#endif
pte_unmap(orig_pte);
}
#endif
struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
bool do_poll = true;
#ifdef CONFIG_SWAP_ENABLE_READAHEAD
struct blk_plug plug;
struct vm_area_struct *vma = vmf->vma;
struct page *page;
pte_t *pte, pentry;
swp_entry_t entry;
@@ -828,8 +819,6 @@ struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
if (ra_info.win == 1)
goto skip;
do_poll = false;
blk_start_plug(&plug);
for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
i++, pte++) {
@@ -858,9 +847,8 @@ struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
blk_finish_plug(&plug);
lru_add_drain();
skip:
#endif
return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
do_poll);
ra_info.win == 1);
}
/**
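
For context, the mask computed above (mask = swapin_nr_pages(offset) - 1)
aligns a power-of-two readahead window around the faulting offset. A
standalone sketch of that window math, with hypothetical values:

#include <stdio.h>

int main(void)
{
        unsigned long offset = 1234;    /* hypothetical faulting swap offset */
        unsigned long win = 8;          /* hypothetical swapin_nr_pages() result */
        unsigned long mask = win - 1;
        unsigned long start_offset = offset & ~mask;    /* 1232 */
        unsigned long end_offset = offset | mask;       /* 1239 */

        printf("fault at %lu -> readahead offsets %lu..%lu\n",
               offset, start_offset, end_offset);
        return 0;
}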

mm/vmscan.c

@@ -89,8 +89,6 @@ struct scan_control {
*/
struct mem_cgroup *target_mem_cgroup;
int swappiness;
/* Scan (total_size >> priority) pages at once */
int priority;
@@ -171,17 +169,7 @@ struct scan_control {
/*
* From 0 .. 100. Higher means more swappy.
*/
#ifndef CONFIG_INCREASE_MAXIMUM_SWAPPINESS
int vm_swappiness = 160;
#ifdef CONFIG_OPLUS_MM_HACKS
/*
* Direct reclaim swappiness, values range from 0 .. 200. Higher means more swappy.
*/
int direct_vm_swappiness = 80;
#endif /* CONFIG_OPLUS_MM_HACKS */
#else
int vm_swappiness = 190;
#endif
int vm_swappiness = 60;
/*
* The total number of pages which are beyond the high watermark within all
* zones.
@@ -1448,7 +1436,6 @@ unsigned long reclaim_pages_from_list(struct list_head *page_list,
.may_unmap = 1,
.may_swap = 1,
.target_vma = vma,
.swappiness = vm_swappiness,
};
unsigned long nr_reclaimed;
@@ -1838,10 +1825,6 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
*/
static int current_may_throttle(void)
{
#ifdef CONFIG_OPLUS_MM_HACKS
if ((current->signal->oom_score_adj < 0))
return 0;
#endif /* CONFIG_OPLUS_MM_HACKS */
return !(current->flags & PF_LESS_THROTTLE) ||
current->backing_dev_info == NULL ||
bdi_write_congested(current->backing_dev_info);
@@ -2262,14 +2245,12 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
struct scan_control *sc, bool trace)
{
enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
enum lru_list inactive_lru = file * LRU_FILE;
unsigned long inactive, active;
unsigned long inactive_ratio;
#ifndef CONFIG_FIX_INACTIVE_RATIO
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
unsigned long refaults;
unsigned long gb;
#endif
/*
* If we don't have swap space, anonymous page deactivation
@@ -2281,9 +2262,6 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
#ifdef CONFIG_FIX_INACTIVE_RATIO
inactive_ratio = 1;
#else
/*
* When refaults are being observed, it means a new workingset
* is being established. Disable active list protection to get
@@ -2294,13 +2272,8 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
inactive_ratio = 0;
} else {
gb = (inactive + active) >> (30 - PAGE_SHIFT);
#ifdef CONFIG_OPLUS_MM_HACKS
if (file && gb)
inactive_ratio = min(2UL, int_sqrt(10 * gb));
#else
if (gb && is_file_lru(inactive_lru))
inactive_ratio = int_sqrt(10 * gb);
#endif /* CONFIG_OPLUS_MM_HACKS */
else
inactive_ratio = 1;
}
@@ -2310,7 +2283,6 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
inactive_ratio, file);
#endif
return inactive * inactive_ratio < active;
}
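
The restored heuristic lets the active file list grow to int_sqrt(10 * gb)
times the inactive list once the LRU exceeds 1 GiB. A quick standalone
check of what that yields (int_sqrt re-implemented here as a stand-in for
the kernel's version):

#include <stdio.h>

static unsigned long int_sqrt(unsigned long x)
{
        unsigned long r = 0;

        while ((r + 1) * (r + 1) <= x)
                r++;
        return r;
}

int main(void)
{
        for (unsigned long gb = 1; gb <= 16; gb <<= 1)
                printf("%2lu GiB of file pages -> inactive_ratio = %lu\n",
                       gb, int_sqrt(10 * gb));
        return 0;
}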
@@ -2357,18 +2329,9 @@ static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
unsigned long anon, file;
unsigned long ap, fp;
enum lru_list lru;
#ifdef CONFIG_OPLUS_MM_HACKS
unsigned long totalswap = total_swap_pages;
#endif /* CONFIG_OPLUS_MM_HACKS */
#ifdef CONFIG_OPLUS_MM_HACKS
if (!current_is_kswapd())
swappiness = direct_vm_swappiness;
if (!sc->may_swap || (mem_cgroup_get_nr_swap_pages(memcg) <= totalswap>>6)) {
#else
/* If we have no swap space, do not bother scanning anon pages. */
if (!sc->may_swap || mem_cgroup_get_nr_swap_pages(memcg) <= 0) {
#endif /* CONFIG_OPLUS_MM_HACKS */
scan_balance = SCAN_FILE;
goto out;
}
@@ -5884,16 +5847,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
.priority = DEF_PRIORITY,
.may_writepage = !laptop_mode,
.may_unmap = 1,
#ifdef CONFIG_DIRECT_RECLAIM_FILE_PAGES_ONLY
.may_swap = 0,
#else
.may_swap = 1,
#endif
#ifdef CONFIG_ZSWAP
.swappiness = vm_swappiness / 2,
#else
.swappiness = vm_swappiness,
#endif
};
/*
@@ -5930,7 +5884,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
.may_unmap = 1,
.reclaim_idx = MAX_NR_ZONES - 1,
.may_swap = !noswap,
.swappiness = vm_swappiness,
};
unsigned long lru_pages;
@@ -5977,7 +5930,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = may_swap,
.swappiness = vm_swappiness,
};
/*
@@ -6179,7 +6131,6 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = 1,
.swappiness = vm_swappiness,
};
psi_memstall_enter(&pflags);
count_vm_event(PAGEOUTRUN);
@@ -6576,7 +6527,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
.may_writepage = 1,
.may_unmap = 1,
.may_swap = 1,
.swappiness = vm_swappiness,
.hibernation_mode = 1,
};
struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
@@ -6779,7 +6729,6 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
.gfp_mask = current_gfp_context(gfp_mask),
.order = order,
.swappiness = vm_swappiness,
.priority = NODE_RECLAIM_PRIORITY,
.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
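
With the forced scan_control swappiness gone, every reclaim path reads
vm_swappiness (now 60) through get_scan_count(), which in this kernel
generation weighs anon scanning at swappiness and file scanning at
200 - swappiness. A rough illustration of that bias (simplified; the
real code also scales by recent rotation counts and reclaim priority):

#include <stdio.h>

int main(void)
{
        for (int swappiness = 0; swappiness <= 100; swappiness += 25) {
                int anon_prio = swappiness;
                int file_prio = 200 - swappiness;

                printf("swappiness=%3d -> anon:file scan bias %d:%d\n",
                       swappiness, anon_prio, file_prio);
        }
        return 0;
}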

mm/zsmalloc.c

@@ -69,11 +69,7 @@
* A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
* pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
*/
#ifdef CONFIG_OPLUS_MM_HACKS
#define ZS_MAX_ZSPAGE_ORDER 3
#else
#define ZS_MAX_ZSPAGE_ORDER 2
#endif /* CONFIG_OPLUS_MM_HACKS */
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
#define ZS_HANDLE_SIZE (sizeof(unsigned long))
@@ -124,11 +120,7 @@
#define FULLNESS_BITS 2
#define CLASS_BITS 8
#ifdef CONFIG_OPLUS_MM_HACKS
#define ISOLATED_BITS (ZS_MAX_ZSPAGE_ORDER+1)
#else
#define ISOLATED_BITS 3
#endif /* CONFIG_OPLUS_MM_HACKS */
#define MAGIC_VAL_BITS 8
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
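
Back at order 2, a zspage chains at most 1 << 2 = 4 physical pages, and
ISOLATED_BITS returns to its fixed width of 3. A trivial check of the
macro arithmetic (userspace copy of the two definitions):

#include <stdio.h>

#define ZS_MAX_ZSPAGE_ORDER     2
#define ZS_MAX_PAGES_PER_ZSPAGE (1UL << ZS_MAX_ZSPAGE_ORDER)

int main(void)
{
        printf("a zspage spans at most %lu pages\n", ZS_MAX_PAGES_PER_ZSPAGE);
        return 0;
}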