mm: merge Samsung mm hacks

Forward-port of Samsung's memory-management changes to v4.14. The patch adds
four Kconfig options (DIRECT_RECLAIM_FILE_PAGES_ONLY, INCREASE_MAXIMUM_SWAPPINESS,
FIX_INACTIVE_RATIO, SWAP_ENABLE_READAHEAD), raises the vm.swappiness sysctl
limit to 200 when enabled, allows swap readahead to be compiled out for
in-memory backends such as zram, and plumbs a per-scan-control swappiness
through the reclaim paths, halving it for direct reclaim when ZSWAP is enabled.

Signed-off-by: Park Ju Hyung <qkrwngud825@gmail.com>
Signed-off-by: Alex Winkowski <dereference23@outlook.com>
Change-Id: I5ea9f3260f293e3a80e45fdb12a6eb738b3ab1e2
Signed-off-by: Cyber Knight <cyberknight755@gmail.com>
Signed-off-by: azrim <mirzaspc@gmail.com>
Park Ju Hyung 2019-06-22 21:37:28 +09:00 committed by azrim
parent ace9aad620
commit f9daeaa423
4 changed files with 72 additions and 5 deletions

kernel/sysctl.c

@@ -135,6 +135,9 @@ static int one_thousand = 1000;
#ifdef CONFIG_SCHED_WALT
static int two_million = 2000000;
#endif
#ifdef CONFIG_INCREASE_MAXIMUM_SWAPPINESS
static int max_swappiness = 200;
#endif
#ifdef CONFIG_PRINTK
static int ten_thousand = 10000;
#endif
@@ -1574,7 +1577,11 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
#ifdef CONFIG_INCREASE_MAXIMUM_SWAPPINESS
.extra2 = &max_swappiness,
#else
.extra2 = &one_hundred,
#endif
},
{
.procname = "want_old_faultaround_pte",

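With the kernel/sysctl.c change above, the upper clamp that proc_dointvec_minmax
applies to vm.swappiness becomes 200 instead of 100 once
CONFIG_INCREASE_MAXIMUM_SWAPPINESS is enabled. A minimal userspace sketch (not
part of the patch) to exercise the wider range; on a kernel built without the
option the write is rejected with EINVAL:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/swappiness", "w");

	if (!f) {
		perror("open /proc/sys/vm/swappiness");
		return 1;
	}
	fprintf(f, "200\n");
	/* The buffered write reaches the kernel on fclose(); EINVAL here
	 * means the sysctl clamp is still [0, 100]. */
	if (fclose(f) != 0)
		fprintf(stderr, "rejected: %s\n", strerror(errno));
	else
		puts("vm.swappiness is now 200");
	return 0;
}

Writing the sysctl requires root, so run the sketch with appropriate privileges.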
mm/Kconfig

@@ -650,6 +650,31 @@ config ZSMALLOC_STAT
information to userspace via debugfs.
If unsure, say N.
config DIRECT_RECLAIM_FILE_PAGES_ONLY
bool "Reclaim file pages only on direct reclaim path"
depends on ZSWAP
default n
config INCREASE_MAXIMUM_SWAPPINESS
bool "Allow swappiness to be set up to 200"
depends on ZSWAP
default n
config FIX_INACTIVE_RATIO
bool "Fix active:inactive anon ratio to 1:1"
depends on ZSWAP
default n
config SWAP_ENABLE_READAHEAD
bool "Enable readahead on page swap in"
depends on SWAP
default y
help
When a page fault occurs on a swapped-out page, up to SWAP_CLUSTER_MAX
adjacent pages are also paged in, in the expectation that they will be
used in the near future. This behaviour helps on disk-based swap
devices, but not with in-memory compression (e.g. zram).
config GENERIC_EARLY_IOREMAP
bool

mm/swap_state.c

@@ -555,6 +555,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
return retpage;
}
#ifdef CONFIG_SWAP_ENABLE_READAHEAD
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
unsigned long offset,
int hits,
@@ -615,6 +616,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
return pages;
}
#endif
/**
* swap_cluster_readahead - swap in pages in hope we need them soon
@@ -641,15 +643,17 @@ static unsigned long swapin_nr_pages(unsigned long offset)
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
unsigned long addr = vmf->address;
bool do_poll = true;
#ifdef CONFIG_SWAP_ENABLE_READAHEAD
struct page *page;
unsigned long entry_offset = swp_offset(entry);
unsigned long offset = entry_offset;
unsigned long start_offset, end_offset;
unsigned long mask;
struct blk_plug plug;
bool do_poll = true, page_allocated;
struct vm_area_struct *vma = vmf->vma;
unsigned long addr = vmf->address;
bool page_allocated;
mask = swapin_nr_pages(offset) - 1;
if (!mask)
@@ -684,6 +688,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
lru_add_drain(); /* Push any new pages onto the LRU now */
skip:
#endif
return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}
@@ -722,6 +727,7 @@ void exit_swap_address_space(unsigned int type)
kvfree(spaces);
}
#ifdef CONFIG_SWAP_ENABLE_READAHEAD
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
unsigned long faddr,
unsigned long lpfn,
@@ -802,12 +808,15 @@ static void swap_ra_info(struct vm_fault *vmf,
#endif
pte_unmap(orig_pte);
}
#endif
struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
struct vm_fault *vmf)
{
struct blk_plug plug;
struct vm_area_struct *vma = vmf->vma;
bool do_poll = true;
#ifdef CONFIG_SWAP_ENABLE_READAHEAD
struct blk_plug plug;
struct page *page;
pte_t *pte, pentry;
swp_entry_t entry;
@@ -819,6 +828,8 @@ struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
if (ra_info.win == 1)
goto skip;
do_poll = false;
blk_start_plug(&plug);
for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
i++, pte++) {
@@ -847,8 +858,9 @@ struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
blk_finish_plug(&plug);
lru_add_drain();
skip:
#endif
return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
ra_info.win == 1);
do_poll);
}
/**

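With CONFIG_SWAP_ENABLE_READAHEAD=n the ifdefs above compile the cluster and
VMA readahead machinery out of mm/swap_state.c, and both entry points collapse
to a single synchronous swap-in of the faulting page, which is the behaviour
the Kconfig help text recommends for in-memory backends such as zram. A rough
sketch of the preprocessed result (not literal patch code):

struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				    struct vm_fault *vmf)
{
	/* No readahead: fetch only the page that actually faulted. */
	return read_swap_cache_async(entry, gfp_mask, vmf->vma, vmf->address,
				     true /* do_poll */);
}

struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return read_swap_cache_async(fentry, gfp_mask, vmf->vma, vmf->address,
				     true /* do_poll */);
}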
mm/vmscan.c

@@ -89,6 +89,8 @@ struct scan_control {
*/
struct mem_cgroup *target_mem_cgroup;
int swappiness;
/* Scan (total_size >> priority) pages at once */
int priority;
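Every reclaim entry point below seeds the new swappiness field from
vm_swappiness (or half of it), but the hunk that actually consumes
sc->swappiness is not part of the excerpt shown here. A purely hypothetical
sketch of the usual pattern, assuming get_scan_count() would prefer the
per-scan-control value; the helper name is illustrative only:

/* Hypothetical, not taken from this diff: fall back to the memcg-wide
 * swappiness when the scan_control does not carry one. */
static int effective_swappiness(struct scan_control *sc,
				struct mem_cgroup *memcg)
{
	return sc->swappiness ? sc->swappiness : mem_cgroup_swappiness(memcg);
}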
@@ -1447,6 +1449,7 @@ unsigned long reclaim_pages_from_list(struct list_head *page_list,
.may_unmap = 1,
.may_swap = 1,
.target_vma = vma,
.swappiness = vm_swappiness,
};
unsigned long nr_reclaimed;
@@ -2204,8 +2207,10 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
enum lru_list inactive_lru = file * LRU_FILE;
unsigned long inactive, active;
unsigned long inactive_ratio;
#ifndef CONFIG_FIX_INACTIVE_RATIO
unsigned long refaults;
unsigned long gb;
#endif
/*
* If we don't have swap space, anonymous page deactivation
@@ -2217,6 +2222,9 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
inactive = lruvec_lru_size(lruvec, inactive_lru, sc->reclaim_idx);
active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
#ifdef CONFIG_FIX_INACTIVE_RATIO
inactive_ratio = 1;
#else
/*
* When refaults are being observed, it means a new workingset
* is being established. Disable active list protection to get
@@ -2238,6 +2246,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
inactive_ratio, file);
#endif
return inactive * inactive_ratio < active;
}
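With CONFIG_FIX_INACTIVE_RATIO=y the refault and list-size heuristics above
are compiled out, inactive_ratio is pinned to 1, and the check reduces to a
plain comparison, forcing a 1:1 active:inactive anon balance regardless of
workingset refaults. A small sketch of what the tail of the function boils
down to (the helper name is illustrative, not from the patch):

/* Sketch: with CONFIG_FIX_INACTIVE_RATIO=y the inactive list is considered
 * "low" as soon as it is smaller than the active list. */
static bool inactive_list_is_low_fixed(unsigned long inactive,
				       unsigned long active)
{
	const unsigned long inactive_ratio = 1;

	return inactive * inactive_ratio < active;
}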
@@ -5792,7 +5801,16 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
.priority = DEF_PRIORITY,
.may_writepage = !laptop_mode,
.may_unmap = 1,
#ifdef CONFIG_DIRECT_RECLAIM_FILE_PAGES_ONLY
.may_swap = 0,
#else
.may_swap = 1,
#endif
#ifdef CONFIG_ZSWAP
.swappiness = vm_swappiness / 2,
#else
.swappiness = vm_swappiness,
#endif
};
/*
@@ -5829,6 +5847,7 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
.may_unmap = 1,
.reclaim_idx = MAX_NR_ZONES - 1,
.may_swap = !noswap,
.swappiness = vm_swappiness,
};
unsigned long lru_pages;
@@ -5875,6 +5894,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = may_swap,
.swappiness = vm_swappiness,
};
/*
@@ -6076,6 +6096,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = 1,
.swappiness = vm_swappiness,
};
psi_memstall_enter(&pflags);
count_vm_event(PAGEOUTRUN);
@@ -6472,6 +6493,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
.may_writepage = 1,
.may_unmap = 1,
.may_swap = 1,
.swappiness = vm_swappiness,
.hibernation_mode = 1,
};
struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
@@ -6674,6 +6696,7 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned in
.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
.gfp_mask = current_gfp_context(gfp_mask),
.order = order,
.swappiness = vm_swappiness,
.priority = NODE_RECLAIM_PRIORITY,
.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
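Worked example, assuming the default vm_swappiness of 60: kswapd
(balance_pgdat), memcg reclaim, hibernation shrinking and node reclaim above
all run with .swappiness = 60, while direct reclaim through
try_to_free_pages() runs with 30 when CONFIG_ZSWAP=y (60 / 2, integer
division) and, with CONFIG_DIRECT_RECLAIM_FILE_PAGES_ONLY=y, sets
.may_swap = 0 so it reclaims file-backed pages only. Presumably the intent is
to keep zswap compression work off the allocation fast path and leave most
anonymous reclaim to kswapd; the diff itself does not state this.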