Merge "mm: allow page poisoning to be enabled by default."

qctecmdr Service 2018-05-11 09:02:33 -07:00 committed by Gerrit - the friendly Code Review server
commit e45ac714d8
3 changed files with 59 additions and 21 deletions


@@ -63,6 +63,16 @@ config PAGE_POISONING
 	  If unsure, say N
 
+config PAGE_POISONING_ENABLE_DEFAULT
+	bool "Enable page poisoning by default?"
+	default n
+	depends on PAGE_POISONING
+	---help---
+	  Enable page poisoning of free pages by default? This value
+	  can be overridden by page_poison=off|on. This can be used
+	  to avoid passing the kernel parameter and leave the page
+	  poisoning feature enabled by default.
+
 config PAGE_POISONING_NO_SANITY
 	depends on PAGE_POISONING
 	bool "Only poison, don't sanity check"


@@ -7,7 +7,8 @@
 #include <linux/poison.h>
 #include <linux/ratelimit.h>
 
-static bool want_page_poisoning __read_mostly;
+static bool want_page_poisoning __read_mostly
+	= IS_ENABLED(CONFIG_PAGE_POISONING_ENABLE_DEFAULT);
 
 static int early_page_poison_param(char *buf)
 {
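For context, the default set above is still overridable at boot: later in the same file an early_param() hook parses page_poison=on|off into want_page_poisoning. A minimal sketch of that pairing, assuming the mainline-style strtobool() parser (details may differ in this tree):

/* Build-time default from Kconfig; page_poison=on|off on the kernel
 * command line flips it before the allocator starts poisoning pages. */
static bool want_page_poisoning __read_mostly
	= IS_ENABLED(CONFIG_PAGE_POISONING_ENABLE_DEFAULT);

static int early_page_poison_param(char *buf)
{
	if (!buf)
		return -EINVAL;
	return strtobool(buf, &want_page_poisoning);
}
early_param("page_poison", early_page_poison_param);

So with CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y the feature is enabled without any command-line argument, and page_poison=off still turns it back off, matching the Kconfig help text.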


@@ -1647,30 +1647,31 @@ int isolate_lru_page(struct page *page)
 	return ret;
 }
 
-/*
- * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
- * then get rescheduled. When there are a massive number of tasks doing page
- * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
- * the LRU list will go small and be scanned faster than necessary, leading to
- * unnecessary swapping, thrashing and OOM.
- */
-static int too_many_isolated(struct pglist_data *pgdat, int file,
-		struct scan_control *sc)
+static int __too_many_isolated(struct pglist_data *pgdat, int file,
+		struct scan_control *sc, bool stalled)
 {
 	unsigned long inactive, isolated;
 
-	if (current_is_kswapd())
-		return 0;
-
-	if (!sane_reclaim(sc))
-		return 0;
-
 	if (file) {
-		inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
-		isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
+		if (stalled) {
+			inactive = node_page_state_snapshot(pgdat,
+					NR_INACTIVE_FILE);
+			isolated = node_page_state_snapshot(pgdat,
+					NR_ISOLATED_FILE);
+		} else {
+			inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
+			isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
+		}
 	} else {
-		inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
-		isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
+		if (stalled) {
+			inactive = node_page_state_snapshot(pgdat,
+					NR_INACTIVE_ANON);
+			isolated = node_page_state_snapshot(pgdat,
+					NR_ISOLATED_ANON);
+		} else {
+			inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
+			isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
+		}
 	}
 
 	/*
@@ -1684,6 +1685,32 @@ static int too_many_isolated(struct pglist_data *pgdat, int file,
 	return isolated > inactive;
 }
 
+/*
+ * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
+ * then get rescheduled. When there are a massive number of tasks doing page
+ * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
+ * the LRU list will go small and be scanned faster than necessary, leading to
+ * unnecessary swapping, thrashing and OOM.
+ */
+static int too_many_isolated(struct pglist_data *pgdat, int file,
+		struct scan_control *sc, bool stalled)
+{
+	if (current_is_kswapd())
+		return 0;
+
+	if (!sane_reclaim(sc))
+		return 0;
+
+	if (unlikely(__too_many_isolated(pgdat, file, sc, false))) {
+		if (stalled)
+			return __too_many_isolated(pgdat, file, sc, stalled);
+		else
+			return 1;
+	}
+
+	return 0;
+}
+
 static noinline_for_stack void
 putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 {
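The split above is a two-stage check: the common path uses node_page_state(), which reads the node counters cheaply but can lag behind because updates are batched per CPU, and only a caller that is about to stall re-runs the test with node_page_state_snapshot(), which folds the per-CPU deltas back in for an exact answer before the task gives up. For reference, this is roughly how node_page_state_snapshot() is defined in include/linux/vmstat.h in kernels of this era (exact field names may vary between versions):

/* Like node_page_state(), but also adds the not-yet-folded per-CPU
 * deltas, trading a walk over online CPUs for an accurate value. */
static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat,
					enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);
#ifdef CONFIG_SMP
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(pgdat->per_cpu_nodestats,
				 cpu)->vm_node_stat_diff[item];
	if (x < 0)
		x = 0;
#endif
	return x;
}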
@@ -1771,7 +1798,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 	bool stalled = false;
 
-	while (unlikely(too_many_isolated(pgdat, file, sc))) {
+	while (unlikely(too_many_isolated(pgdat, file, sc, stalled))) {
 		if (stalled)
 			return 0;
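The hunk above is truncated at the early return; for context, in mainline kernels of this vintage the throttle loop continues roughly as sketched below (the msleep/fatal-signal details are taken from upstream and may differ slightly in this tree). The first failed pass sleeps and sets stalled, so the retry asks too_many_isolated() for the precise snapshot-based verdict before giving up:

	while (unlikely(too_many_isolated(pgdat, file, sc, stalled))) {
		if (stalled)
			return 0;

		/* wait a bit for the reclaimer, then recheck precisely */
		msleep(100);
		stalled = true;

		/* we are about to die and free our memory, return now */
		if (fatal_signal_pending(current))
			return SWAP_CLUSTER_MAX;
	}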