Mirror of https://github.com/rd-stuffs/msm-4.14.git (synced 2025-02-20 11:45:48 +08:00)
Revert "mm: shuffle initial free memory to improve memory-side-cache utilization"
This reverts commit 6e8d8f0cd2dfd44c9cf01ba432f516577c6f99a7.

Change-Id: I08c5f066bfe4ff8c34f06f3b6ad50c216e9d076d
Signed-off-by: Richard Raya <rdxzv.dev@gmail.com>
This commit is contained in:
parent b6eaf5c762
commit fd082a8cad
Documentation/admin-guide/kernel-parameters.txt — 5153 lines (new file)
File diff suppressed because it is too large
include/linux/list.h
@@ -150,23 +150,6 @@ static inline void list_replace_init(struct list_head *old,
 	INIT_LIST_HEAD(old);
 }
 
-/**
- * list_swap - replace entry1 with entry2 and re-add entry1 at entry2's position
- * @entry1: the location to place entry2
- * @entry2: the location to place entry1
- */
-static inline void list_swap(struct list_head *entry1,
-			     struct list_head *entry2)
-{
-	struct list_head *pos = entry2->prev;
-
-	list_del(entry2);
-	list_replace(entry1, entry2);
-	if (pos == entry1)
-		pos = entry2;
-	list_add(entry1, pos);
-}
-
 /**
  * list_del_init - deletes entry from list and reinitialize it.
  * @entry: the element to delete from the list.
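For context on what is removed above: list_swap() re-links two entries so that each ends up in the other's position on the list. A minimal userspace sketch of the same doubly-linked-list semantics (standalone types for illustration, not the kernel's <linux/list.h>):

/* Userspace sketch of list_swap() semantics; standalone list type. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_replace(struct list_head *old, struct list_head *new)
{
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}

/* Same logic as the removed helper: entry2 takes entry1's slot, then
 * entry1 is re-added where entry2 used to be. */
static void list_swap(struct list_head *entry1, struct list_head *entry2)
{
	struct list_head *pos = entry2->prev;

	list_del(entry2);
	list_replace(entry1, entry2);
	if (pos == entry1)
		pos = entry2;
	list_add(entry1, pos);
}

struct item { struct list_head node; int val; };	/* node is first member */

int main(void)
{
	struct list_head head;
	struct item a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 };
	struct list_head *p;

	list_init(&head);
	list_add(&c.node, &head);	/* list: c */
	list_add(&b.node, &head);	/* list: b c */
	list_add(&a.node, &head);	/* list: a b c */

	list_swap(&a.node, &c.node);	/* list: c b a */

	for (p = head.next; p != &head; p = p->next)
		printf("%d ", ((struct item *)p)->val);	/* cast valid: node first */
	printf("\n");
	return 0;
}

Compiled as plain C this prints "3 2 1": the head and tail entries have traded places, which is exactly what the shuffle code used list_swap() for on buddy freelists.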
include/linux/mmzone.h
@@ -1516,7 +1516,6 @@ void sparse_init(void);
 #else
 #define sparse_init()	do {} while (0)
 #define sparse_index_init(_sec, _nid)  do {} while (0)
-#define pfn_present pfn_valid
 #endif /* CONFIG_SPARSEMEM */
 
 /*
init/Kconfig — 24 lines changed

@@ -1851,30 +1851,6 @@ config SLAB_FREELIST_HARDENED
 	  sacrifies to harden the kernel slab allocator against common
 	  freelist exploit methods.
 
-config SHUFFLE_PAGE_ALLOCATOR
-	bool "Page allocator randomization"
-	default SLAB_FREELIST_RANDOM && ACPI_NUMA
-	help
-	  Randomization of the page allocator improves the average
-	  utilization of a direct-mapped memory-side-cache. See section
-	  5.2.27 Heterogeneous Memory Attribute Table (HMAT) in the ACPI
-	  6.2a specification for an example of how a platform advertises
-	  the presence of a memory-side-cache. There are also incidental
-	  security benefits as it reduces the predictability of page
-	  allocations to compliment SLAB_FREELIST_RANDOM, but the
-	  default granularity of shuffling on the "MAX_ORDER - 1" i.e,
-	  10th order of pages is selected based on cache utilization
-	  benefits on x86.
-
-	  While the randomization improves cache utilization it may
-	  negatively impact workloads on platforms without a cache. For
-	  this reason, by default, the randomization is enabled only
-	  after runtime detection of a direct-mapped memory-side-cache.
-	  Otherwise, the randomization may be force enabled with the
-	  'page_alloc.shuffle' kernel command line parameter.
-
-	  Say Y if unsure.
-
 config SLUB_CPU_PARTIAL
 	default y
 	depends on SLUB && SMP
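To make the granularity described in the removed help text concrete: with 4 KiB pages and MAX_ORDER = 11 (both assumptions for illustration, not taken from this tree), shuffling at "MAX_ORDER - 1" means order-10 blocks, i.e. 1024 pages or 4 MiB per swap. A quick standalone check of that arithmetic:

/* Arithmetic check of the shuffle granularity; PAGE_SIZE = 4 KiB and
 * MAX_ORDER = 11 are illustrative assumptions, not values from this tree. */
#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;		/* assumed 4 KiB pages */
	const int max_order = 11;			/* assumed MAX_ORDER */
	const int shuffle_order = max_order - 1;	/* "MAX_ORDER - 1", i.e. order 10 */
	const unsigned long pages = 1UL << shuffle_order;

	printf("shuffle order: %d\n", shuffle_order);		/* 10 */
	printf("pages per shuffled block: %lu\n", pages);	/* 1024 */
	printf("MiB per shuffled block: %lu\n",
	       pages * page_size >> 20);			/* 4 */
	return 0;
}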
mm/Makefile
@@ -33,7 +33,7 @@ mmu-$(CONFIG_MMU)	+= process_vm_access.o
 endif
 
 obj-y			:= filemap.o mempool.o oom_kill.o fadvise.o \
-			   maccess.o page-writeback.o \
+			   maccess.o page_alloc.o page-writeback.o \
 			   readahead.o swap.o truncate.o vmscan.o shmem.o \
 			   util.o mmzone.o vmstat.o backing-dev.o \
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
@@ -41,11 +41,6 @@ obj-y			:= filemap.o mempool.o oom_kill.o fadvise.o \
 			   interval_tree.o list_lru.o workingset.o \
 			   debug.o $(mmu-y) showmem.o vmpressure.o
 
-# Give 'page_alloc' its own module-parameter namespace
-page-alloc-y := page_alloc.o
-page-alloc-$(CONFIG_SHUFFLE_PAGE_ALLOCATOR) += shuffle.o
-
-obj-y += page-alloc.o
 obj-y += init-mm.o
 
 ifdef CONFIG_NO_BOOTMEM
mm/memory_hotplug.c
@@ -41,7 +41,6 @@
 #include <asm/tlbflush.h>
 
 #include "internal.h"
-#include "shuffle.h"
 
 /*
  * online_page_callback contains pointer to current page onlining function.
@@ -960,8 +959,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_type)
 	zone->zone_pgdat->node_present_pages += onlined_pages;
 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 
-	shuffle_zone(zone);
-
 	if (onlined_pages) {
 		node_states_set_node(nid, &arg);
 		if (need_zonelists_rebuild)
mm/page_alloc.c
@@ -77,7 +77,6 @@
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
 #include "internal.h"
-#include "shuffle.h"
 
 atomic_long_t kswapd_waiters = ATOMIC_LONG_INIT(0);
 atomic_long_t kshrinkd_waiters = ATOMIC_LONG_INIT(0);
@@ -1662,9 +1661,9 @@ free_range:
 void __init page_alloc_init_late(void)
 {
 	struct zone *zone;
-	int nid;
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+	int nid;
 
 	/* There will be num_node_state(N_MEMORY) threads */
 	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
@@ -1683,9 +1682,6 @@ void __init page_alloc_init_late(void)
 	memblock_discard();
 #endif
 
-	for_each_node_state(nid, N_MEMORY)
-		shuffle_free_memory(NODE_DATA(nid));
-
 	for_each_populated_zone(zone)
 		set_zone_contiguous(zone);
 }
mm/shuffle.c — 184 lines (file deleted)

@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright(c) 2018 Intel Corporation. All rights reserved.
-
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/mmzone.h>
-#include <linux/random.h>
-#include <linux/moduleparam.h>
-#include "internal.h"
-#include "shuffle.h"
-
-DEFINE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
-static unsigned long shuffle_state __ro_after_init;
-
-/*
- * Depending on the architecture, module parameter parsing may run
- * before, or after the cache detection. SHUFFLE_FORCE_DISABLE prevents,
- * or reverts the enabling of the shuffle implementation. SHUFFLE_ENABLE
- * attempts to turn on the implementation, but aborts if it finds
- * SHUFFLE_FORCE_DISABLE already set.
- */
-__meminit void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
-{
-	if (ctl == SHUFFLE_FORCE_DISABLE)
-		set_bit(SHUFFLE_FORCE_DISABLE, &shuffle_state);
-
-	if (test_bit(SHUFFLE_FORCE_DISABLE, &shuffle_state)) {
-		if (test_and_clear_bit(SHUFFLE_ENABLE, &shuffle_state))
-			static_branch_disable(&page_alloc_shuffle_key);
-	} else if (ctl == SHUFFLE_ENABLE
-			&& !test_and_set_bit(SHUFFLE_ENABLE, &shuffle_state))
-		static_branch_enable(&page_alloc_shuffle_key);
-}
-
-static bool shuffle_param;
-extern int shuffle_show(char *buffer, const struct kernel_param *kp)
-{
-	return sprintf(buffer, "%c\n", test_bit(SHUFFLE_ENABLE, &shuffle_state)
-			? 'Y' : 'N');
-}
-
-static __meminit int shuffle_store(const char *val,
-		const struct kernel_param *kp)
-{
-	int rc = param_set_bool(val, kp);
-
-	if (rc < 0)
-		return rc;
-	if (shuffle_param)
-		page_alloc_shuffle(SHUFFLE_ENABLE);
-	else
-		page_alloc_shuffle(SHUFFLE_FORCE_DISABLE);
-	return 0;
-}
-module_param_call(shuffle, shuffle_store, shuffle_show, &shuffle_param, 0400);
-
-/*
- * For two pages to be swapped in the shuffle, they must be free (on a
- * 'free_area' lru), have the same order, and have the same migratetype.
- */
-static struct page * __meminit shuffle_valid_page(unsigned long pfn, int order)
-{
-	struct page *page;
-
-	/*
-	 * Given we're dealing with randomly selected pfns in a zone we
-	 * need to ask questions like...
-	 */
-
-	/* ...is the pfn even in the memmap? */
-	if (!pfn_valid_within(pfn))
-		return NULL;
-
-	/* ...is the pfn in a present section or a hole? */
-	if (!pfn_present(pfn))
-		return NULL;
-
-	/* ...is the page free and currently on a free_area list? */
-	page = pfn_to_page(pfn);
-	if (!PageBuddy(page))
-		return NULL;
-
-	/*
-	 * ...is the page on the same list as the page we will
-	 * shuffle it with?
-	 */
-	if (page_order(page) != order)
-		return NULL;
-
-	return page;
-}
-
-/*
- * Fisher-Yates shuffle the freelist which prescribes iterating through an
- * array, pfns in this case, and randomly swapping each entry with another in
- * the span, end_pfn - start_pfn.
- *
- * To keep the implementation simple it does not attempt to correct for sources
- * of bias in the distribution, like modulo bias or pseudo-random number
- * generator bias. I.e. the expectation is that this shuffling raises the bar
- * for attacks that exploit the predictability of page allocations, but need not
- * be a perfect shuffle.
- */
-#define SHUFFLE_RETRY 10
-void __meminit __shuffle_zone(struct zone *z)
-{
-	unsigned long i, flags;
-	unsigned long start_pfn = z->zone_start_pfn;
-	unsigned long end_pfn = zone_end_pfn(z);
-	const int order = SHUFFLE_ORDER;
-	const int order_pages = 1 << order;
-
-	spin_lock_irqsave(&z->lock, flags);
-	start_pfn = ALIGN(start_pfn, order_pages);
-	for (i = start_pfn; i < end_pfn; i += order_pages) {
-		unsigned long j;
-		int migratetype, retry;
-		struct page *page_i, *page_j;
-
-		/*
-		 * We expect page_i, in the sub-range of a zone being added
-		 * (@start_pfn to @end_pfn), to more likely be valid compared to
-		 * page_j randomly selected in the span @zone_start_pfn to
-		 * @spanned_pages.
-		 */
-		page_i = shuffle_valid_page(i, order);
-		if (!page_i)
-			continue;
-
-		for (retry = 0; retry < SHUFFLE_RETRY; retry++) {
-			/*
-			 * Pick a random order aligned page in the zone span as
-			 * a swap target. If the selected pfn is a hole, retry
-			 * up to SHUFFLE_RETRY attempts find a random valid pfn
-			 * in the zone.
-			 */
-			j = z->zone_start_pfn +
-				ALIGN_DOWN(get_random_long() % z->spanned_pages,
-						order_pages);
-			page_j = shuffle_valid_page(j, order);
-			if (page_j && page_j != page_i)
-				break;
-		}
-		if (retry >= SHUFFLE_RETRY) {
-			pr_debug("%s: failed to swap %#lx\n", __func__, i);
-			continue;
-		}
-
-		/*
-		 * Each migratetype corresponds to its own list, make sure the
-		 * types match otherwise we're moving pages to lists where they
-		 * do not belong.
-		 */
-		migratetype = get_pageblock_migratetype(page_i);
-		if (get_pageblock_migratetype(page_j) != migratetype) {
-			pr_debug("%s: migratetype mismatch %#lx\n", __func__, i);
-			continue;
-		}
-
-		list_swap(&page_i->lru, &page_j->lru);
-
-		pr_debug("%s: swap: %#lx -> %#lx\n", __func__, i, j);
-
-		/* take it easy on the zone lock */
-		if ((i % (100 * order_pages)) == 0) {
-			spin_unlock_irqrestore(&z->lock, flags);
-			cond_resched();
-			spin_lock_irqsave(&z->lock, flags);
-		}
-	}
-	spin_unlock_irqrestore(&z->lock, flags);
-}
-
-/**
- * shuffle_free_memory - reduce the predictability of the page allocator
- * @pgdat: node page data
- */
-void __meminit __shuffle_free_memory(pg_data_t *pgdat)
-{
-	struct zone *z;
-
-	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
-		shuffle_zone(z);
-}
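The deleted __shuffle_zone() is not a textbook Fisher-Yates pass: it walks fixed-order blocks, picks a random partner anywhere in the zone span, retries up to SHUFFLE_RETRY times when the partner is invalid, and skips the swap otherwise. A standalone userspace sketch of that strategy on a plain array, with -1 holes standing in for pfns that would fail shuffle_valid_page() (not kernel code):

/* Userspace sketch of the retry-based shuffle strategy used above. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define SHUFFLE_RETRY 10

static int valid(const int *a, size_t i) { return a[i] >= 0; }

static void shuffle(int *a, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		size_t j = 0;
		int retry;

		if (!valid(a, i))		/* skip holes, like shuffle_valid_page() */
			continue;

		/* Pick a random partner; give up after SHUFFLE_RETRY misses. */
		for (retry = 0; retry < SHUFFLE_RETRY; retry++) {
			j = (size_t)rand() % n;
			if (valid(a, j) && j != i)
				break;
		}
		if (retry >= SHUFFLE_RETRY)
			continue;

		int tmp = a[i]; a[i] = a[j]; a[j] = tmp;	/* the list_swap() step */
	}
}

int main(void)
{
	int blocks[] = { 0, 1, -1, 3, 4, -1, 6, 7 };	/* -1 marks a hole */
	size_t n = sizeof(blocks) / sizeof(blocks[0]);

	srand((unsigned)time(NULL));
	shuffle(blocks, n);
	for (size_t i = 0; i < n; i++)
		printf("%d ", blocks[i]);
	printf("\n");
	return 0;
}

Like the kernel code, the sketch makes no attempt to remove modulo or PRNG bias; the point is to make allocation order unpredictable, not to produce a perfect shuffle.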
mm/shuffle.h — 52 lines (file deleted)

@@ -1,52 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright(c) 2018 Intel Corporation. All rights reserved.
-#ifndef _MM_SHUFFLE_H
-#define _MM_SHUFFLE_H
-#include <linux/jump_label.h>
-
-/*
- * SHUFFLE_ENABLE is called from the command line enabling path, or by
- * platform-firmware enabling that indicates the presence of a
- * direct-mapped memory-side-cache. SHUFFLE_FORCE_DISABLE is called from
- * the command line path and overrides any previous or future
- * SHUFFLE_ENABLE.
- */
-enum mm_shuffle_ctl {
-	SHUFFLE_ENABLE,
-	SHUFFLE_FORCE_DISABLE,
-};
-
-#define SHUFFLE_ORDER (MAX_ORDER-1)
-
-#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
-DECLARE_STATIC_KEY_FALSE(page_alloc_shuffle_key);
-extern void page_alloc_shuffle(enum mm_shuffle_ctl ctl);
-extern void __shuffle_free_memory(pg_data_t *pgdat);
-static inline void shuffle_free_memory(pg_data_t *pgdat)
-{
-	if (!static_branch_unlikely(&page_alloc_shuffle_key))
-		return;
-	__shuffle_free_memory(pgdat);
-}
-
-extern void __shuffle_zone(struct zone *z);
-static inline void shuffle_zone(struct zone *z)
-{
-	if (!static_branch_unlikely(&page_alloc_shuffle_key))
-		return;
-	__shuffle_zone(z);
-}
-#else
-static inline void shuffle_free_memory(pg_data_t *pgdat)
-{
-}
-
-static inline void shuffle_zone(struct zone *z)
-{
-}
-
-static inline void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
-{
-}
-#endif
-#endif /* _MM_SHUFFLE_H */
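The removed header relies on a common kernel pattern: when CONFIG_SHUFFLE_PAGE_ALLOCATOR is off, the hooks compile to empty inline stubs, so call sites such as online_pages() and page_alloc_init_late() need no #ifdef; when it is on, a static key keeps the disabled path to a single patched branch. A hedged userspace sketch of the stub half of that pattern (the static key is approximated with a plain flag; all names here are illustrative):

/* Sketch of the config-gated stub pattern from mm/shuffle.h: callers always
 * call shuffle_zone(); the disabled build compiles it away.  The kernel's
 * static_branch_unlikely() is reduced to a plain runtime flag here. */
#include <stdio.h>
#include <stdbool.h>

struct zone { const char *name; };

#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR

static bool shuffle_enabled;	/* stand-in for page_alloc_shuffle_key */

static void __shuffle_zone(struct zone *z)
{
	printf("shuffling %s\n", z->name);
}

static inline void shuffle_zone(struct zone *z)
{
	if (!shuffle_enabled)
		return;
	__shuffle_zone(z);
}

#else  /* !CONFIG_SHUFFLE_PAGE_ALLOCATOR */

static inline void shuffle_zone(struct zone *z) { }	/* compiles to nothing */

#endif

int main(void)
{
#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
	shuffle_enabled = true;		/* e.g. set from page_alloc.shuffle=1 */
#endif
	struct zone normal = { "Normal" };

	shuffle_zone(&normal);		/* no #ifdef needed at the call site */
	return 0;
}

Building with -DCONFIG_SHUFFLE_PAGE_ALLOCATOR exercises the enabled path; without it the call is an empty inline, which is why the revert can simply delete the header and the Kconfig entry once the call sites are gone.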