Revert "mm: move buddy list manipulations into helpers"

This reverts commit be1968b3980cb973d3034605735ab12e1fa4672a.

Change-Id: I203cdaca4f6fa23584a7b4c1ea15c73ff2137227
Signed-off-by: Richard Raya <rdxzv.dev@gmail.com>
Richard Raya 2024-09-29 18:32:48 -03:00
parent 555c41b976
commit b6eaf5c762
5 changed files with 48 additions and 73 deletions
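
Context for review: the reverted commit had folded each buddy free-list operation and its nr_free accounting into a single helper (add_to_free_area(), del_page_from_free_area(), and friends; see the mmzone.h hunk below). This revert open-codes both steps again at every call site. Below is a minimal, hypothetical userspace sketch of the invariant the call sites must now maintain by hand; the list implementation and types are simplified stand-ins, not the kernel's:

/*
 * Hypothetical userspace model (not kernel code) of the bookkeeping rule
 * in play: every insertion into or removal from a free list must be paired
 * with an update of nr_free.
 */
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static int list_empty(const struct list_head *h) { return h->next == h; }

struct page { struct list_head lru; unsigned long private; };
struct free_area { struct list_head free_list; unsigned long nr_free; };

/* The shape of the helper this revert removes: one call, both steps. */
static void del_page_from_free_area(struct page *page, struct free_area *area)
{
	list_del(&page->lru);
	page->private = 0;	/* stands in for rmv_page_order() */
	area->nr_free--;
}

int main(void)
{
	struct free_area area = { .nr_free = 0 };
	struct page page = { .private = 2 };	/* pretend order-2 buddy */

	INIT_LIST_HEAD(&area.free_list);

	/* Open-coded form the revert restores: list op plus counter, by hand. */
	list_add(&page.lru, &area.free_list);
	area.nr_free++;

	del_page_from_free_area(&page, &area);
	printf("empty=%d nr_free=%lu\n", list_empty(&area.free_list), area.nr_free);
	return 0;
}

If a list_add()/list_del() and its nr_free update ever get separated at a call site, the per-order free counts drift from the lists; that coupling is what the helpers enforced, and the burden this revert takes back on.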

--- a/include/linux/mm.h
+++ b/include/linux/mm.h

@@ -451,6 +451,9 @@ struct vm_operations_struct {
 struct mmu_gather;
 struct inode;
 
+#define page_private(page)		((page)->private)
+#define set_page_private(page, v)	((page)->private = (v))
+
 #if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
 static inline int pmd_devmap(pmd_t pmd)
 {

--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h

@@ -226,9 +226,6 @@ struct page {
 #define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
 #define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
 
-#define page_private(page)		((page)->private)
-#define set_page_private(page, v)	((page)->private = (v))
-
 struct page_frag_cache {
 	void * va;
 #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)

--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h

@@ -18,8 +18,6 @@
 #include <linux/pageblock-flags.h>
 #include <linux/page-flags-layout.h>
 #include <linux/atomic.h>
-#include <linux/mm_types.h>
-#include <linux/page-flags.h>
 #include <asm/page.h>
 
 /* Free memory management - zoned buddy allocator. */
@@ -106,50 +104,6 @@ struct free_area {
 	unsigned long		nr_free;
 };
 
-/* Used for pages not on another list */
-static inline void add_to_free_area(struct page *page, struct free_area *area,
-			     int migratetype)
-{
-	list_add(&page->lru, &area->free_list[migratetype]);
-	area->nr_free++;
-}
-
-/* Used for pages not on another list */
-static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
-				  int migratetype)
-{
-	list_add_tail(&page->lru, &area->free_list[migratetype]);
-	area->nr_free++;
-}
-
-/* Used for pages which are on another list */
-static inline void move_to_free_area(struct page *page, struct free_area *area,
-			     int migratetype)
-{
-	list_move(&page->lru, &area->free_list[migratetype]);
-}
-
-static inline struct page *get_page_from_free_area(struct free_area *area,
-					    int migratetype)
-{
-	return list_first_entry_or_null(&area->free_list[migratetype],
-					struct page, lru);
-}
-
-static inline void del_page_from_free_area(struct page *page,
-					   struct free_area *area)
-{
-	list_del(&page->lru);
-	__ClearPageBuddy(page);
-	set_page_private(page, 0);
-	area->nr_free--;
-}
-
-static inline bool free_area_empty(struct free_area *area, int migratetype)
-{
-	return list_empty(&area->free_list[migratetype]);
-}
-
 struct pglist_data;
 
 /*

--- a/mm/compaction.c
+++ b/mm/compaction.c

@@ -1368,13 +1368,13 @@ static enum compact_result __compact_finished(struct zone *zone,
 		bool can_steal;
 
 		/* Job done if page is free of the right migratetype */
-		if (!free_area_empty(area, migratetype))
+		if (!list_empty(&area->free_list[migratetype]))
 			return COMPACT_SUCCESS;
 
 #ifdef CONFIG_CMA
 		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
 		if (migratetype == MIGRATE_MOVABLE &&
-		    !free_area_empty(area, MIGRATE_CMA))
+		    !list_empty(&area->free_list[MIGRATE_CMA]))
 			return COMPACT_SUCCESS;
 #endif
 		/*

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c

@@ -804,6 +804,12 @@ static inline void set_page_order(struct page *page, unsigned int order)
 	__SetPageBuddy(page);
 }
 
+static inline void rmv_page_order(struct page *page)
+{
+	__ClearPageBuddy(page);
+	set_page_private(page, 0);
+}
+
 /*
  * This function checks whether a page is free && is the buddy
  * we can do coalesce a page and its buddy if
@@ -900,10 +906,13 @@ continue_merging:
 		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
 		 * merge with it and move up one order.
 		 */
-		if (page_is_guard(buddy))
+		if (page_is_guard(buddy)) {
 			clear_page_guard(zone, buddy, order, migratetype);
-		else
-			del_page_from_free_area(buddy, &zone->free_area[order]);
+		} else {
+			list_del(&buddy->lru);
+			zone->free_area[order].nr_free--;
+			rmv_page_order(buddy);
+		}
 		combined_pfn = buddy_pfn & pfn;
 		page = page + (combined_pfn - pfn);
 		pfn = combined_pfn;
@@ -953,13 +962,15 @@ done_merging:
 			higher_buddy = higher_page + (buddy_pfn - combined_pfn);
 			if (pfn_valid_within(buddy_pfn) &&
 			    page_is_buddy(higher_page, higher_buddy, order + 1)) {
-				add_to_free_area_tail(page, &zone->free_area[order],
-						      migratetype);
-				return;
+				list_add_tail(&page->lru,
+					&zone->free_area[order].free_list[migratetype]);
+				goto out;
 			}
 		}
 
-	add_to_free_area(page, &zone->free_area[order], migratetype);
+	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
+out:
+	zone->free_area[order].nr_free++;
 }
 
 /*
@@ -1745,7 +1756,8 @@ static inline void expand(struct zone *zone, struct page *page,
 		if (set_page_guard(zone, &page[size], high, migratetype))
 			continue;
 
-		add_to_free_area(&page[size], area, migratetype);
+		list_add(&page[size].lru, &area->free_list[migratetype]);
+		area->nr_free++;
 		set_page_order(&page[size], high);
 	}
 }
@@ -1883,10 +1895,13 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 	/* Find a page of the appropriate size in the preferred list */
 	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
 		area = &(zone->free_area[current_order]);
-		page = get_page_from_free_area(area, migratetype);
+		page = list_first_entry_or_null(&area->free_list[migratetype],
+							struct page, lru);
 		if (!page)
 			continue;
-		del_page_from_free_area(page, area);
+		list_del(&page->lru);
+		rmv_page_order(page);
+		area->nr_free--;
 		expand(zone, page, order, current_order, area, migratetype);
 		set_pcppage_migratetype(page, migratetype);
 		return page;
@@ -1974,7 +1989,8 @@ static int move_freepages(struct zone *zone,
 		}
 
 		order = page_order(page);
-		move_to_free_area(page, &zone->free_area[order], migratetype);
+		list_move(&page->lru,
+			  &zone->free_area[order].free_list[migratetype]);
 		page += 1 << order;
 		pages_moved += 1 << order;
 	}
@@ -2123,7 +2139,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 
 single_page:
 	area = &zone->free_area[current_order];
-	move_to_free_area(page, area, start_type);
+	list_move(&page->lru, &area->free_list[start_type]);
 }
 
 /*
@@ -2147,7 +2163,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
 		if (fallback_mt == MIGRATE_TYPES)
 			break;
 
-		if (free_area_empty(area, fallback_mt))
+		if (list_empty(&area->free_list[fallback_mt]))
 			continue;
 
 		if (can_steal_fallback(order, migratetype))
@@ -2234,7 +2250,9 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 		for (order = 0; order < MAX_ORDER; order++) {
 			struct free_area *area = &(zone->free_area[order]);
 
-			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
+			page = list_first_entry_or_null(
+					&area->free_list[MIGRATE_HIGHATOMIC],
+					struct page, lru);
 			if (!page)
 				continue;
@@ -2357,7 +2375,8 @@ find_smallest:
 	VM_BUG_ON(current_order == MAX_ORDER);
 
 do_steal:
-	page = get_page_from_free_area(area, fallback_mt);
+	page = list_first_entry(&area->free_list[fallback_mt],
+							struct page, lru);
 
 	steal_suitable_fallback(zone, page, start_migratetype, can_steal);
@@ -2817,7 +2836,6 @@ EXPORT_SYMBOL_GPL(split_page);
 
 int __isolate_free_page(struct page *page, unsigned int order)
 {
-	struct free_area *area = &page_zone(page)->free_area[order];
 	unsigned long watermark;
 	struct zone *zone;
 	int mt;
@@ -2842,8 +2860,9 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	}
 
 	/* Remove page from free list */
-	del_page_from_free_area(page, area);
+	list_del(&page->lru);
+	zone->free_area[order].nr_free--;
+	rmv_page_order(page);
 
 	/*
 	 * Set the pageblock if the isolated page is at least half of a
@@ -3167,13 +3186,13 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 			if (mt == MIGRATE_CMA)
 				continue;
 #endif
-			if (!free_area_empty(area, mt))
+			if (!list_empty(&area->free_list[mt]))
 				return true;
 		}
 
 #ifdef CONFIG_CMA
 		if ((alloc_flags & ALLOC_CMA) &&
-		    !free_area_empty(area, MIGRATE_CMA)) {
+		    !list_empty(&area->free_list[MIGRATE_CMA])) {
 			return true;
 		}
 #endif
@@ -5247,7 +5266,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 
 			types[order] = 0;
 			for (type = 0; type < MIGRATE_TYPES; type++) {
-				if (!free_area_empty(area, type))
+				if (!list_empty(&area->free_list[type]))
 					types[order] |= 1 << type;
 			}
 		}
@@ -8218,7 +8237,9 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 		pr_info("remove from free list %lx %d %lx\n",
 			pfn, 1 << order, end_pfn);
 #endif
-		del_page_from_free_area(page, &zone->free_area[order]);
+		list_del(&page->lru);
+		rmv_page_order(page);
+		zone->free_area[order].nr_free--;
 		for (i = 0; i < (1 << order); i++)
 			SetPageReserved((page+i));
 		post_alloc_hook(page, order, GFP_NOWAIT);
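
A closing note on the restored rmv_page_order(): it is the inverse of set_page_order(), whose tail is visible as context at the top of the page_alloc.c diff. A sketch of the pair follows (kernel context assumed; set_page_order()'s first line is inferred from standard kernels of this era, it is not shown in this diff):

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);	/* inferred: stash order in page->private */
	__SetPageBuddy(page);		/* shown as context in the first hunk above */
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);		/* page is leaving the free list */
	set_page_private(page, 0);	/* drop the stale order */
}

All four removal sites in this diff (__free_one_page(), __rmqueue_smallest(), __isolate_free_page(), __offline_isolated_pages()) repeat the same triplet of list_del(), rmv_page_order(), and an nr_free decrement, which is exactly the duplication the reverted helpers had factored out.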