Mirror of https://github.com/rd-stuffs/msm-4.14.git (synced 2025-02-20)
thp: fix huge zero page logic for page with pfn == 0
The current implementation of the huge zero page uses pfn value 0 to indicate that the page hasn't been allocated yet. It assumes the buddy page allocator can't return a page with pfn == 0. Let's rework the code to store the 'struct page *' of the huge zero page rather than its pfn, so we can drop that weak assumption.

[akpm@linux-foundation.org: fix sparse warning]
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reported-by: Minchan Kim <minchan@kernel.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent fd0ccaf2bd
commit 5918d10a4b
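The heart of the change is the allocate-and-publish race in get_huge_zero_page(): with a 'struct page *', NULL is an unambiguous "not allocated yet" sentinel, whereas pfn 0 could in principle be a real page. Below is a minimal userspace C11 sketch of that pattern — not the kernel code; get_zero_page, zero_page, refcount and the 4096-byte calloc are illustrative stand-ins for the identifiers in the diff that follows.

#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative stand-ins for the kernel's statics; NULL, not pfn 0,
 * now means "no huge zero page allocated yet". */
static _Atomic(void *) zero_page;
static atomic_int refcount;

static void *get_zero_page(void)
{
        void *page;

        for (;;) {
                /* Fast path, like atomic_inc_not_zero(): take a
                 * reference only if the page is already live. */
                int ref = atomic_load(&refcount);
                while (ref != 0) {
                        if (atomic_compare_exchange_weak(&refcount,
                                                         &ref, ref + 1))
                                return atomic_load(&zero_page);
                }

                page = calloc(1, 4096);   /* a zeroed stand-in "page" */
                if (!page)
                        return NULL;      /* unambiguous failure value */

                /* Publish with compare-and-swap; if another thread won
                 * the race, free our copy and retry the fast path. */
                void *expected = NULL;
                if (!atomic_compare_exchange_strong(&zero_page,
                                                    &expected, page)) {
                        free(page);
                        continue;
                }
                /* One reference for the caller, one cached for the
                 * shrinker to drop later -- hence 2, as in the commit. */
                atomic_store(&refcount, 2);
                return page;
        }
}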
mm/huge_memory.c
@@ -163,35 +163,34 @@ static int start_khugepaged(void)
 }
 
 static atomic_t huge_zero_refcount;
-static unsigned long huge_zero_pfn __read_mostly;
+static struct page *huge_zero_page __read_mostly;
 
-static inline bool is_huge_zero_pfn(unsigned long pfn)
+static inline bool is_huge_zero_page(struct page *page)
 {
-        unsigned long zero_pfn = ACCESS_ONCE(huge_zero_pfn);
-        return zero_pfn && pfn == zero_pfn;
+        return ACCESS_ONCE(huge_zero_page) == page;
 }
 
 static inline bool is_huge_zero_pmd(pmd_t pmd)
 {
-        return is_huge_zero_pfn(pmd_pfn(pmd));
+        return is_huge_zero_page(pmd_page(pmd));
 }
 
-static unsigned long get_huge_zero_page(void)
+static struct page *get_huge_zero_page(void)
 {
         struct page *zero_page;
 retry:
         if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
-                return ACCESS_ONCE(huge_zero_pfn);
+                return ACCESS_ONCE(huge_zero_page);
 
         zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
                         HPAGE_PMD_ORDER);
         if (!zero_page) {
                 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
-                return 0;
+                return NULL;
         }
         count_vm_event(THP_ZERO_PAGE_ALLOC);
         preempt_disable();
-        if (cmpxchg(&huge_zero_pfn, 0, page_to_pfn(zero_page))) {
+        if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
                 preempt_enable();
                 __free_page(zero_page);
                 goto retry;
@@ -200,7 +199,7 @@ retry:
         /* We take additional reference here. It will be put back by shrinker */
         atomic_set(&huge_zero_refcount, 2);
         preempt_enable();
-        return ACCESS_ONCE(huge_zero_pfn);
+        return ACCESS_ONCE(huge_zero_page);
 }
 
 static void put_huge_zero_page(void)
@@ -220,9 +219,9 @@ static int shrink_huge_zero_page(struct shrinker *shrink,
                 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
 
         if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
-                unsigned long zero_pfn = xchg(&huge_zero_pfn, 0);
-                BUG_ON(zero_pfn == 0);
-                __free_page(__pfn_to_page(zero_pfn));
+                struct page *zero_page = xchg(&huge_zero_page, NULL);
+                BUG_ON(zero_page == NULL);
+                __free_page(zero_page);
         }
 
         return 0;
@@ -764,12 +763,12 @@ static inline struct page *alloc_hugepage(int defrag)
 
 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
-                unsigned long zero_pfn)
+                struct page *zero_page)
 {
         pmd_t entry;
         if (!pmd_none(*pmd))
                 return false;
-        entry = pfn_pmd(zero_pfn, vma->vm_page_prot);
+        entry = mk_pmd(zero_page, vma->vm_page_prot);
         entry = pmd_wrprotect(entry);
         entry = pmd_mkhuge(entry);
         set_pmd_at(mm, haddr, pmd, entry);
@@ -794,20 +793,20 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
         if (!(flags & FAULT_FLAG_WRITE) &&
                         transparent_hugepage_use_zero_page()) {
                 pgtable_t pgtable;
-                unsigned long zero_pfn;
+                struct page *zero_page;
                 bool set;
                 pgtable = pte_alloc_one(mm, haddr);
                 if (unlikely(!pgtable))
                         return VM_FAULT_OOM;
-                zero_pfn = get_huge_zero_page();
-                if (unlikely(!zero_pfn)) {
+                zero_page = get_huge_zero_page();
+                if (unlikely(!zero_page)) {
                         pte_free(mm, pgtable);
                         count_vm_event(THP_FAULT_FALLBACK);
                         goto out;
                 }
                 spin_lock(&mm->page_table_lock);
                 set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
-                                zero_pfn);
+                                zero_page);
                 spin_unlock(&mm->page_table_lock);
                 if (!set) {
                         pte_free(mm, pgtable);
@@ -886,16 +885,16 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
          * a page table.
          */
         if (is_huge_zero_pmd(pmd)) {
-                unsigned long zero_pfn;
+                struct page *zero_page;
                 bool set;
                 /*
                  * get_huge_zero_page() will never allocate a new page here,
                  * since we already have a zero page to copy. It just takes a
                  * reference.
                  */
-                zero_pfn = get_huge_zero_page();
+                zero_page = get_huge_zero_page();
                 set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
-                                zero_pfn);
+                                zero_page);
                 BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
                 ret = 0;
                 goto out_unlock;
@@ -1812,7 +1811,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
         struct anon_vma *anon_vma;
         int ret = 1;
 
-        BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
+        BUG_ON(is_huge_zero_page(page));
         BUG_ON(!PageAnon(page));
 
         /*
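The shrinker hunk above works because get_huge_zero_page() parks one extra cached reference (the atomic_set to 2), so the count can only fall back to 1 once no user still holds the page. Continuing the userspace sketch from earlier — same zero_page/refcount globals; put_zero_page and shrink_zero_page are hypothetical names, not the kernel API:

/* Companion to the earlier sketch: the release/reclaim side. */
static void put_zero_page(void)
{
        /* The cached extra reference guarantees this never hits zero. */
        atomic_fetch_sub(&refcount, 1);
}

static int shrink_zero_page(void)
{
        int expected = 1;

        /* A 1 -> 0 transition means only the cached reference is left,
         * so it is safe to unpublish and free the page. */
        if (atomic_compare_exchange_strong(&refcount, &expected, 0)) {
                void *page = atomic_exchange(&zero_page, NULL);
                free(page);     /* never NULL: publish preceded refcount = 2 */
                return 1;       /* reclaimed */
        }
        return 0;               /* still in use; nothing reclaimed */
}

The same ordering argument the kernel relies on holds here: the page pointer is published before the refcount becomes nonzero, so any thread that wins the fast-path increment is guaranteed to read a non-NULL page.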