Mirror of https://github.com/rd-stuffs/msm-4.14.git
x86/mm: Sync also unmappings in vmalloc_sync_all()
commit 8e998fc24de47c55b47a887f6c95ab91acd4a720 upstream.

With huge-page ioremap areas the unmappings also need to be synced between
all page-tables. Otherwise it can cause data corruption when a region is
unmapped and later re-used.

Make the vmalloc_sync_one() function ready to sync unmappings and make
sure vmalloc_sync_all() iterates over all page-tables even when an
unmapped PMD is found.

Fixes: 5d72b4fba40ef ('x86, mm: support huge I/O mapping capability I/F')
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lkml.kernel.org/r/20190719184652.11391-3-joro@8bytes.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit efa1f5680b
parent 7e2e08356e
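As context for the fix, here is a minimal, hypothetical sketch of the sequence the commit message describes; the physical address, sizes, and driver setting are invented for illustration and are not part of the patch:

/* Hypothetical driver sequence; 0xfed00000 and the sizes are invented. */
void __iomem *p;

p = ioremap(0xfed00000, 2 * 1024 * 1024); /* large region; may get a huge (PMD) I/O mapping */
/* ... MMIO accesses through p ... */
iounmap(p);                               /* clears the PMD in the reference page-table */

/*
 * Before this patch, vmalloc_sync_all() propagated only newly created
 * mappings, so other page-tables on pgd_list could retain the stale
 * huge-page PMD after the iounmap().
 */
p = ioremap(0xfed00000, 4096);            /* the range is re-used with a 4K mapping */
/*
 * Accesses that go through a page-table still holding the stale PMD
 * would then hit the old translation -- the data corruption the
 * commit message refers to.
 */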
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -260,11 +260,12 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 
 	pmd = pmd_offset(pud, address);
 	pmd_k = pmd_offset(pud_k, address);
+
+	if (pmd_present(*pmd) != pmd_present(*pmd_k))
+		set_pmd(pmd, *pmd_k);
+
 	if (!pmd_present(*pmd_k))
 		return NULL;
-
-	if (!pmd_present(*pmd))
-		set_pmd(pmd, *pmd_k);
 	else
 		BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
 
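For readability, the new core of vmalloc_sync_one() from the hunk above, annotated (the comments are added here and are not part of the patch):

/*
 * Propagate a difference in either direction: this copies a newly
 * created PMD *and* a cleared (unmapped) PMD from the reference
 * page-table entry (pmd_k) into this page-table entry (pmd).
 */
if (pmd_present(*pmd) != pmd_present(*pmd_k))
	set_pmd(pmd, *pmd_k);

if (!pmd_present(*pmd_k))
	return NULL;	/* not present; the caller must keep iterating */
else
	BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));	/* both must map the same PFN */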
@@ -286,17 +287,13 @@ void vmalloc_sync_all(void)
 		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			spinlock_t *pgt_lock;
-			pmd_t *ret;
 
 			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
 			spin_lock(pgt_lock);
-			ret = vmalloc_sync_one(page_address(page), address);
+			vmalloc_sync_one(page_address(page), address);
 			spin_unlock(pgt_lock);
-
-			if (!ret)
-				break;
 		}
 		spin_unlock(&pgd_lock);
 	}
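The hunk above is the consumer-side consequence of the first change: a NULL return from vmalloc_sync_one() now simply means the PMD is not present in the reference page-table, and that state must still be propagated to every page-table, so the early break is removed. The resulting loop, annotated (comments added here, not part of the patch):

spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
	spinlock_t *pgt_lock;

	/* the pgt_lock only for Xen */
	pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

	spin_lock(pgt_lock);
	/*
	 * Sync a mapping *or* an unmapping into this page-table; the
	 * return value is ignored so that a not-present PMD no longer
	 * terminates the walk early -- every page-table gets updated.
	 */
	vmalloc_sync_one(page_address(page), address);
	spin_unlock(pgt_lock);
}
spin_unlock(&pgd_lock);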