mirror of https://github.com/rd-stuffs/msm-4.14.git
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  resource: Fix broken indentation
  resource: Fix generic page_is_ram() for partial RAM pages
  x86, paravirt: Remove kmap_atomic_pte paravirt op.
  x86, vmi: Disable highmem PTE allocation even when CONFIG_HIGHPTE=y
  x86, xen: Disable highmem PTE allocation even when CONFIG_HIGHPTE=y
commit 2a32f2db13
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -66,10 +66,6 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
 void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
-#ifndef CONFIG_PARAVIRT
-#define kmap_atomic_pte(page, type)	kmap_atomic(page, type)
-#endif
-
 #define flush_cache_kmaps()	do { } while (0)
 
 extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -435,15 +435,6 @@ static inline void paravirt_release_pud(unsigned long pfn)
 	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
 }
 
-#ifdef CONFIG_HIGHPTE
-static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
-{
-	unsigned long ret;
-	ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
-	return (void *)ret;
-}
-#endif
-
 static inline void pte_update(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep)
 {
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -304,10 +304,6 @@ struct pv_mmu_ops {
 #endif	/* PAGETABLE_LEVELS == 4 */
 #endif	/* PAGETABLE_LEVELS >= 3 */
 
-#ifdef CONFIG_HIGHPTE
-	void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
-#endif
-
 	struct pv_lazy_ops lazy_mode;
 
 	/* dom0 ops */
--- a/arch/x86/include/asm/pgtable_32.h
+++ b/arch/x86/include/asm/pgtable_32.h
@@ -54,10 +54,10 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 	 in_irq() ? KM_IRQ_PTE :	\
 	 KM_PTE0)
 #define pte_offset_map(dir, address)					\
-	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), __KM_PTE) +		\
+	((pte_t *)kmap_atomic(pmd_page(*(dir)), __KM_PTE) +		\
 	 pte_index((address)))
 #define pte_offset_map_nested(dir, address)				\
-	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE1) +		\
+	((pte_t *)kmap_atomic(pmd_page(*(dir)), KM_PTE1) +		\
 	 pte_index((address)))
 #define pte_unmap(pte) kunmap_atomic((pte), __KM_PTE)
 #define pte_unmap_nested(pte) kunmap_atomic((pte), KM_PTE1)
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -428,10 +428,6 @@ struct pv_mmu_ops pv_mmu_ops = {
 	.ptep_modify_prot_start = __ptep_modify_prot_start,
 	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
 
-#ifdef CONFIG_HIGHPTE
-	.kmap_atomic_pte = kmap_atomic,
-#endif
-
 #if PAGETABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
 	.set_pte_atomic = native_set_pte_atomic,
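Note: the hunks above dismantle one slot of a paravirt op table. For readers unfamiliar with the pattern, here is a small stand-alone C sketch (not kernel code; every name is a simplified stand-in) of how a pv_mmu_ops-style table works: entries default to the native implementation, hypervisor backends patch them at init, and removing the kmap_atomic_pte slot means call sites invoke kmap_atomic() directly.

/* Stand-alone model of the paravirt op-table pattern; compile with any C99
 * compiler. Names mirror the kernel's but the implementation is a demo. */
#include <stdio.h>

struct pv_mmu_ops_demo {
	/* the op being removed: map a page-table page, return its VA */
	void *(*kmap_atomic_pte)(unsigned long pfn);
};

static void *native_kmap_atomic(unsigned long pfn)
{
	printf("native: map pfn %lu\n", pfn);
	return (void *)(pfn << 12);	/* fake VA for the demo */
}

static void *xen_kmap_atomic_pte(unsigned long pfn)
{
	printf("xen: map pfn %lu, read-only if pinned\n", pfn);
	return (void *)(pfn << 12);
}

/* boot default: native op (mirrors ".kmap_atomic_pte = kmap_atomic" above) */
static struct pv_mmu_ops_demo pv_mmu_ops_demo = {
	.kmap_atomic_pte = native_kmap_atomic,
};

int main(void)
{
	pv_mmu_ops_demo.kmap_atomic_pte(42);		/* native path */

	/* a backend patches the table at init, as xen_mmu_ops did */
	pv_mmu_ops_demo.kmap_atomic_pte = xen_kmap_atomic_pte;
	pv_mmu_ops_demo.kmap_atomic_pte(42);		/* hypervisor path */
	return 0;
}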
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -33,6 +33,7 @@
 #include <asm/fixmap.h>
 #include <asm/apicdef.h>
 #include <asm/apic.h>
+#include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/timer.h>
 #include <asm/vmi_time.h>
@@ -266,30 +267,6 @@ static void vmi_nop(void)
 {
 }
 
-#ifdef CONFIG_HIGHPTE
-static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
-{
-	void *va = kmap_atomic(page, type);
-
-	/*
-	 * Internally, the VMI ROM must map virtual addresses to physical
-	 * addresses for processing MMU updates.  By the time MMU updates
-	 * are issued, this information is typically already lost.
-	 * Fortunately, the VMI provides a cache of mapping slots for active
-	 * page tables.
-	 *
-	 * We use slot zero for the linear mapping of physical memory, and
-	 * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
-	 *
-	 *  args:                 SLOT                 VA    COUNT PFN
-	 */
-	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
-	vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));
-
-	return va;
-}
-#endif
-
 static void vmi_allocate_pte(struct mm_struct *mm, unsigned long pfn)
 {
 	vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
@@ -640,6 +617,12 @@ static inline int __init activate_vmi(void)
 	u64 reloc;
 	const struct vmi_relocation_info *rel = (struct vmi_relocation_info *)&reloc;
 
+	/*
+	 * Prevent page tables from being allocated in highmem, even if
+	 * CONFIG_HIGHPTE is enabled.
+	 */
+	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
 	if (call_vrom_func(vmi_rom, vmi_init) != 0) {
 		printk(KERN_ERR "VMI ROM failed to initialize!");
 		return 0;
@@ -778,10 +761,6 @@ static inline int __init activate_vmi(void)
 
 	/* Set linear is needed in all cases */
 	vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
-#ifdef CONFIG_HIGHPTE
-	if (vmi_ops.set_linear_mapping)
-		pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
-#endif
 
 	/*
	 * These MUST always be patched.  Don't support indirect jumps
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -50,6 +50,7 @@
 #include <asm/traps.h>
 #include <asm/setup.h>
 #include <asm/desc.h>
+#include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/reboot.h>
@@ -1094,6 +1095,12 @@ asmlinkage void __init xen_start_kernel(void)
 
 	__supported_pte_mask |= _PAGE_IOMAP;
 
+	/*
+	 * Prevent page tables from being allocated in highmem, even
+	 * if CONFIG_HIGHPTE is enabled.
+	 */
+	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
 	/* Work out if we support NX */
 	x86_configure_nx();
 
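Note: the VMI and Xen hunks above apply the same fix: at backend init they clear __GFP_HIGHMEM from __userpte_alloc_gfp, the cached GFP mask used for user PTE pages, so those pages are never allocated in highmem; with that guaranteed, the kmap_atomic_pte hook becomes dead code and the other hunks remove it. A self-contained C sketch of why masking the cached flags once is enough (names mirror the kernel's, the implementation and flag values are stand-ins for the demo):

/* Userspace model: the PTE allocator passes a cached GFP mask to the page
 * allocator, so clearing one bit at boot affects every later allocation. */
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_HIGHMEM	0x02u		/* stand-in value */
#define GFP_KERNEL	0x10u		/* stand-in value */

/* boot-time default: user PTEs may come from highmem when CONFIG_HIGHPTE=y */
static gfp_t __userpte_alloc_gfp = GFP_KERNEL | __GFP_HIGHMEM;

static const char *alloc_pages_zone(gfp_t gfp)
{
	/* the page allocator only considers highmem if the flag is set */
	return (gfp & __GFP_HIGHMEM) ? "ZONE_HIGHMEM" : "ZONE_NORMAL";
}

static void pte_alloc_one(void)
{
	printf("PTE page from %s\n", alloc_pages_zone(__userpte_alloc_gfp));
}

int main(void)
{
	pte_alloc_one();			/* may land in highmem */

	/* what activate_vmi()/xen_start_kernel() do in the hunks above */
	__userpte_alloc_gfp &= ~__GFP_HIGHMEM;

	pte_alloc_one();			/* now always lowmem */
	return 0;
}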
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1427,23 +1427,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_HIGHPTE
-static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
-{
-	pgprot_t prot = PAGE_KERNEL;
-
-	if (PagePinned(page))
-		prot = PAGE_KERNEL_RO;
-
-	if (0 && PageHighMem(page))
-		printk("mapping highpte %lx type %d prot %s\n",
-		       page_to_pfn(page), type,
-		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
-
-	return kmap_atomic_prot(page, type, prot);
-}
-#endif
-
 #ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
@@ -1902,10 +1885,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.alloc_pmd_clone = paravirt_nop,
 	.release_pmd = xen_release_pmd_init,
 
-#ifdef CONFIG_HIGHPTE
-	.kmap_atomic_pte = xen_kmap_atomic_pte,
-#endif
-
 #ifdef CONFIG_X86_64
 	.set_pte = xen_set_pte,
 #else
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -304,7 +304,7 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
 	struct resource res;
-	unsigned long pfn, len;
+	unsigned long pfn, end_pfn;
 	u64 orig_end;
 	int ret = -1;
 
@@ -314,9 +314,10 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 	orig_end = res.end;
 	while ((res.start < res.end) &&
 		(find_next_system_ram(&res, "System RAM") >= 0)) {
-		pfn = (unsigned long)(res.start >> PAGE_SHIFT);
-		len = (unsigned long)((res.end + 1 - res.start) >> PAGE_SHIFT);
-		ret = (*func)(pfn, len, arg);
+		pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		end_pfn = (res.end + 1) >> PAGE_SHIFT;
+		if (end_pfn > pfn)
+			ret = (*func)(pfn, end_pfn - pfn, arg);
 		if (ret)
 			break;
 		res.start = res.end + 1;
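Note: this last hunk is the partial-RAM-page fix from the merge summary. walk_system_ram_range() used to truncate res.start down to a page boundary, so a resource covering only part of a page was reported as a full RAM page (breaking the generic page_is_ram()). The fix rounds the start up and the end down, and skips the callback when no complete page remains. A small runnable userspace sketch of the arithmetic (PAGE_SHIFT and the sample ranges are assumptions for the demo):

/* Demonstrates the round-up/round-down logic of the fixed walker. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)

static void walk_one(unsigned long long start, unsigned long long end)
{
	unsigned long pfn     = (start + PAGE_SIZE - 1) >> PAGE_SHIFT;	/* round up */
	unsigned long end_pfn = (end + 1) >> PAGE_SHIFT;		/* round down */

	if (end_pfn > pfn)
		printf("RAM [%#llx-%#llx]: pages %lu..%lu (%lu pages)\n",
		       start, end, pfn, end_pfn - 1, end_pfn - pfn);
	else
		printf("RAM [%#llx-%#llx]: partial page only, skipped\n",
		       start, end);
}

int main(void)
{
	walk_one(0x0000, 0xffff);	/* aligned: 16 full pages */
	walk_one(0x0100, 0x2fff);	/* partial head: pages 1..2 only */
	walk_one(0x1000, 0x17ff);	/* less than one page: skipped */
	return 0;
}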