/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/pgtable-bits.h>

/* Fold away the page-table levels this configuration does not use. */
#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif

/*
 * Every table page holds PAGE_SIZE / 8 eight-byte entries, so each
 * level of the tree translates an extra (PAGE_SHIFT - 3) address bits.
 */
#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Virtual-address bits actually translated by the page-table tree. */
#define VA_BITS	(PGDIR_SHIFT + (PAGE_SHIFT - 3))

#define PTRS_PER_PGD	(PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> 3)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> 3)

/* At least one PGD slot belongs to user space even for tiny TASK_SIZE64. */
#define USER_PTRS_PER_PGD       ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)

#ifndef __ASSEMBLY__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

/* Colored zero pages: pick the zero page matching the vaddr's cache color. */
#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

/*
 * vmalloc sits right after the module area; its end is capped by both the
 * page-table span and the CPU's implemented virtual-address bits.
 */
#define VMALLOC_START	MODULES_END
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE)

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];
10509cfefb7SHuacai Chen 10609cfefb7SHuacai Chen #ifndef __PAGETABLE_PUD_FOLDED 10709cfefb7SHuacai Chen 10809cfefb7SHuacai Chen typedef struct { unsigned long pud; } pud_t; 10909cfefb7SHuacai Chen #define pud_val(x) ((x).pud) 11009cfefb7SHuacai Chen #define __pud(x) ((pud_t) { (x) }) 11109cfefb7SHuacai Chen 11209cfefb7SHuacai Chen extern pud_t invalid_pud_table[PTRS_PER_PUD]; 11309cfefb7SHuacai Chen 11409cfefb7SHuacai Chen /* 11509cfefb7SHuacai Chen * Empty pgd/p4d entries point to the invalid_pud_table. 11609cfefb7SHuacai Chen */ 11709cfefb7SHuacai Chen static inline int p4d_none(p4d_t p4d) 11809cfefb7SHuacai Chen { 11909cfefb7SHuacai Chen return p4d_val(p4d) == (unsigned long)invalid_pud_table; 12009cfefb7SHuacai Chen } 12109cfefb7SHuacai Chen 12209cfefb7SHuacai Chen static inline int p4d_bad(p4d_t p4d) 12309cfefb7SHuacai Chen { 12409cfefb7SHuacai Chen return p4d_val(p4d) & ~PAGE_MASK; 12509cfefb7SHuacai Chen } 12609cfefb7SHuacai Chen 12709cfefb7SHuacai Chen static inline int p4d_present(p4d_t p4d) 12809cfefb7SHuacai Chen { 12909cfefb7SHuacai Chen return p4d_val(p4d) != (unsigned long)invalid_pud_table; 13009cfefb7SHuacai Chen } 13109cfefb7SHuacai Chen 13209cfefb7SHuacai Chen static inline void p4d_clear(p4d_t *p4dp) 13309cfefb7SHuacai Chen { 13409cfefb7SHuacai Chen p4d_val(*p4dp) = (unsigned long)invalid_pud_table; 13509cfefb7SHuacai Chen } 13609cfefb7SHuacai Chen 13709cfefb7SHuacai Chen static inline pud_t *p4d_pgtable(p4d_t p4d) 13809cfefb7SHuacai Chen { 13909cfefb7SHuacai Chen return (pud_t *)p4d_val(p4d); 14009cfefb7SHuacai Chen } 14109cfefb7SHuacai Chen 14209cfefb7SHuacai Chen static inline void set_p4d(p4d_t *p4d, p4d_t p4dval) 14309cfefb7SHuacai Chen { 14409cfefb7SHuacai Chen *p4d = p4dval; 14509cfefb7SHuacai Chen } 14609cfefb7SHuacai Chen 147*092e9ebeSHuacai Chen #define p4d_phys(p4d) PHYSADDR(p4d_val(p4d)) 14809cfefb7SHuacai Chen #define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT)) 14909cfefb7SHuacai Chen 15009cfefb7SHuacai Chen #endif 

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table (never 0), so
 * walkers always find a valid next-level table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

/* A well-formed table pointer has nothing set below PAGE_MASK. */
static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
}

/* The next-level PMD table is stored as a kernel virtual address. */
static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * A huge pmd is a leaf, not a table pointer: presence is encoded
	 * in its own PRESENT/PROTNONE bits rather than by comparison
	 * against invalid_pte_table.
	 */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
}

#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

/* Under THP, pmd_page() is the function defined later in this file. */
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pmd table with invalid pointers.
 */
extern void pgd_init(unsigned long page);
extern void pud_init(unsigned long page, unsigned long pagetable);
extern void pmd_init(unsigned long page, unsigned long pagetable);

/*
 * Swap-entry encoding for non-present ptes: bits [23:16] hold the swap
 * type, bits [63:24] the offset, and the low 16 bits stay zero so the
 * entry never looks present.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = (type << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0xff)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x)	((pmd_t) { (x).val | _PAGE_HUGE })

extern void paging_init(void);

/* A pte is "none" even if only the global bit is set (see pte_clear()). */
#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Store a pte, keeping the _PAGE_GLOBAL bit coherent across the buddy
 * pair (ptep_buddy() — presumably the adjacent pte sharing one TLB
 * entry; both halves must agree on G for the hardware to behave).
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.  LL/SC loop: load the buddy, bail
		 * out if it is already non-none, otherwise OR in the
		 * global bit and retry until the store-conditional
		 * succeeds.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		__asm__ __volatile__ (
		"1:"	__LL	"%[tmp], %[buddy]	\n"
		"	bnez	%[tmp], 2f		\n"
		"	or	%[tmp], %[tmp], %[global]	\n"
			__SC	"%[tmp], %[buddy]	\n"
		"	beqz	%[tmp], 1b		\n"
		"	nop				\n"
		"2:					\n"
		__WEAK_LLSC_MB
		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
		: [global] "r" (page_global));
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
		set_pte_at(mm, addr, ptep, __pte(0));
}

/* log2 of entry sizes, used by assembly page-table walkers. */
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

/* Dirty tracking is in software: DIRTY (hw writable) and MODIFIED move together. */
static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= (_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= (_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

/*
 * A pte may still be reachable by concurrent TLB lookups: either it is
 * present, or it is PROT_NONE while a TLB flush is pending.
 */
#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* Swap protection bits, keeping the pfn and state bits in _PAGE_CHG_MASK. */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep)
{
	__update_tlb(vma, address, ptep);
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

#define kern_addr_valid(addr)	(1)

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

43809cfefb7SHuacai Chen static inline pmd_t pmd_mkhuge(pmd_t pmd) 43909cfefb7SHuacai Chen { 44009cfefb7SHuacai Chen pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) | 44109cfefb7SHuacai Chen ((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)); 44209cfefb7SHuacai Chen pmd_val(pmd) |= _PAGE_HUGE; 44309cfefb7SHuacai Chen 44409cfefb7SHuacai Chen return pmd; 44509cfefb7SHuacai Chen } 44609cfefb7SHuacai Chen 44709cfefb7SHuacai Chen #define pmd_write pmd_write 44809cfefb7SHuacai Chen static inline int pmd_write(pmd_t pmd) 44909cfefb7SHuacai Chen { 45009cfefb7SHuacai Chen return !!(pmd_val(pmd) & _PAGE_WRITE); 45109cfefb7SHuacai Chen } 45209cfefb7SHuacai Chen 45309cfefb7SHuacai Chen static inline pmd_t pmd_mkwrite(pmd_t pmd) 45409cfefb7SHuacai Chen { 45509cfefb7SHuacai Chen pmd_val(pmd) |= (_PAGE_WRITE | _PAGE_DIRTY); 45609cfefb7SHuacai Chen return pmd; 45709cfefb7SHuacai Chen } 45809cfefb7SHuacai Chen 45909cfefb7SHuacai Chen static inline pmd_t pmd_wrprotect(pmd_t pmd) 46009cfefb7SHuacai Chen { 46109cfefb7SHuacai Chen pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY); 46209cfefb7SHuacai Chen return pmd; 46309cfefb7SHuacai Chen } 46409cfefb7SHuacai Chen 46509cfefb7SHuacai Chen static inline int pmd_dirty(pmd_t pmd) 46609cfefb7SHuacai Chen { 46709cfefb7SHuacai Chen return !!(pmd_val(pmd) & _PAGE_MODIFIED); 46809cfefb7SHuacai Chen } 46909cfefb7SHuacai Chen 47009cfefb7SHuacai Chen static inline pmd_t pmd_mkclean(pmd_t pmd) 47109cfefb7SHuacai Chen { 47209cfefb7SHuacai Chen pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED); 47309cfefb7SHuacai Chen return pmd; 47409cfefb7SHuacai Chen } 47509cfefb7SHuacai Chen 47609cfefb7SHuacai Chen static inline pmd_t pmd_mkdirty(pmd_t pmd) 47709cfefb7SHuacai Chen { 47809cfefb7SHuacai Chen pmd_val(pmd) |= (_PAGE_DIRTY | _PAGE_MODIFIED); 47909cfefb7SHuacai Chen return pmd; 48009cfefb7SHuacai Chen } 48109cfefb7SHuacai Chen 48209cfefb7SHuacai Chen static inline int pmd_young(pmd_t pmd) 48309cfefb7SHuacai Chen { 48409cfefb7SHuacai 
Chen return !!(pmd_val(pmd) & _PAGE_ACCESSED); 48509cfefb7SHuacai Chen } 48609cfefb7SHuacai Chen 48709cfefb7SHuacai Chen static inline pmd_t pmd_mkold(pmd_t pmd) 48809cfefb7SHuacai Chen { 48909cfefb7SHuacai Chen pmd_val(pmd) &= ~_PAGE_ACCESSED; 49009cfefb7SHuacai Chen return pmd; 49109cfefb7SHuacai Chen } 49209cfefb7SHuacai Chen 49309cfefb7SHuacai Chen static inline pmd_t pmd_mkyoung(pmd_t pmd) 49409cfefb7SHuacai Chen { 49509cfefb7SHuacai Chen pmd_val(pmd) |= _PAGE_ACCESSED; 49609cfefb7SHuacai Chen return pmd; 49709cfefb7SHuacai Chen } 49809cfefb7SHuacai Chen 49909cfefb7SHuacai Chen static inline struct page *pmd_page(pmd_t pmd) 50009cfefb7SHuacai Chen { 50109cfefb7SHuacai Chen if (pmd_trans_huge(pmd)) 50209cfefb7SHuacai Chen return pfn_to_page(pmd_pfn(pmd)); 50309cfefb7SHuacai Chen 50409cfefb7SHuacai Chen return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT); 50509cfefb7SHuacai Chen } 50609cfefb7SHuacai Chen 50709cfefb7SHuacai Chen static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 50809cfefb7SHuacai Chen { 50909cfefb7SHuacai Chen pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) | 51009cfefb7SHuacai Chen (pgprot_val(newprot) & ~_HPAGE_CHG_MASK); 51109cfefb7SHuacai Chen return pmd; 51209cfefb7SHuacai Chen } 51309cfefb7SHuacai Chen 51409cfefb7SHuacai Chen static inline pmd_t pmd_mkinvalid(pmd_t pmd) 51509cfefb7SHuacai Chen { 51609cfefb7SHuacai Chen pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE); 51709cfefb7SHuacai Chen 51809cfefb7SHuacai Chen return pmd; 51909cfefb7SHuacai Chen } 52009cfefb7SHuacai Chen 52109cfefb7SHuacai Chen /* 52209cfefb7SHuacai Chen * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a 52309cfefb7SHuacai Chen * different prototype. 
52409cfefb7SHuacai Chen */ 52509cfefb7SHuacai Chen #define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR 52609cfefb7SHuacai Chen static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, 52709cfefb7SHuacai Chen unsigned long address, pmd_t *pmdp) 52809cfefb7SHuacai Chen { 52909cfefb7SHuacai Chen pmd_t old = *pmdp; 53009cfefb7SHuacai Chen 53109cfefb7SHuacai Chen pmd_clear(pmdp); 53209cfefb7SHuacai Chen 53309cfefb7SHuacai Chen return old; 53409cfefb7SHuacai Chen } 53509cfefb7SHuacai Chen 53609cfefb7SHuacai Chen #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 53709cfefb7SHuacai Chen 538d4b6f156SHuacai Chen #ifdef CONFIG_NUMA_BALANCING 539d4b6f156SHuacai Chen static inline long pte_protnone(pte_t pte) 540d4b6f156SHuacai Chen { 541d4b6f156SHuacai Chen return (pte_val(pte) & _PAGE_PROTNONE); 542d4b6f156SHuacai Chen } 543d4b6f156SHuacai Chen 544d4b6f156SHuacai Chen static inline long pmd_protnone(pmd_t pmd) 545d4b6f156SHuacai Chen { 546d4b6f156SHuacai Chen return (pmd_val(pmd) & _PAGE_PROTNONE); 547d4b6f156SHuacai Chen } 548d4b6f156SHuacai Chen #endif /* CONFIG_NUMA_BALANCING */ 549d4b6f156SHuacai Chen 55009cfefb7SHuacai Chen /* 55109cfefb7SHuacai Chen * We provide our own get_unmapped area to cope with the virtual aliasing 55209cfefb7SHuacai Chen * constraints placed on us by the cache architecture. 55309cfefb7SHuacai Chen */ 55409cfefb7SHuacai Chen #define HAVE_ARCH_UNMAPPED_AREA 55509cfefb7SHuacai Chen #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN 55609cfefb7SHuacai Chen 55709cfefb7SHuacai Chen #endif /* !__ASSEMBLY__ */ 55809cfefb7SHuacai Chen 55909cfefb7SHuacai Chen #endif /* _ASM_PGTABLE_H */ 560