/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

#ifndef __ASSEMBLY__

/* Generic accessors to PTE bits */
#ifndef pte_write
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_RW;
}
#endif
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte)	{ return false; }
static inline bool pte_ci(pte_t pte)		{ return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }
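/*
 * Usage sketch (illustrative only, not part of this header): generic mm
 * code is expected to go through these accessors instead of testing
 * _PAGE_* bits directly, so each nohash flavour can override the bit
 * layout, e.g.
 *
 *	if (pte_dirty(pte) && pte_write(pte))
 *		set_page_dirty(page);
 *
 * where 'pte' and 'page' are assumed to be supplied by the caller.
 */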
#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/linux/pgtable.h. On powerpc, this will only
 * work for user pages and always return true for kernel pages.
 */
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !pte_user(pte);
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/*
 * Don't just check for any non-zero bits in _PAGE_USER, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_USER. Need to explicitly match the _PAGE_BAP_UR bit in that case too.
 */
#ifndef pte_user
static inline bool pte_user(pte_t pte)
{
	return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
}
#endif

/*
 * We only find the page table entry at the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_USER bit.
	 * We have _PAGE_READ set for WRITE and EXECUTE.
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}
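/*
 * Worked example for the check above (illustrative only): with the default
 * bit layout, a PTE carrying _PAGE_PRESENT | _PAGE_USER passes
 * pte_access_permitted(pte, false), since pte_read() is unconditionally
 * true here; pte_access_permitted(pte, true) additionally requires
 * pte_write(), i.e. _PAGE_RW unless a platform overrides pte_write().
 */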
/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }
static inline unsigned long pte_pfn(pte_t pte)	{
	return pte_val(pte) >> PTE_RPN_SHIFT; }

/* Generic modifiers for PTE bits */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
#endif

#ifndef pte_mkprivileged
static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}
#endif

#ifndef pte_mkuser
static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}
#endif

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

/* Insert a PTE, top-level function is out of line. It uses an inline
 * low level function in the respective pgtable-* files
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);
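/*
 * Illustrative sketch (mm, addr, ptep, pfn and prot are assumed to come
 * from the caller): the conversion and modifier helpers above compose when
 * building an entry to install, e.g.
 *
 *	pte_t pte = pte_mkspecial(pfn_pte(pfn, prot));
 *	set_pte_at(mm, addr, ptep, pte);
 */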
/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/* Second case is 32-bit with 64-bit PTE.  In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between.
	 * In the percpu case, we also fall back to the simple update.
	 */
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		__asm__ __volatile__("\
			stw%X0 %2,%0\n\
			mbar\n\
			stw%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote.  Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);
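/*
 * Descriptive note (not from the original source): with
 * __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS defined, the generic fault path uses
 * this powerpc implementation to update the accessed/dirty/write bits of an
 * existing PTE; the return value indicates whether the entry actually
 * changed, so the caller knows whether a flush is needed.
 */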
/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT
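/*
 * Usage sketch (illustrative only): drivers normally reach these through
 * the generic pgprot helpers, e.g. making a user mapping of device memory
 * uncached and guarded before remapping it:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 *
 * where 'vma', 'pfn' and 'size' are assumed to come from the caller.
 */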
#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M);
#else
	/* We clear the top bit to indicate hugepd */
	return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 */
#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
#else
static inline
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
#endif

#endif /* __ASSEMBLY__ */
#endif