/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_PGTABLE_H

#ifdef CONFIG_PPC64
#include <asm/book3s/64/pgtable.h>
#else
#include <asm/book3s/32/pgtable.h>
#endif

#ifndef __ASSEMBLY__
/*
 * Insert a PTE: the top-level function is out of line. It uses an inline
 * low-level helper in the respective pgtable-* headers.
 */
extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		       pte_t pte);

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				 pte_t *ptep, pte_t entry, int dirty);

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#if defined(CONFIG_PPC32) || defined(CONFIG_PPC_64S_HASH_MMU)
/*
 * This gets called at the end of handling a page fault, when the kernel
 * has put a new PTE into the page table for the process.  We use it to
 * ensure coherency between the i-cache and d-cache for the page which
 * has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
#else
static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
#endif

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_PGTABLE_H */
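
/*
 * Illustrative usage (an editor's sketch, not part of the interface
 * defined by this header): generic mm code installs a PTE and then calls
 * update_mmu_cache() so Book3S can keep the i-cache and d-cache coherent
 * and, on hash-MMU configurations, preload the corresponding HPTE.  The
 * names below (vma, addr, ptep, entry) stand in for a fault handler's
 * locals:
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, entry);
 *	update_mmu_cache(vma, addr, ptep);
 *
 * On configurations without CONFIG_PPC32 or CONFIG_PPC_64S_HASH_MMU the
 * second call compiles away via the empty static inline above.
 */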