#ifndef _ASM_POWERPC_HUGETLB_H
#define _ASM_POWERPC_HUGETLB_H

#include <asm/page.h>

/* Routines implemented in arch/powerpc/mm/hugetlbpage.c */
int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
			   unsigned long len);

void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
			    unsigned long end, unsigned long floor,
			    unsigned long ceiling);

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

/* No arch-specific prefault work is needed on powerpc. */
static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
}

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
}

/*
 * Huge PTEs use the same encoding as ordinary PTEs on powerpc, so the
 * generic pte_* helpers can be reused directly.
 */
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

/* No arch-specific preparation or teardown is needed for huge pages. */
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

static inline void arch_release_hugepage(struct page *page)
{
}

#endif /* _ASM_POWERPC_HUGETLB_H */
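
/*
 * Illustrative sketch (not part of the header above): a minimal
 * user-space demonstration of the alignment arithmetic that
 * prepare_hugepage_range() performs. The 16MB huge page size and all
 * EX_-prefixed names are assumptions chosen for illustration; the
 * kernel derives the real mask from the hstate via huge_page_mask().
 */
#include <stdio.h>

#define EX_HPAGE_SHIFT	24	/* assumed: 16MB huge pages */
#define EX_HPAGE_MASK	(~((1UL << EX_HPAGE_SHIFT) - 1))

/* Mirrors the header's checks: addr and len must both be multiples of
 * the huge page size, i.e. their low EX_HPAGE_SHIFT bits must be zero. */
static int ex_prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~EX_HPAGE_MASK)
		return -1;
	if (addr & ~EX_HPAGE_MASK)
		return -1;
	return 0;
}

int main(void)
{
	/* 16MB-aligned address and length: accepted (prints 0). */
	printf("%d\n", ex_prepare_hugepage_range(0x10000000UL, 0x1000000UL));
	/* Address off by 4KB: rejected (prints -1). */
	printf("%d\n", ex_prepare_hugepage_range(0x10001000UL, 0x1000000UL));
	return 0;
}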