#ifndef _ASM_SPARC64_HUGETLB_H
#define _ASM_SPARC64_HUGETLB_H

#include <asm/page.h>

/*
 * sparc64 hugetlb support: arch hooks consumed by the generic hugetlb
 * code.  The three functions below have out-of-line, arch-specific
 * implementations; everything else in this header is a trivial inline
 * that either forwards to the generic pte helpers or is a no-op.
 */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep);

void hugetlb_prefault_arch_hook(struct mm_struct *mm);

/* sparc64 has no address range that is restricted to hugepages only. */
static inline int is_hugepage_only_range(struct mm_struct *mm,
					 unsigned long addr,
					 unsigned long len) {
	return 0;
}

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	/* Both the length and the start address must be HPAGE_SIZE aligned. */
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

/*
 * Free the page-table pages backing a hugepage range; the generic
 * free_pgd_range() helper is sufficient on this arch.
 */
static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
					  unsigned long addr, unsigned long end,
					  unsigned long floor,
					  unsigned long ceiling)
{
	free_pgd_range(tlb, addr, end, floor, ceiling);
}

/*
 * Intentionally a no-op.  NOTE(review): presumably any required TLB
 * flush is performed by the out-of-line huge_ptep_get_and_clear() /
 * set_huge_pte_at() paths on this arch -- confirm against their
 * definitions before relying on this.
 */
static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep)
{
}

/* Huge PTEs use the same encoding as regular PTEs: forward to pte_none(). */
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

/* Write-protect a huge PTE value via the generic pte helper. */
static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

/* Write-protect a huge PTE in place via the generic pte helper. */
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	ptep_set_wrprotect(mm, addr, ptep);
}

/*
 * Update access/dirty bits on a huge PTE; returns the generic helper's
 * result (nonzero when the entry actually changed).
 */
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
}

/* Plain dereference suffices to read a huge PTE on this arch. */
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	return *ptep;
}

/* No arch-specific preparation is needed for a hugepage; always succeeds. */
static inline int arch_prepare_hugepage(struct page *page)
{
	return 0;
}

/* No arch-specific teardown is needed either; intentionally empty. */
static inline void arch_release_hugepage(struct page *page)
{
}

#endif /* _ASM_SPARC64_HUGETLB_H */