#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
/*
 * For radix we want the generic code to handle hugetlb. But if we want
 * both hash and radix to be enabled together, we need to work around
 * the limitations.
 */
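/*
 * The common powerpc hugetlb code is expected to pick the radix
 * variants at run time. A minimal sketch of such a dispatch, assuming
 * the usual radix_enabled() helper:
 *
 *	if (radix_enabled())
 *		radix__flush_hugetlb_page(vma, vmaddr);
 */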
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags);
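
/*
 * Map the huge page shift of an hstate to the corresponding MMU page
 * size index, so callers can index mmu_psize_defs[] (e.g. on the TLB
 * flush paths). Warn and fall back to the base page size if the shift
 * does not match a supported hugetlb size.
 */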
static inline int hstate_get_psize(struct hstate *hstate)
{
	unsigned long shift;

	shift = huge_page_shift(hstate);
	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return MMU_PAGE_2M;
	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
		return MMU_PAGE_1G;
	else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
		return MMU_PAGE_16M;
	else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
		return MMU_PAGE_16G;
	else {
		WARN(1, "Wrong huge page shift\n");
		return mmu_virtual_psize;
	}
}
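
/*
 * POWER9 DD1 workaround: 2M hugetlb PTEs need R_PAGE_LARGE set. On
 * other CPUs (and for other page sizes) the entry is left untouched.
 */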
#define arch_make_huge_pte arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	unsigned long page_shift;

	if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
		return entry;

	page_shift = huge_page_shift(hstate_vma(vma));
	/*
	 * We don't support 1G hugetlb pages yet.
	 */
	VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
	if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return __pte(pte_val(entry) | R_PAGE_LARGE);
	else
		return entry;
}
#endif /* _ASM_POWERPC_BOOK3S_64_HUGETLB_H */