/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
/*
 * For radix we want the generic code to handle hugetlb. But when hash
 * and radix are enabled together we need to work around the
 * limitations.
 */
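/*
 * Radix variants of the hugetlb TLB flush (global and local-CPU) and
 * of get_unmapped_area, dispatched to when the radix MMU is active.
 */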
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags);

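/*
 * Map a hugetlb hstate's page shift to the corresponding MMU_PAGE_*
 * index so callers (e.g. the radix hugetlb flush path) can hand a
 * page size to the low-level flush routines. Warns and falls back to
 * the base page size if the shift matches no supported huge page size.
 */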
static inline int hstate_get_psize(struct hstate *hstate)
{
	unsigned long shift;

	shift = huge_page_shift(hstate);
	if (shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return MMU_PAGE_2M;
	else if (shift == mmu_psize_defs[MMU_PAGE_1G].shift)
		return MMU_PAGE_1G;
	else if (shift == mmu_psize_defs[MMU_PAGE_16M].shift)
		return MMU_PAGE_16M;
	else if (shift == mmu_psize_defs[MMU_PAGE_16G].shift)
		return MMU_PAGE_16G;
	else {
		WARN(1, "Wrong huge page shift\n");
		return mmu_virtual_psize;
	}
}

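/*
 * POWER9 DD1 needs the huge page size encoded directly in the PTE, so
 * on that revision a 2M hugetlb PTE gets R_PAGE_LARGE set here; all
 * other CPUs, and all other page sizes, return the PTE unchanged.
 */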
#define arch_make_huge_pte arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	unsigned long page_shift;

	if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
		return entry;

	page_shift = huge_page_shift(hstate_vma(vma));
	/*
	 * We don't support 1G hugetlb pages yet.
	 */
	VM_WARN_ON(page_shift == mmu_psize_defs[MMU_PAGE_1G].shift);
	if (page_shift == mmu_psize_defs[MMU_PAGE_2M].shift)
		return __pte(pte_val(entry) | R_PAGE_LARGE);
	else
		return entry;
}

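/*
 * Gigantic (larger than MAX_ORDER, i.e. 1G/16G) hugetlb pages are
 * supported; the generic hugetlb code checks this helper before
 * allocating them.
 */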
#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static inline bool gigantic_page_supported(void)
{
	return true;
}
#endif

#endif /* _ASM_POWERPC_BOOK3S_64_HUGETLB_H */