#ifndef _ASM_RISCV_VMALLOC_H
#define _ASM_RISCV_VMALLOC_H

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP

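/*
 * Runtime page-table geometry flags set up by the early MM code in
 * arch/riscv/mm/init.c: true when the kernel runs with a 4-level (Sv48)
 * or 5-level (Sv57) page table rather than the 3-level Sv39 layout.
 */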
extern bool pgtable_l4_enabled, pgtable_l5_enabled;

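/* Cap the huge mappings used by ioremap() at PUD size. */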
#define IOREMAP_MAX_ORDER (PUD_SHIFT)

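/*
 * PUD-level huge vmap mappings are only possible when the PUD is a real
 * page-table level, i.e. with Sv48 or Sv57; under Sv39 the PUD folds
 * into the PGD.
 */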
#define arch_vmap_pud_supported arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return pgtable_l4_enabled || pgtable_l5_enabled;
}

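/* PMD-size (2 MiB with 4 KiB base pages) mappings are always available. */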
#define arch_vmap_pmd_supported arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return true;
}

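/*
 * Svnapot support: the NAPOT extension lets a naturally aligned
 * power-of-two range of contiguous base-page PTEs be marked so that the
 * whole range can be cached in a single TLB entry.
 */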
#ifdef CONFIG_RISCV_ISA_SVNAPOT
#include <linux/pgtable.h>

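/*
 * Tell the generic vmap code how many bytes, starting at @addr, can be
 * covered by a single (possibly NAPOT) PTE mapping: the largest NAPOT
 * size that fits in [@addr, @end), respects @max_page_shift, and is
 * naturally aligned both virtually and physically; PAGE_SIZE otherwise.
 */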
#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	unsigned long map_size = PAGE_SIZE;
	unsigned long size, order;

	/* Without Svnapot, only base pages can be mapped at the PTE level. */
	if (!has_svnapot())
		return map_size;

	/* Try candidate NAPOT sizes from largest to smallest. */
	for_each_napot_order_rev(order) {
		/* Respect the caller's upper bound on the mapping size. */
		if (napot_cont_shift(order) > max_page_shift)
			continue;

		size = napot_cont_size(order);
		/* The remaining range must hold the whole NAPOT region. */
		if (end - addr < size)
			continue;

		/* The virtual address must be naturally aligned ... */
		if (!IS_ALIGNED(addr, size))
			continue;

		/* ... and so must the physical address. */
		if (!IS_ALIGNED(PFN_PHYS(pfn), size))
			continue;

		map_size = size;
		break;
	}

	return map_size;
}
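
/*
 * Illustrative example (assuming 4 KiB base pages and the 64 KiB NAPOT
 * size defined by the ratified Svnapot extension): a 64 KiB span whose
 * virtual and physical addresses are both 64 KiB aligned yields a
 * map_size of 64 KiB, so the caller can install one NAPOT mapping
 * instead of 16 independent base-page mappings; any misalignment falls
 * back to PAGE_SIZE steps.
 */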
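/*
 * Report the largest PTE-level page shift usable for an allocation of
 * @size bytes: the shift of the biggest NAPOT size that evenly divides
 * @size, or PAGE_SHIFT when Svnapot is absent or nothing fits.
 */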
#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	int shift = PAGE_SHIFT;
	unsigned long order;

	if (!has_svnapot())
		return shift;

	/* PMD-size and larger mappings are handled by the levels above. */
	WARN_ON_ONCE(size >= PMD_SIZE);

	/* Pick the largest NAPOT size that evenly divides @size. */
	for_each_napot_order_rev(order) {
		if (napot_cont_size(order) > size)
			continue;

		if (!IS_ALIGNED(size, napot_cont_size(order)))
			continue;

		shift = napot_cont_shift(order);
		break;
	}

	return shift;
}
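
/*
 * Illustrative example (again assuming 4 KiB base pages and a 64 KiB
 * NAPOT size): a 128 KiB allocation reports shift 16 because 64 KiB
 * divides it evenly, while a 24 KiB allocation reports PAGE_SHIFT
 * since no NAPOT size fits.
 */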

#endif /* CONFIG_RISCV_ISA_SVNAPOT */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#endif /* _ASM_RISCV_VMALLOC_H */