xref: /openbmc/linux/arch/powerpc/mm/book3s64/pgtable.c (revision da097dcc)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
4  */
5 
6 #include <linux/sched.h>
7 #include <linux/mm_types.h>
8 #include <linux/memblock.h>
9 #include <linux/memremap.h>
10 #include <linux/pkeys.h>
11 #include <linux/debugfs.h>
12 #include <linux/proc_fs.h>
13 #include <misc/cxl-base.h>
14 
15 #include <asm/pgalloc.h>
16 #include <asm/tlb.h>
17 #include <asm/trace.h>
18 #include <asm/powernv.h>
19 #include <asm/firmware.h>
20 #include <asm/ultravisor.h>
21 #include <asm/kexec.h>
22 
23 #include <mm/mmu_decl.h>
24 #include <trace/events/thp.h>
25 
26 #include "internal.h"
27 
28 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
29 EXPORT_SYMBOL_GPL(mmu_psize_defs);
30 
31 #ifdef CONFIG_SPARSEMEM_VMEMMAP
32 int mmu_vmemmap_psize = MMU_PAGE_4K;
33 #endif
34 
35 unsigned long __pmd_frag_nr;
36 EXPORT_SYMBOL(__pmd_frag_nr);
37 unsigned long __pmd_frag_size_shift;
38 EXPORT_SYMBOL(__pmd_frag_size_shift);
39 
40 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
41 /*
42  * This is called when relaxing access to a hugepage. It's also called in the
43  * page fault path when we don't hit any of the major fault cases, i.e. a
44  * minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc. The generic code will
45  * have handled those for us; we additionally deal with missing execute
46  * permission here on some processors.
47  */
48 int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
49 			  pmd_t *pmdp, pmd_t entry, int dirty)
50 {
51 	int changed;
52 #ifdef CONFIG_DEBUG_VM
53 	WARN_ON(!pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
54 	assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));
55 #endif
56 	changed = !pmd_same(*(pmdp), entry);
57 	if (changed) {
58 		/*
59 		 * We can use MMU_PAGE_2M here, because only the radix
60 		 * path looks at the psize.
61 		 */
62 		__ptep_set_access_flags(vma, pmdp_ptep(pmdp),
63 					pmd_pte(entry), address, MMU_PAGE_2M);
64 	}
65 	return changed;
66 }
67 
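/*
 * PUD variant of pmdp_set_access_flags(). As the DEBUG_VM check suggests,
 * this is currently only expected for devmap (huge DAX) puds. MMU_PAGE_1G
 * is passed for the same reason as above: only the radix path uses the psize.
 */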
68 int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
69 			  pud_t *pudp, pud_t entry, int dirty)
70 {
71 	int changed;
72 #ifdef CONFIG_DEBUG_VM
73 	WARN_ON(!pud_devmap(*pudp));
74 	assert_spin_locked(pud_lockptr(vma->vm_mm, pudp));
75 #endif
76 	changed = !pud_same(*(pudp), entry);
77 	if (changed) {
78 		/*
79 		 * We can use MMU_PAGE_1G here, because only the radix
80 		 * path looks at the psize.
81 		 */
82 		__ptep_set_access_flags(vma, pudp_ptep(pudp),
83 					pud_pte(entry), address, MMU_PAGE_1G);
84 	}
85 	return changed;
86 }
87 
88 
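/*
 * Test and clear the accessed (young) bit of a huge pmd/pud. These are thin
 * wrappers around the __pmdp/__pudp helpers, which perform the actual update.
 */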
89 int pmdp_test_and_clear_young(struct vm_area_struct *vma,
90 			      unsigned long address, pmd_t *pmdp)
91 {
92 	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
93 }
94 
95 int pudp_test_and_clear_young(struct vm_area_struct *vma,
96 			      unsigned long address, pud_t *pudp)
97 {
98 	return __pudp_test_and_clear_young(vma->vm_mm, address, pudp);
99 }
100 
101 /*
102  * Set a new huge pmd. We should not be called for updating
103  * an existing pmd entry; that should go via pmd_hugepage_update().
104  */
105 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
106 		pmd_t *pmdp, pmd_t pmd)
107 {
108 #ifdef CONFIG_DEBUG_VM
109 	/*
110 	 * Make sure the hardware valid bit is not set. We don't do
111 	 * a TLB flush for this update.
112 	 */
113 
114 	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
115 	assert_spin_locked(pmd_lockptr(mm, pmdp));
116 	WARN_ON(!(pmd_large(pmd)));
117 #endif
118 	trace_hugepage_set_pmd(addr, pmd_val(pmd));
119 	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
120 }
121 
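/*
 * Set a new huge pud. As with set_pmd_at(), this must not be used to update
 * an existing entry, since no TLB flush is done here.
 */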
122 void set_pud_at(struct mm_struct *mm, unsigned long addr,
123 		pud_t *pudp, pud_t pud)
124 {
125 #ifdef CONFIG_DEBUG_VM
126 	/*
127 	 * Make sure the hardware valid bit is not set. We don't do
128 	 * a TLB flush for this update.
129 	 */
130 
131 	WARN_ON(pte_hw_valid(pud_pte(*pudp)));
132 	assert_spin_locked(pud_lockptr(mm, pudp));
133 	WARN_ON(!(pud_leaf(pud)));
134 #endif
135 	trace_hugepage_set_pud(addr, pud_val(pud));
136 	return set_pte_at(mm, addr, pudp_ptep(pudp), pud_pte(pud));
137 }
138 
139 static void do_serialize(void *arg)
140 {
141 	/* We've taken the IPI, so try to trim the mask while here */
142 	if (radix_enabled()) {
143 		struct mm_struct *mm = arg;
144 		exit_lazy_flush_tlb(mm, false);
145 	}
146 }
147 
148 /*
149  * Serialize against __find_linux_pte(), which does a lock-less
150  * lookup in page tables with local interrupts disabled. For huge pages
151  * it casts pmd_t to pte_t. Since the format of pte_t differs from
152  * pmd_t, we want to prevent a transition from a pmd pointing to a page
153  * table to a pmd pointing to a huge page (and back) while interrupts
154  * are disabled. Various code paths clear the pmd before possibly
155  * replacing it with a page table pointer, so make sure we wait for any
156  * parallel __find_linux_pte() to finish.
157  */
158 void serialize_against_pte_lookup(struct mm_struct *mm)
159 {
160 	smp_mb();
161 	smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1);
162 }
163 
164 /*
165  * We use this to invalidate a pmdp entry before switching it from a
166  * huge-page entry to a regular pmd entry (pointing to a page table).
167  */
168 pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
169 		     pmd_t *pmdp)
170 {
171 	unsigned long old_pmd;
172 
173 	VM_WARN_ON_ONCE(!pmd_present(*pmdp));
174 	old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
175 	flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
176 	return __pmd(old_pmd);
177 }
178 
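/*
 * Get and clear a huge pmd entry, flushing the TLB immediately unless this
 * is a fullmm teardown (see the comment on the flush below).
 */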
179 pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
180 				   unsigned long addr, pmd_t *pmdp, int full)
181 {
182 	pmd_t pmd;
183 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
184 	VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
185 		   !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
186 	pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
187 	/*
188 	 * If it is not a fullmm flush, then a parallel page fault can
189 	 * possibly convert this PMD entry to a regular level 0 PTE.
190 	 * Make sure we flush the TLB in this case.
191 	 */
192 	if (!full)
193 		flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
194 	return pmd;
195 }
196 
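/*
 * PUD variant of pmdp_huge_get_and_clear_full() above.
 */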
197 pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
198 				   unsigned long addr, pud_t *pudp, int full)
199 {
200 	pud_t pud;
201 
202 	VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
203 	VM_BUG_ON((pud_present(*pudp) && !pud_devmap(*pudp)) ||
204 		  !pud_present(*pudp));
205 	pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp);
206 	/*
207 	 * If it is not a fullmm flush, then a parallel page fault can
208 	 * possibly convert this PUD entry to lower-level page table
209 	 * entries. Make sure we flush the TLB in this case.
210 	 */
211 	if (!full)
212 		flush_pud_tlb_range(vma, addr, addr + HPAGE_PUD_SIZE);
213 	return pud;
214 }
215 
216 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
217 {
218 	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
219 }
220 
221 static pud_t pud_set_protbits(pud_t pud, pgprot_t pgprot)
222 {
223 	return __pud(pud_val(pud) | pgprot_val(pgprot));
224 }
225 
226 /*
227  * At some point we should be able to get rid of pmd_mkhuge() and
228  * mk_huge_pmd(), once all the other archs are updated to mark the
229  * pmd huge in pfn_pmd().
230  */
231 pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
232 {
233 	unsigned long pmdv;
234 
235 	pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
236 
237 	return __pmd_mkhuge(pmd_set_protbits(__pmd(pmdv), pgprot));
238 }
239 
240 pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot)
241 {
242 	unsigned long pudv;
243 
244 	pudv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
245 
246 	return __pud_mkhuge(pud_set_protbits(__pud(pudv), pgprot));
247 }
248 
249 pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
250 {
251 	return pfn_pmd(page_to_pfn(page), pgprot);
252 }
253 
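/*
 * Change the protection of a huge pmd: keep the fields covered by
 * _HPAGE_CHG_MASK and fold in the new protection bits.
 */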
254 pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
255 {
256 	unsigned long pmdv;
257 
258 	pmdv = pmd_val(pmd);
259 	pmdv &= _HPAGE_CHG_MASK;
260 	return pmd_set_protbits(__pmd(pmdv), newprot);
261 }
262 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
263 
264 /* For use by kexec, called with MMU off */
265 notrace void mmu_cleanup_all(void)
266 {
267 	if (radix_enabled())
268 		radix__mmu_cleanup_all();
269 	else if (mmu_hash_ops.hpte_clear_all)
270 		mmu_hash_ops.hpte_clear_all();
271 
272 	reset_sprs();
273 }
274 
275 #ifdef CONFIG_MEMORY_HOTPLUG
276 int __meminit create_section_mapping(unsigned long start, unsigned long end,
277 				     int nid, pgprot_t prot)
278 {
279 	if (radix_enabled())
280 		return radix__create_section_mapping(start, end, nid, prot);
281 
282 	return hash__create_section_mapping(start, end, nid, prot);
283 }
284 
285 int __meminit remove_section_mapping(unsigned long start, unsigned long end)
286 {
287 	if (radix_enabled())
288 		return radix__remove_section_mapping(start, end);
289 
290 	return hash__remove_section_mapping(start, end);
291 }
292 #endif /* CONFIG_MEMORY_HOTPLUG */
293 
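/*
 * Allocate the partition table and point the hardware at it via the PTCR.
 * When an ultravisor owns the PTCR, set_ptcr_when_no_uv() skips the update;
 * the nest MMU is given the same PTCR value via the powernv call.
 */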
294 void __init mmu_partition_table_init(void)
295 {
296 	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
297 	unsigned long ptcr;
298 
299 	/* Initialize the Partition Table with no entries */
300 	partition_tb = memblock_alloc(patb_size, patb_size);
301 	if (!partition_tb)
302 		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
303 		      __func__, patb_size, patb_size);
304 
305 	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
306 	set_ptcr_when_no_uv(ptcr);
307 	powernv_set_nmmu_ptcr(ptcr);
308 }
309 
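/*
 * Flush translations for @lpid after its partition table entry has changed.
 * The radix case uses the lpid flush helpers; the hash case issues a
 * partition-scoped tlbie sequence directly.
 */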
310 static void flush_partition(unsigned int lpid, bool radix)
311 {
312 	if (radix) {
313 		radix__flush_all_lpid(lpid);
314 		radix__flush_all_lpid_guest(lpid);
315 	} else {
316 		asm volatile("ptesync" : : : "memory");
317 		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
318 			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
319 		/* do we need fixup here? */
320 		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
321 		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
322 	}
323 }
324 
325 void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
326 				  unsigned long dw1, bool flush)
327 {
328 	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);
329 
330 	/*
331 	 * When the ultravisor is enabled, the partition table is stored in
332 	 * secure memory and can only be accessed via an ultravisor call.
333 	 * However, we maintain a copy of the partition table in normal memory
334 	 * to allow Nest MMU translations to occur (for normal VMs).
335 	 *
336 	 * Therefore, here we always update partition_tb, regardless of whether
337 	 * we are running under an ultravisor or not.
338 	 */
339 	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
340 	partition_tb[lpid].patb1 = cpu_to_be64(dw1);
341 
342 	/*
343 	 * If the ultravisor is enabled, we do an ultravisor call to register
344 	 * the partition table entry (PATE), which also does a global flush of
345 	 * TLBs and partition table caches for the lpid. Otherwise, just do the
346 	 * flush. The type of flush (hash or radix) depends on what the previous
347 	 * use of the partition ID was, not the new use.
348 	 */
349 	if (firmware_has_feature(FW_FEATURE_ULTRAVISOR)) {
350 		uv_register_pate(lpid, dw0, dw1);
351 		pr_info("PATE registered by ultravisor: dw0 = 0x%lx, dw1 = 0x%lx\n",
352 			dw0, dw1);
353 	} else if (flush) {
354 		/*
355 		 * Boot does not need to flush, because the MMU is off and
356 		 * each CPU does a tlbiel_all() before switching it on, which
357 		 * flushes everything.
358 		 */
359 		flush_partition(lpid, (old & PATB_HR));
360 	}
361 }
362 EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
363 
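/*
 * Hand out the next free PMD fragment from the per-mm cache, if any.
 * The cache pointer is cleared once the last fragment of the page is taken.
 */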
364 static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
365 {
366 	void *pmd_frag, *ret;
367 
368 	if (PMD_FRAG_NR == 1)
369 		return NULL;
370 
371 	spin_lock(&mm->page_table_lock);
372 	ret = mm->context.pmd_frag;
373 	if (ret) {
374 		pmd_frag = ret + PMD_FRAG_SIZE;
375 		/*
376 		 * If we have used up all the fragments, clear the cached pmd_frag
377 		 */
378 		if (((unsigned long)pmd_frag & ~PAGE_MASK) == 0)
379 			pmd_frag = NULL;
380 		mm->context.pmd_frag = pmd_frag;
381 	}
382 	spin_unlock(&mm->page_table_lock);
383 	return (pmd_t *)ret;
384 }
385 
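/*
 * Allocate a fresh page of PMD fragments. The first fragment is returned to
 * the caller; if the per-mm cache is empty, the remaining fragments are
 * parked in mm->context.pmd_frag with the refcount set to PMD_FRAG_NR.
 */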
386 static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
387 {
388 	void *ret = NULL;
389 	struct ptdesc *ptdesc;
390 	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
391 
392 	if (mm == &init_mm)
393 		gfp &= ~__GFP_ACCOUNT;
394 	ptdesc = pagetable_alloc(gfp, 0);
395 	if (!ptdesc)
396 		return NULL;
397 	if (!pagetable_pmd_ctor(ptdesc)) {
398 		pagetable_free(ptdesc);
399 		return NULL;
400 	}
401 
402 	atomic_set(&ptdesc->pt_frag_refcount, 1);
403 
404 	ret = ptdesc_address(ptdesc);
405 	/*
406 	 * If we support only one fragment, just return the
407 	 * allocated page.
408 	 */
409 	if (PMD_FRAG_NR == 1)
410 		return ret;
411 
412 	spin_lock(&mm->page_table_lock);
413 	/*
414 	 * If mm->context.pmd_frag is already set (another thread refilled
415 	 * the cache while we were allocating), return the allocated page
416 	 * with a single fragment count; otherwise seed the cache with it.
417 	 */
418 	if (likely(!mm->context.pmd_frag)) {
419 		atomic_set(&ptdesc->pt_frag_refcount, PMD_FRAG_NR);
420 		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
421 	}
422 	spin_unlock(&mm->page_table_lock);
423 
424 	return (pmd_t *)ret;
425 }
426 
427 pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
428 {
429 	pmd_t *pmd;
430 
431 	pmd = get_pmd_from_cache(mm);
432 	if (pmd)
433 		return pmd;
434 
435 	return __alloc_for_pmdcache(mm);
436 }
437 
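/*
 * Drop a reference on the page backing a PMD fragment; the page itself is
 * freed once its last fragment is released.
 */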
438 void pmd_fragment_free(unsigned long *pmd)
439 {
440 	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
441 
442 	if (pagetable_is_reserved(ptdesc))
443 		return free_reserved_ptdesc(ptdesc);
444 
445 	BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
446 	if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
447 		pagetable_pmd_dtor(ptdesc);
448 		pagetable_free(ptdesc);
449 	}
450 }
451 
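/*
 * Free a page table of the given level, dispatching on the index that
 * pgtable_free_tlb() encoded alongside the table pointer.
 */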
452 static inline void pgtable_free(void *table, int index)
453 {
454 	switch (index) {
455 	case PTE_INDEX:
456 		pte_fragment_free(table, 0);
457 		break;
458 	case PMD_INDEX:
459 		pmd_fragment_free(table);
460 		break;
461 	case PUD_INDEX:
462 		__pud_free(table);
463 		break;
464 #if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
465 		/* 16M hugepd directory at pud level */
466 	case HTLB_16M_INDEX:
467 		BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
468 		kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
469 		break;
470 		/* 16G hugepd directory at the pgd level */
471 	case HTLB_16G_INDEX:
472 		BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
473 		kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
474 		break;
475 #endif
476 		/* We don't free pgd table via RCU callback */
477 	default:
478 		BUG();
479 	}
480 }
481 
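/*
 * Defer freeing of a page table until after the TLB flush. The level index
 * is stashed in the low bits of the (sufficiently aligned) table pointer and
 * recovered in __tlb_remove_table().
 */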
482 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
483 {
484 	unsigned long pgf = (unsigned long)table;
485 
486 	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
487 	pgf |= index;
488 	tlb_remove_table(tlb, (void *)pgf);
489 }
490 
491 void __tlb_remove_table(void *_table)
492 {
493 	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
494 	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
495 
496 	return pgtable_free(table, index);
497 }
498 
499 #ifdef CONFIG_PROC_FS
500 atomic_long_t direct_pages_count[MMU_PAGE_COUNT];
501 
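/*
 * Report the direct-map page size breakdown in /proc/meminfo. The counters
 * are in pages of the respective size; the shifts below convert them to kB.
 */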
502 void arch_report_meminfo(struct seq_file *m)
503 {
504 	/*
505 	 * Hash maps the memory with one page size, mmu_linear_psize,
506 	 * so don't bother to print these on hash.
507 	 */
508 	if (!radix_enabled())
509 		return;
510 	seq_printf(m, "DirectMap4k:    %8lu kB\n",
511 		   atomic_long_read(&direct_pages_count[MMU_PAGE_4K]) << 2);
512 	seq_printf(m, "DirectMap64k:    %8lu kB\n",
513 		   atomic_long_read(&direct_pages_count[MMU_PAGE_64K]) << 6);
514 	seq_printf(m, "DirectMap2M:    %8lu kB\n",
515 		   atomic_long_read(&direct_pages_count[MMU_PAGE_2M]) << 11);
516 	seq_printf(m, "DirectMap1G:    %8lu kB\n",
517 		   atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
518 }
519 #endif /* CONFIG_PROC_FS */
520 
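/*
 * Transactional protection update: ptep_modify_prot_start() invalidates the
 * entry (while keeping it logically present) and returns the old value;
 * ptep_modify_prot_commit() installs the new one.
 */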
521 pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
522 			     pte_t *ptep)
523 {
524 	unsigned long pte_val;
525 
526 	/*
527 	 * Clear _PAGE_PRESENT so that no parallel hardware update is
528 	 * possible. Also keep pte_present() true (via _PAGE_INVALID) so
529 	 * that we don't take a spurious fault.
530 	 */
531 	pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);
532 
533 	return __pte(pte_val);
534 
535 }
536 
537 void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
538 			     pte_t *ptep, pte_t old_pte, pte_t pte)
539 {
540 	if (radix_enabled())
541 		return radix__ptep_modify_prot_commit(vma, addr,
542 						      ptep, old_pte, pte);
543 	set_pte_at(vma->vm_mm, addr, ptep, pte);
544 }
545 
546 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
547 /*
548  * For hash translation mode, we use the deposited table to store hash slot
549  * information, stored at a PTRS_PER_PMD offset from the related pmd
550  * location. Hence a pmd move requires a deposit and withdraw.
551  *
552  * For radix translation with split pmd ptl, we store the deposited table in
553  * the pmd page. Hence if we have a different pmd page we need to withdraw
554  * during a pmd move.
555  *
556  * With hash we always use the deposited table, irrespective of anon or not.
557  * With radix we use the deposited table only for anonymous mappings.
558  */
559 int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
560 			   struct spinlock *old_pmd_ptl,
561 			   struct vm_area_struct *vma)
562 {
563 	if (radix_enabled())
564 		return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
565 
566 	return true;
567 }
568 #endif
569 
570 /*
571  * Does the CPU support tlbie?
572  */
573 bool tlbie_capable __read_mostly = true;
574 EXPORT_SYMBOL(tlbie_capable);
575 
576 /*
577  * Should tlbie be used for management of CPU TLBs, for kernel and process
578  * address spaces? tlbie may still be used for nMMU accelerators, and for KVM
579  * guest address spaces.
580  */
581 bool tlbie_enabled __read_mostly = true;
582 
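/*
 * "disable_tlbie" on the kernel command line forces TLB management to use
 * tlbiel plus IPIs rather than broadcast tlbie. Only possible with the radix
 * MMU; hash requires tlbie.
 */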
583 static int __init setup_disable_tlbie(char *str)
584 {
585 	if (!radix_enabled()) {
586 		pr_err("disable_tlbie: Unable to disable TLBIE with Hash MMU.\n");
587 		return 1;
588 	}
589 
590 	tlbie_capable = false;
591 	tlbie_enabled = false;
592 
593 	return 1;
594 }
595 __setup("disable_tlbie", setup_disable_tlbie);
596 
597 static int __init pgtable_debugfs_setup(void)
598 {
599 	if (!tlbie_capable)
600 		return 0;
601 
602 	/*
603 	 * There is no locking vs tlb flushing when changing this value.
604 	 * The tlb flushers will see one value or another, and use either
605 	 * tlbie or tlbiel with IPIs. In both cases the TLBs will be
606 	 * invalidated as expected.
607 	 */
608 	debugfs_create_bool("tlbie_enabled", 0600,
609 			arch_debugfs_dir,
610 			&tlbie_enabled);
611 
612 	return 0;
613 }
614 arch_initcall(pgtable_debugfs_setup);
615 
616 #if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN)
617 /*
618  * Override the generic version in mm/memremap.c.
619  *
620  * With hash translation, the direct-map range is mapped with just one
621  * page size selected by htab_init_page_sizes(). Consult
622  * mmu_psize_defs[] to determine the minimum page size alignment.
623  */
624 unsigned long memremap_compat_align(void)
625 {
626 	if (!radix_enabled()) {
627 		unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;
628 		return max(SUBSECTION_SIZE, 1UL << shift);
629 	}
630 
631 	return SUBSECTION_SIZE;
632 }
633 EXPORT_SYMBOL_GPL(memremap_compat_align);
634 #endif
635 
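/*
 * Arch-specific vm_get_page_prot(): besides the generic protection_map
 * lookup, this handles execute-only mappings on radix, the SAO memory
 * attribute, and protection-key bits.
 */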
636 pgprot_t vm_get_page_prot(unsigned long vm_flags)
637 {
638 	unsigned long prot;
639 
640 	/* Radix supports execute-only, but protection_map maps X -> RX */
641 	if (radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)) {
642 		prot = pgprot_val(PAGE_EXECONLY);
643 	} else {
644 		prot = pgprot_val(protection_map[vm_flags &
645 						 (VM_ACCESS_FLAGS | VM_SHARED)]);
646 	}
647 
648 	if (vm_flags & VM_SAO)
649 		prot |= _PAGE_SAO;
650 
651 #ifdef CONFIG_PPC_MEM_KEYS
652 	prot |= vmflag_to_pte_pkey_bits(vm_flags);
653 #endif
654 
655 	return __pgprot(prot);
656 }
657 EXPORT_SYMBOL(vm_get_page_prot);
658