xref: /openbmc/linux/arch/powerpc/mm/pgtable.c (revision 09a4a03c073bab5b375b71769f708d6932b370f7)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains common routines for dealing with freeing of page
 * tables, along with common page table handling code.
 *
 *  Derived from arch/powerpc/mm/tlb_64.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_PPC64
#define PGD_ALIGN (sizeof(pgd_t) * MAX_PTRS_PER_PGD)
#else
#define PGD_ALIGN PAGE_SIZE
#endif

pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __section(".bss..page_aligned") __aligned(PGD_ALIGN);

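/*
 * 0x400 is the instruction storage interrupt vector, so the check below
 * reports whether the trap being serviced was taken on an instruction
 * fetch rather than a data access.
 */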
static inline int is_exec_fault(void)
{
	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}

/* We only try to do i/d cache coherency on stuff that looks like
 * reasonably "normal" PTEs. We currently require a PTE to be present,
 * and we avoid _PAGE_SPECIAL and cache-inhibited PTEs. We also only do
 * that on userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte)
{
	if (pte_present(pte) && !pte_special(pte)) {
		if (pte_ci(pte))
			return 0;
		if (pte_user(pte))
			return 1;
	}
	return 0;
}
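
/*
 * For illustration only (hypothetical caller, not part of this file):
 * assuming a valid pfn, a plain userspace mapping passes the check while
 * a cache-inhibited kernel mapping does not:
 *
 *	pte_t user_pte = pfn_pte(pfn, PAGE_READONLY);
 *	pte_t io_pte   = pfn_pte(pfn, pgprot_noncached(PAGE_KERNEL));
 *
 * pte_looks_normal(user_pte) returns 1 (present, user, cacheable);
 * pte_looks_normal(io_pte) returns 0 (cache-inhibited).
 */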

static struct folio *maybe_pte_to_folio(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return NULL;
	page = pfn_to_page(pfn);
	if (PageReserved(page))
		return NULL;
	return page_folio(page);
}

#ifdef CONFIG_PPC_BOOK3S

/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, then we always
 * flush the cache for valid PTEs in set_pte. Embedded CPUs without HW
 * exec support fall into the same category.
 */

static pte_t set_pte_filter_hash(pte_t pte)
{
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct folio *folio = maybe_pte_to_folio(pte);
		if (!folio)
			return pte;
		if (!test_bit(PG_dcache_clean, &folio->flags)) {
			flush_dcache_icache_folio(folio);
			set_bit(PG_dcache_clean, &folio->flags);
		}
	}
	return pte;
}
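
/*
 * Usage sketch (hypothetical, simplified): on a hash MMU without a
 * coherent icache, the first mapping of a freshly written executable
 * page goes through the flush exactly once per folio:
 *
 *	pte = set_pte_filter_hash(mk_pte(page, PAGE_READONLY_X));
 *	// first call: flushes d/i caches, sets PG_dcache_clean
 *	pte = set_pte_filter_hash(pte);
 *	// later calls: PG_dcache_clean already set, no flush
 */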

#else /* CONFIG_PPC_BOOK3S */

static pte_t set_pte_filter_hash(pte_t pte) { return pte; }

#endif /* CONFIG_PPC_BOOK3S */

/* Embedded type MMU with HW exec support. This is a bit more complicated
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC, so
 * instead we "filter out" the exec permission for non-clean pages.
 *
 * This is also called once per folio, so only work with folio->flags here.
 */
set_pte_filter(pte_t pte)110b12c07a4SChristophe Leroy static inline pte_t set_pte_filter(pte_t pte)
1118d30c14cSBenjamin Herrenschmidt {
1129fee28baSMatthew Wilcox (Oracle) 	struct folio *folio;
113ea3cc330SBenjamin Herrenschmidt 
114af3a0ea4SNicholas Piggin 	if (radix_enabled())
115af3a0ea4SNicholas Piggin 		return pte;
116af3a0ea4SNicholas Piggin 
117385e89d5SChristophe Leroy 	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
118385e89d5SChristophe Leroy 		return set_pte_filter_hash(pte);
119385e89d5SChristophe Leroy 
120ea3cc330SBenjamin Herrenschmidt 	/* No exec permission in the first place, move on */
12126973fa5SChristophe Leroy 	if (!pte_exec(pte) || !pte_looks_normal(pte))
122ea3cc330SBenjamin Herrenschmidt 		return pte;
123ea3cc330SBenjamin Herrenschmidt 
124ea3cc330SBenjamin Herrenschmidt 	/* If you set _PAGE_EXEC on weird pages you're on your own */
1259fee28baSMatthew Wilcox (Oracle) 	folio = maybe_pte_to_folio(pte);
1269fee28baSMatthew Wilcox (Oracle) 	if (unlikely(!folio))
127ea3cc330SBenjamin Herrenschmidt 		return pte;
128ea3cc330SBenjamin Herrenschmidt 
129ea3cc330SBenjamin Herrenschmidt 	/* If the page clean, we move on */
1309fee28baSMatthew Wilcox (Oracle) 	if (test_bit(PG_dcache_clean, &folio->flags))
131ea3cc330SBenjamin Herrenschmidt 		return pte;
132ea3cc330SBenjamin Herrenschmidt 
133ea3cc330SBenjamin Herrenschmidt 	/* If it's an exec fault, we flush the cache and make it clean */
134ea3cc330SBenjamin Herrenschmidt 	if (is_exec_fault()) {
1359fee28baSMatthew Wilcox (Oracle) 		flush_dcache_icache_folio(folio);
1369fee28baSMatthew Wilcox (Oracle) 		set_bit(PG_dcache_clean, &folio->flags);
137ea3cc330SBenjamin Herrenschmidt 		return pte;
1388d30c14cSBenjamin Herrenschmidt 	}
139ea3cc330SBenjamin Herrenschmidt 
140ea3cc330SBenjamin Herrenschmidt 	/* Else, we filter out _PAGE_EXEC */
14126973fa5SChristophe Leroy 	return pte_exprotect(pte);
142ea3cc330SBenjamin Herrenschmidt }
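
/*
 * Illustrative flow (hypothetical caller): on a first read fault of a
 * page in an executable VMA, the filter strips _PAGE_EXEC because the
 * folio is not yet clean and the fault is not an exec fault; a later
 * exec fault then recovers it via set_access_flags_filter() below:
 *
 *	pte_t pte = mk_pte(page, PAGE_READONLY_X);
 *	pte = set_pte_filter(pte);	// read fault: exec filtered out
 */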

static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	struct folio *folio;

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		return pte;

	if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return pte;

	/* So here, we only care about exec faults, as we use them
	 * to recover lost _PAGE_EXEC and perform I$/D$ coherency
	 * if necessary. Also if _PAGE_EXEC is already set, same deal,
	 * we just bail out
	 */
	if (dirty || pte_exec(pte) || !is_exec_fault())
		return pte;

#ifdef CONFIG_DEBUG_VM
	/* So this is an exec fault, _PAGE_EXEC is not set. If it was
	 * an error we would have bailed out earlier in do_page_fault()
	 * but let's make sure of it
	 */
	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
		return pte;
#endif /* CONFIG_DEBUG_VM */

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	folio = maybe_pte_to_folio(pte);
	if (unlikely(!folio))
		goto bail;

	/* If the page is already clean, we move on */
	if (test_bit(PG_dcache_clean, &folio->flags))
		goto bail;

	/* Clean the page and set PG_dcache_clean */
	flush_dcache_icache_folio(folio);
	set_bit(PG_dcache_clean, &folio->flags);

 bail:
	return pte_mkexec(pte);
}
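
/*
 * Continuing the sketch above (hypothetical values): when the exec fault
 * arrives, the filter flushes the caches and grants execute permission:
 *
 *	pte = set_access_flags_filter(pte, vma, 0);
 *	// exec fault path: d/i caches flushed, pte now has _PAGE_EXEC
 */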

/*
 * set_ptes stores a range of linux PTEs into the linux page table.
 */
void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte, unsigned int nr)
{
	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called. Filter the pte value and use the filtered value
	 * to setup all the ptes in the range.
	 */
	pte = set_pte_filter(pte);

	/*
	 * We don't need to call arch_enter/leave_lazy_mmu_mode()
	 * because we expect set_ptes to only be used on not present
	 * and not hw_valid ptes. Hence there is no translation cache flush
	 * involved that needs to be batched.
	 */
	for (;;) {
		/*
		 * Make sure hardware valid bit is not set. We don't do
		 * tlb flush for this update.
		 */
		VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

		/* Perform the setting of the PTE */
		__set_pte_at(mm, addr, ptep, pte, 0);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
		/* Increment the pfn */
		pte = pfn_pte(pte_pfn(pte) + 1, pte_pgprot(pte));
	}
}
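
/*
 * Usage sketch (hypothetical caller, locking elided): mapping nr
 * physically contiguous pages starting at pfn in a single call; the
 * loop above derives pfn + 1, pfn + 2, ... for the successive PTEs:
 *
 *	pte_t first = pfn_pte(pfn, vma->vm_page_prot);
 *	set_ptes(vma->vm_mm, addr, ptep, first, nr);
 */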

void unmap_kernel_page(unsigned long va)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep = pte_offset_kernel(pmdp, va);

	pte_clear(&init_mm, va, ptep);
	flush_tlb_kernel_range(va, va + PAGE_SIZE);
}

/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, ie, a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us, we additionally deal with missing execute
 * permission here on some processors
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty)
{
	int changed;
	entry = set_access_flags_filter(entry, vma, dirty);
	changed = !pte_same(*(ptep), entry);
	if (changed) {
		assert_pte_locked(vma->vm_mm, address);
		__ptep_set_access_flags(vma, ptep, entry,
					address, mmu_virtual_psize);
	}
	return changed;
}
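
/*
 * Typical caller pattern (a sketch of what generic mm code does, not a
 * verbatim copy): relax the access flags, and only touch the MMU cache
 * when the PTE actually changed:
 *
 *	entry = pte_mkyoung(entry);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *		update_mmu_cache(vma, address, ptep);
 */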

#ifdef CONFIG_HUGETLB_PAGE
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
#ifdef HUGETLB_NEED_PRELOAD
	/*
	 * The "return 1" forces a call of update_mmu_cache, which will write a
	 * TLB entry.  Without this, platforms that don't do a write of the TLB
	 * entry in the TLB miss handler asm will fault ad infinitum.
	 */
	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
	return 1;
#else
	int changed, psize;

	pte = set_access_flags_filter(pte, vma, dirty);
	changed = !pte_same(*(ptep), pte);
	if (changed) {

#ifdef CONFIG_PPC_BOOK3S_64
		struct hstate *h = hstate_vma(vma);

		psize = hstate_get_psize(h);
#ifdef CONFIG_DEBUG_VM
		assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
#endif

#else
		/*
		 * Not used on non book3s64 platforms.
		 * 8xx compares it with mmu_virtual_psize to
		 * know if it is a huge page or not.
		 */
		psize = MMU_PAGE_COUNT;
#endif
		__ptep_set_access_flags(vma, ptep, pte, addr, psize);
	}
	return changed;
#endif
}

#if defined(CONFIG_PPC_8xx)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		     pte_t pte, unsigned long sz)
{
	pmd_t *pmd = pmd_off(mm, addr);
	pte_basic_t val;
	pte_basic_t *entry = (pte_basic_t *)ptep;
	int num, i;

	/*
	 * Make sure hardware valid bit is not set. We don't do
	 * tlb flush for this update.
	 */
	VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

	pte = set_pte_filter(pte);

	val = pte_val(pte);

	num = number_of_cells_per_pte(pmd, val, 1);

	for (i = 0; i < num; i++, entry++, val += SZ_4K)
		*entry = val;
}
#endif
#endif
#endif /* CONFIG_HUGETLB_PAGE */
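
/*
 * Illustration (assuming the 8xx layout where a 512K huge page is backed
 * by standard 4K page table cells): number_of_cells_per_pte() would
 * return 128, so the loop above writes 128 consecutive entries whose
 * values step by SZ_4K in the physical address bits:
 *
 *	set_huge_pte_at(mm, addr, ptep, pte, SZ_512K);
 */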

#ifdef CONFIG_DEBUG_VM
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, addr);
	BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	/*
	 * To collapse normal pages into a hugepage, khugepaged first sets
	 * the pmd to none to force page fault/gup to take mmap_lock. After
	 * the pmd is set to none, we do a pte_clear which does this
	 * assertion, so if we find the pmd none, return.
	 */
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_nolock(mm, pmd, addr, &ptl);
	BUG_ON(!pte);
	assert_spin_locked(ptl);
	pte_unmap(pte);
}
#endif /* CONFIG_DEBUG_VM */

unsigned long vmalloc_to_phys(void *va)
{
	unsigned long pfn = vmalloc_to_pfn(va);

	BUG_ON(!pfn);
	return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
}
EXPORT_SYMBOL_GPL(vmalloc_to_phys);
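
/*
 * Example (illustrative, assuming buf was allocated with vmalloc() from
 * linux/vmalloc.h and the page is present): translating a vmalloc
 * address for something that needs a physical address:
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *	unsigned long pa = vmalloc_to_phys(buf);
 */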

/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it. This function needs to be called with interrupts disabled. We use
 * this variant when we have MSR[EE] = 0 but the paca->irq_soft_mask =
 * IRQS_ENABLED.
 */
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
			bool *is_thp, unsigned *hpage_shift)
{
	pgd_t *pgdp;
	p4d_t p4d, *p4dp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift;

	if (hpage_shift)
		*hpage_shift = 0;

	if (is_thp)
		*is_thp = false;

	/*
	 * Always operate on the local stack value. This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or a page unmap. The returned pte_t * is still not
	 * stable, so it should be checked by the caller for the above
	 * conditions. Top level is an exception because it is folded
	 * into p4d.
	 */
	pgdp = pgdir + pgd_index(ea);
	p4dp = p4d_offset(pgdp, ea);
	p4d  = READ_ONCE(*p4dp);
	pdshift = P4D_SHIFT;

	if (p4d_none(p4d))
		return NULL;

	if (p4d_is_leaf(p4d)) {
		ret_pte = (pte_t *)p4dp;
		goto out;
	}

	if (is_hugepd(__hugepd(p4d_val(p4d)))) {
		hpdp = (hugepd_t *)&p4d;
		goto out_huge;
	}

	/*
	 * Even if we end up with an unmap, the pgtable will not
	 * be freed, because we do an RCU free and interrupts are
	 * disabled here.
	 */
	pdshift = PUD_SHIFT;
	pudp = pud_offset(&p4d, ea);
	pud  = READ_ONCE(*pudp);

	if (pud_none(pud))
		return NULL;

	if (pud_is_leaf(pud)) {
		ret_pte = (pte_t *)pudp;
		goto out;
	}

	if (is_hugepd(__hugepd(pud_val(pud)))) {
		hpdp = (hugepd_t *)&pud;
		goto out_huge;
	}

	pdshift = PMD_SHIFT;
	pmdp = pmd_offset(&pud, ea);
	pmd  = READ_ONCE(*pmdp);

	/*
	 * A hugepage collapse is captured by this condition, see
	 * pmdp_collapse_flush.
	 */
	if (pmd_none(pmd))
		return NULL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * A hugepage split is captured by this condition, see
	 * pmdp_invalidate.
	 *
	 * Huge page modification can be caught here too.
	 */
	if (pmd_is_serializing(pmd))
		return NULL;
#endif

	if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
		if (is_thp)
			*is_thp = true;
		ret_pte = (pte_t *)pmdp;
		goto out;
	}

	if (pmd_is_leaf(pmd)) {
		ret_pte = (pte_t *)pmdp;
		goto out;
	}

	if (is_hugepd(__hugepd(pmd_val(pmd)))) {
		hpdp = (hugepd_t *)&pmd;
		goto out_huge;
	}

	return pte_offset_kernel(&pmd, ea);

out_huge:
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (hpage_shift)
		*hpage_shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);
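
/*
 * Usage sketch (hypothetical caller): the walk is only safe against
 * concurrent page table teardown while interrupts are hard-disabled,
 * since page tables are freed via RCU:
 *
 *	unsigned long flags;
 *	unsigned int shift;
 *	bool is_thp;
 *	pte_t *ptep, pte;
 *
 *	local_irq_save(flags);
 *	ptep = __find_linux_pte(mm->pgd, ea, &is_thp, &shift);
 *	if (ptep)
 *		pte = READ_ONCE(*ptep);
 *	local_irq_restore(flags);
 */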

/* Note due to the way vm flags are laid out, the bits are XWR */
const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_READONLY_X,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY_X,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_X,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_X,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
};
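
/*
 * Example (illustrative): the generic vm_get_page_prot() declared below
 * indexes this table with the low VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * bits, so a shared read-write mapping resolves to PAGE_SHARED:
 *
 *	pgprot_t prot = protection_map[VM_SHARED | VM_WRITE | VM_READ];
 */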

#ifndef CONFIG_PPC_BOOK3S_64
DECLARE_VM_GET_PAGE_PROT
#endif