xref: /openbmc/linux/arch/powerpc/mm/pgtable_64.c (revision ac29c640)
1 /*
2  *  This file contains ioremap and related functions for 64-bit machines.
3  *
4  *  Derived from arch/ppc64/mm/init.c
5  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6  *
7  *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
8  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
9  *    Copyright (C) 1996 Paul Mackerras
10  *
11  *  Derived from "arch/i386/mm/init.c"
12  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
13  *
14  *  Dave Engebretsen <engebret@us.ibm.com>
15  *      Rework for PPC64 port.
16  *
17  *  This program is free software; you can redistribute it and/or
18  *  modify it under the terms of the GNU General Public License
19  *  as published by the Free Software Foundation; either version
20  *  2 of the License, or (at your option) any later version.
21  *
22  */
23 
24 #include <linux/signal.h>
25 #include <linux/sched.h>
26 #include <linux/kernel.h>
27 #include <linux/errno.h>
28 #include <linux/string.h>
29 #include <linux/export.h>
30 #include <linux/types.h>
31 #include <linux/mman.h>
32 #include <linux/mm.h>
33 #include <linux/swap.h>
34 #include <linux/stddef.h>
35 #include <linux/vmalloc.h>
36 #include <linux/memblock.h>
37 #include <linux/slab.h>
38 #include <linux/hugetlb.h>
39 
40 #include <asm/pgalloc.h>
41 #include <asm/page.h>
42 #include <asm/prom.h>
43 #include <asm/io.h>
44 #include <asm/mmu_context.h>
45 #include <asm/pgtable.h>
46 #include <asm/mmu.h>
47 #include <asm/smp.h>
48 #include <asm/machdep.h>
49 #include <asm/tlb.h>
50 #include <asm/processor.h>
51 #include <asm/cputable.h>
52 #include <asm/sections.h>
53 #include <asm/firmware.h>
54 #include <asm/dma.h>
55 
56 #include "mmu_decl.h"
57 
58 #define CREATE_TRACE_POINTS
59 #include <trace/events/thp.h>
60 
61 /* Some sanity checking */
62 #if TASK_SIZE_USER64 > PGTABLE_RANGE
63 #error TASK_SIZE_USER64 exceeds pagetable range
64 #endif
65 
66 #ifdef CONFIG_PPC_STD_MMU_64
67 #if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
68 #error TASK_SIZE_USER64 exceeds user VSID range
69 #endif
70 #endif
71 
72 unsigned long ioremap_bot = IOREMAP_BASE;
73 
74 #ifdef CONFIG_PPC_MMU_NOHASH
75 static __ref void *early_alloc_pgtable(unsigned long size)
76 {
77 	void *pt;
78 
79 	pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
80 	memset(pt, 0, size);
81 
82 	return pt;
83 }
84 #endif /* CONFIG_PPC_MMU_NOHASH */
85 
86 /*
87  * map_kernel_page is currently only called by __ioremap.  It adds an
88  * entry to the ioremap page table and an entry to the HPT, possibly
89  * bolting it.
90  */
91 int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
92 {
93 	pgd_t *pgdp;
94 	pud_t *pudp;
95 	pmd_t *pmdp;
96 	pte_t *ptep;
97 
98 	if (slab_is_available()) {
99 		pgdp = pgd_offset_k(ea);
100 		pudp = pud_alloc(&init_mm, pgdp, ea);
101 		if (!pudp)
102 			return -ENOMEM;
103 		pmdp = pmd_alloc(&init_mm, pudp, ea);
104 		if (!pmdp)
105 			return -ENOMEM;
106 		ptep = pte_alloc_kernel(pmdp, ea);
107 		if (!ptep)
108 			return -ENOMEM;
109 		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
110 							  __pgprot(flags)));
111 	} else {
112 #ifdef CONFIG_PPC_MMU_NOHASH
113 		pgdp = pgd_offset_k(ea);
114 #ifdef PUD_TABLE_SIZE
115 		if (pgd_none(*pgdp)) {
116 			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
117 			BUG_ON(pudp == NULL);
118 			pgd_populate(&init_mm, pgdp, pudp);
119 		}
120 #endif /* PUD_TABLE_SIZE */
121 		pudp = pud_offset(pgdp, ea);
122 		if (pud_none(*pudp)) {
123 			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
124 			BUG_ON(pmdp == NULL);
125 			pud_populate(&init_mm, pudp, pmdp);
126 		}
127 		pmdp = pmd_offset(pudp, ea);
128 		if (!pmd_present(*pmdp)) {
129 			ptep = early_alloc_pgtable(PAGE_SIZE);
130 			BUG_ON(ptep == NULL);
131 			pmd_populate_kernel(&init_mm, pmdp, ptep);
132 		}
133 		ptep = pte_offset_kernel(pmdp, ea);
134 		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
135 							  __pgprot(flags)));
136 #else /* CONFIG_PPC_MMU_NOHASH */
137 		/*
138 		 * If the mm subsystem is not fully up, we cannot create a
139 		 * linux page table entry for this mapping.  Simply bolt an
140 		 * entry in the hardware page table.
141 		 *
142 		 */
143 		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
144 				      mmu_io_psize, mmu_kernel_ssize)) {
145 			printk(KERN_ERR "Failed to do bolted mapping of IO "
146 			       "memory at %016lx!\n", pa);
147 			return -ENOMEM;
148 		}
149 #endif /* !CONFIG_PPC_MMU_NOHASH */
150 	}
151 
152 	smp_wmb();
153 	return 0;
154 }
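/*
 * Illustrative sketch (not part of this file): a single-page call into
 * map_kernel_page() as __ioremap_at() below would issue it.  The physical
 * address is hypothetical and a plain cacheable kernel protection is used
 * for simplicity:
 *
 *	unsigned long ea = ioremap_bot;
 *	unsigned long pa = 0x200000000UL;
 *	unsigned long flags = pgprot_val(PAGE_KERNEL);
 *
 *	if (map_kernel_page(ea, pa, flags))
 *		pr_err("bolted mapping failed\n");
 */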
155 
156 
157 /**
158  * __ioremap_at - Low level function to establish the page tables
159  *                for an IO mapping
160  */
161 void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
162 			    unsigned long flags)
163 {
164 	unsigned long i;
165 
166 	/* Make sure we have the base flags */
167 	if ((flags & _PAGE_PRESENT) == 0)
168 		flags |= pgprot_val(PAGE_KERNEL);
169 
170 	/* Non-cacheable page cannot be coherent */
171 	if (flags & _PAGE_NO_CACHE)
172 		flags &= ~_PAGE_COHERENT;
173 
174 	/* We don't support the 4K PFN hack with ioremap */
175 	if (flags & _PAGE_4K_PFN)
176 		return NULL;
177 
178 	WARN_ON(pa & ~PAGE_MASK);
179 	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
180 	WARN_ON(size & ~PAGE_MASK);
181 
182 	for (i = 0; i < size; i += PAGE_SIZE)
183 		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
184 			return NULL;
185 
186 	return (void __iomem *)ea;
187 }
188 
189 /**
190  * __iounmap_at - Low level function to tear down the page tables
191  *                  for an IO mapping. This is used for mappings that
192  *                  are manipulated manually, like partial unmapping of
193  *                  PCI IOs or ISA space.
194  */
195 void __iounmap_at(void *ea, unsigned long size)
196 {
197 	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
198 	WARN_ON(size & ~PAGE_MASK);
199 
200 	unmap_kernel_range((unsigned long)ea, size);
201 }
202 
203 void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
204 				unsigned long flags, void *caller)
205 {
206 	phys_addr_t paligned;
207 	void __iomem *ret;
208 
209 	/*
210 	 * Choose an address to map it to.
211 	 * Once the vmalloc system is running, we use it.
212 	 * Before that, we map using addresses going
213 	 * up from ioremap_bot.  The vmalloc allocator will use
214 	 * the addresses from ioremap_bot through
215 	 * IOREMAP_END.
216 	 *
217 	 */
218 	paligned = addr & PAGE_MASK;
219 	size = PAGE_ALIGN(addr + size) - paligned;
220 
221 	if ((size == 0) || (paligned == 0))
222 		return NULL;
223 
224 	if (slab_is_available()) {
225 		struct vm_struct *area;
226 
227 		area = __get_vm_area_caller(size, VM_IOREMAP,
228 					    ioremap_bot, IOREMAP_END,
229 					    caller);
230 		if (area == NULL)
231 			return NULL;
232 
233 		area->phys_addr = paligned;
234 		ret = __ioremap_at(paligned, area->addr, size, flags);
235 		if (!ret)
236 			vunmap(area->addr);
237 	} else {
238 		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
239 		if (ret)
240 			ioremap_bot += size;
241 	}
242 
243 	if (ret)
244 		ret += addr & ~PAGE_MASK;
245 	return ret;
246 }
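/*
 * Worked example for the alignment arithmetic above (illustrative,
 * assuming 4K pages): for addr = 0xf0001234 and size = 0x20,
 * paligned = 0xf0001000 and the rounded size is 0x1000, so exactly one
 * page is mapped and the caller gets back the mapped virtual address
 * plus the 0x234 offset into that page.
 */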
247 
248 void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
249 			 unsigned long flags)
250 {
251 	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
252 }
253 
254 void __iomem * ioremap(phys_addr_t addr, unsigned long size)
255 {
256 	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
257 	void *caller = __builtin_return_address(0);
258 
259 	if (ppc_md.ioremap)
260 		return ppc_md.ioremap(addr, size, flags, caller);
261 	return __ioremap_caller(addr, size, flags, caller);
262 }
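/*
 * Illustrative sketch (not part of this file): how a driver typically
 * consumes the ioremap()/iounmap() pair defined here.  The MMIO address,
 * size and register offset are hypothetical.
 *
 *	void __iomem *regs = ioremap(0xf0000000, 0x1000);
 *
 *	if (regs) {
 *		u32 val = in_be32(regs + 0x10);
 *		out_be32(regs + 0x10, val | 0x1);
 *		iounmap(regs);
 *	}
 */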
263 
264 void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
265 {
266 	unsigned long flags = _PAGE_NO_CACHE;
267 	void *caller = __builtin_return_address(0);
268 
269 	if (ppc_md.ioremap)
270 		return ppc_md.ioremap(addr, size, flags, caller);
271 	return __ioremap_caller(addr, size, flags, caller);
272 }
273 
274 void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
275 			     unsigned long flags)
276 {
277 	void *caller = __builtin_return_address(0);
278 
279 	/* writeable implies dirty for kernel addresses */
280 	if (flags & _PAGE_WRITE)
281 		flags |= _PAGE_DIRTY;
282 
283 	/* we don't want to let _PAGE_EXEC leak out */
284 	flags &= ~_PAGE_EXEC;
285 	/*
286 	 * Force kernel mapping.
287 	 */
288 #if defined(CONFIG_PPC_BOOK3S_64)
289 	flags |= _PAGE_PRIVILEGED;
290 #else
291 	flags &= ~_PAGE_USER;
292 #endif
293 
294 
295 #ifdef _PAGE_BAP_SR
296 	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
297 	 * which means that we just cleared supervisor access... oops ;-) This
298 	 * restores it
299 	 */
300 	flags |= _PAGE_BAP_SR;
301 #endif
302 
303 	if (ppc_md.ioremap)
304 		return ppc_md.ioremap(addr, size, flags, caller);
305 	return __ioremap_caller(addr, size, flags, caller);
306 }
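/*
 * Illustrative sketch (not part of this file): ioremap_prot() lets the
 * caller choose its own protection bits, e.g. a cacheable mapping of a
 * hypothetical firmware-reserved region:
 *
 *	void __iomem *p = ioremap_prot(0x80000000UL, 0x10000,
 *				       pgprot_val(PAGE_KERNEL));
 */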
307 
308 
309 /*
310  * Unmap an IO region and remove it from the vmalloc area.
311  * Access to IO memory should be serialized by the driver.
312  */
313 void __iounmap(volatile void __iomem *token)
314 {
315 	void *addr;
316 
317 	if (!slab_is_available())
318 		return;
319 
320 	addr = (void *) ((unsigned long __force)
321 			 PCI_FIX_ADDR(token) & PAGE_MASK);
322 	if ((unsigned long)addr < ioremap_bot) {
323 		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
324 		       " at 0x%p\n", addr);
325 		return;
326 	}
327 	vunmap(addr);
328 }
329 
330 void iounmap(volatile void __iomem *token)
331 {
332 	if (ppc_md.iounmap)
333 		ppc_md.iounmap(token);
334 	else
335 		__iounmap(token);
336 }
337 
338 EXPORT_SYMBOL(ioremap);
339 EXPORT_SYMBOL(ioremap_wc);
340 EXPORT_SYMBOL(ioremap_prot);
341 EXPORT_SYMBOL(__ioremap);
342 EXPORT_SYMBOL(__ioremap_at);
343 EXPORT_SYMBOL(iounmap);
344 EXPORT_SYMBOL(__iounmap);
345 EXPORT_SYMBOL(__iounmap_at);
346 
347 #ifndef __PAGETABLE_PUD_FOLDED
348 /* 4 level page table */
349 struct page *pgd_page(pgd_t pgd)
350 {
351 	if (pgd_huge(pgd))
352 		return pte_page(pgd_pte(pgd));
353 	return virt_to_page(pgd_page_vaddr(pgd));
354 }
355 #endif
356 
357 struct page *pud_page(pud_t pud)
358 {
359 	if (pud_huge(pud))
360 		return pte_page(pud_pte(pud));
361 	return virt_to_page(pud_page_vaddr(pud));
362 }
363 
364 /*
365  * For a hugepage we have the pfn in the pmd and use PTE_RPN_SHIFT bits for flags.
366  * For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
367  */
368 struct page *pmd_page(pmd_t pmd)
369 {
370 	if (pmd_trans_huge(pmd) || pmd_huge(pmd))
371 		return pte_page(pmd_pte(pmd));
372 	return virt_to_page(pmd_page_vaddr(pmd));
373 }
374 
375 #ifdef CONFIG_PPC_64K_PAGES
376 static pte_t *get_from_cache(struct mm_struct *mm)
377 {
378 	void *pte_frag, *ret;
379 
380 	spin_lock(&mm->page_table_lock);
381 	ret = mm->context.pte_frag;
382 	if (ret) {
383 		pte_frag = ret + PTE_FRAG_SIZE;
384 		/*
385 		 * If we have taken up all the fragments, mark the PTE page NULL
386 		 */
387 		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
388 			pte_frag = NULL;
389 		mm->context.pte_frag = pte_frag;
390 	}
391 	spin_unlock(&mm->page_table_lock);
392 	return (pte_t *)ret;
393 }
394 
395 static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
396 {
397 	void *ret = NULL;
398 	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
399 				       __GFP_REPEAT | __GFP_ZERO);
400 	if (!page)
401 		return NULL;
402 	if (!kernel && !pgtable_page_ctor(page)) {
403 		__free_page(page);
404 		return NULL;
405 	}
406 
407 	ret = page_address(page);
408 	spin_lock(&mm->page_table_lock);
409 	/*
410 	 * If we find mm->context.pte_frag already set, we return
411 	 * the allocated page with a single fragment
412 	 * count.
413 	 */
414 	if (likely(!mm->context.pte_frag)) {
415 		set_page_count(page, PTE_FRAG_NR);
416 		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
417 	}
418 	spin_unlock(&mm->page_table_lock);
419 
420 	return (pte_t *)ret;
421 }
422 
423 pte_t *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
424 {
425 	pte_t *pte;
426 
427 	pte = get_from_cache(mm);
428 	if (pte)
429 		return pte;
430 
431 	return __alloc_for_cache(mm, kernel);
432 }
433 
434 void page_table_free(struct mm_struct *mm, unsigned long *table, int kernel)
435 {
436 	struct page *page = virt_to_page(table);
437 	if (put_page_testzero(page)) {
438 		if (!kernel)
439 			pgtable_page_dtor(page);
440 		free_hot_cold_page(page, 0);
441 	}
442 }
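/*
 * Illustrative sketch (not part of this file): with a 64K PAGE_SIZE and
 * 4K fragments, one backing page serves PAGE_SIZE / PTE_FRAG_SIZE
 * (16 with these numbers) PTE pages:
 *
 *	pte_t *f0 = page_table_alloc(mm, addr, 0);	// new page, PTE_FRAG_NR refs
 *	pte_t *f1 = page_table_alloc(mm, addr, 0);	// next 4K fragment, same page
 *	...
 *	page_table_free(mm, (unsigned long *)f0, 0);	// drops one ref; the backing
 *							// page is freed only when the
 *							// last fragment goes away
 */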
443 
444 #ifdef CONFIG_SMP
445 static void page_table_free_rcu(void *table)
446 {
447 	struct page *page = virt_to_page(table);
448 	if (put_page_testzero(page)) {
449 		pgtable_page_dtor(page);
450 		free_hot_cold_page(page, 0);
451 	}
452 }
453 
454 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
455 {
456 	unsigned long pgf = (unsigned long)table;
457 
458 	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
459 	pgf |= shift;
460 	tlb_remove_table(tlb, (void *)pgf);
461 }
462 
463 void __tlb_remove_table(void *_table)
464 {
465 	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
466 	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
467 
468 	if (!shift)
469 		/* PTE page needs special handling */
470 		page_table_free_rcu(table);
471 	else {
472 		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
473 		kmem_cache_free(PGT_CACHE(shift), table);
474 	}
475 }
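/*
 * Illustrative sketch (not part of this file): pgtable_free_tlb() packs
 * the PGT_CACHE index ("shift") into the low bits of the table address,
 * which works because page table allocations are aligned well beyond
 * MAX_PGTABLE_INDEX_SIZE.  __tlb_remove_table() above undoes it:
 *
 *	unsigned long pgf = (unsigned long)table | shift;	// encode
 *	void *t = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);	// decode pointer
 *	unsigned int s = pgf & MAX_PGTABLE_INDEX_SIZE;		// decode shift
 */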
476 #else
477 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
478 {
479 	if (!shift) {
480 		/* PTE page needs special handling */
481 		struct page *page = virt_to_page(table);
482 		if (put_page_testzero(page)) {
483 			pgtable_page_dtor(page);
484 			free_hot_cold_page(page, 0);
485 		}
486 	} else {
487 		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
488 		kmem_cache_free(PGT_CACHE(shift), table);
489 	}
490 }
491 #endif
492 #endif /* CONFIG_PPC_64K_PAGES */
493 
494 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
495 
496 /*
497  * This is called when relaxing access to a hugepage. It's also called in
498  * the page fault path when we don't hit any of the major fault cases, i.e.,
499  * a minor update of _PAGE_ACCESSED, _PAGE_DIRTY, etc.  The generic code will
500  * have handled those two for us; we additionally deal with missing execute
501  * permission here on some processors.
502  */
503 int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
504 			  pmd_t *pmdp, pmd_t entry, int dirty)
505 {
506 	int changed;
507 #ifdef CONFIG_DEBUG_VM
508 	WARN_ON(!pmd_trans_huge(*pmdp));
509 	assert_spin_locked(&vma->vm_mm->page_table_lock);
510 #endif
511 	changed = !pmd_same(*(pmdp), entry);
512 	if (changed) {
513 		__ptep_set_access_flags(pmdp_ptep(pmdp), pmd_pte(entry));
514 		/*
515 		 * Since we are not supporting SW TLB systems, we don't
516 		 * have anything similar to flush_tlb_page_nohash()
517 		 */
518 	}
519 	return changed;
520 }
521 
522 unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
523 				  pmd_t *pmdp, unsigned long clr,
524 				  unsigned long set)
525 {
526 
527 	__be64 old_be, tmp;
528 	unsigned long old;
529 
530 #ifdef CONFIG_DEBUG_VM
531 	WARN_ON(!pmd_trans_huge(*pmdp));
532 	assert_spin_locked(&mm->page_table_lock);
533 #endif
534 
535 	__asm__ __volatile__(
536 	"1:	ldarx	%0,0,%3\n\
537 		and.	%1,%0,%6\n\
538 		bne-	1b \n\
539 		andc	%1,%0,%4 \n\
540 		or	%1,%1,%7\n\
541 		stdcx.	%1,0,%3 \n\
542 		bne-	1b"
543 	: "=&r" (old_be), "=&r" (tmp), "=m" (*pmdp)
544 	: "r" (pmdp), "r" (cpu_to_be64(clr)), "m" (*pmdp),
545 	  "r" (cpu_to_be64(_PAGE_BUSY)), "r" (cpu_to_be64(set))
546 	: "cc" );
547 
548 	old = be64_to_cpu(old_be);
549 
550 	trace_hugepage_update(addr, old, clr, set);
551 	if (old & _PAGE_HASHPTE)
552 		hpte_do_hugepage_flush(mm, addr, pmdp, old);
553 	return old;
554 }
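/*
 * Illustrative C-level sketch (not part of this file) of what the
 * ldarx/stdcx. loop above does, with the atomicity and endian
 * conversion elided:
 *
 *	do {
 *		old = pmd_val(*pmdp);		// ldarx; spin while BUSY is set
 *	} while (old & _PAGE_BUSY);
 *	new = (old & ~clr) | set;		// andc + or
 *	*pmdp = __pmd(new);			// stdcx., retried on failure
 */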
555 
556 pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
557 			  pmd_t *pmdp)
558 {
559 	pmd_t pmd;
560 
561 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
562 	VM_BUG_ON(pmd_trans_huge(*pmdp));
563 
564 	pmd = *pmdp;
565 	pmd_clear(pmdp);
566 	/*
567 	 * Wait for all pending hash_page to finish. This is needed
568 	 * in case of subpage collapse. When we collapse normal pages
569 	 * to hugepage, we first clear the pmd, then invalidate all
570 	 * the PTE entries. The assumption here is that any low level
571 	 * page fault will see a none pmd and take the slow path that
572 	 * will wait on mmap_sem. But we could very well be in a
573 	 * hash_page with local ptep pointer value. Such a hash page
574 	 * can result in adding new HPTE entries for normal subpages.
575 	 * That means we could be modifying the page content as we
576 	 * copy them to a huge page. So wait for parallel hash_page
577 	 * to finish before invalidating HPTE entries. We can do this
578 	 * by sending an IPI to all the cpus and executing a dummy
579 	 * function there.
580 	 */
581 	kick_all_cpus_sync();
582 	/*
583 	 * Now invalidate the hpte entries in the range
584 	 * covered by pmd. This makes sure we take a
585 	 * fault and will find the pmd as none, which will
586 	 * result in a major fault which takes mmap_sem and
587 	 * hence waits for collapse to complete. Without this
588 	 * the __collapse_huge_page_copy can result in copying
589 	 * the old content.
590 	 */
591 	flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
592 	return pmd;
593 }
594 
595 int pmdp_test_and_clear_young(struct vm_area_struct *vma,
596 			      unsigned long address, pmd_t *pmdp)
597 {
598 	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
599 }
600 
601 /*
602  * We currently remove entries from the hashtable regardless of whether
603  * the entry was young or dirty. The generic routines only flush if the
604  * entry was young or dirty which is not good enough.
605  *
606  * We should be more intelligent about this but for the moment we override
607  * these functions and force a tlb flush unconditionally
608  */
609 int pmdp_clear_flush_young(struct vm_area_struct *vma,
610 				  unsigned long address, pmd_t *pmdp)
611 {
612 	return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);
613 }
614 
615 /*
616  * We want to put the pgtable in pmd and use pgtable for tracking
617  * the base page size hptes
618  */
619 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
620 				pgtable_t pgtable)
621 {
622 	pgtable_t *pgtable_slot;
623 	assert_spin_locked(&mm->page_table_lock);
624 	/*
625 	 * we store the pgtable in the second half of PMD
626 	 */
627 	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
628 	*pgtable_slot = pgtable;
629 	/*
630 	 * Expose the deposited pgtable to other cpus
631 	 * before we set the hugepage PTE at the pmd level;
632 	 * the hash fault code looks at the deposited pgtable
633 	 * to store hash index values.
634 	 */
635 	smp_wmb();
636 }
637 
638 pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
639 {
640 	pgtable_t pgtable;
641 	pgtable_t *pgtable_slot;
642 
643 	assert_spin_locked(&mm->page_table_lock);
644 	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
645 	pgtable = *pgtable_slot;
646 	/*
647 	 * Once we withdraw, mark the entry NULL.
648 	 */
649 	*pgtable_slot = NULL;
650 	/*
651 	 * We store HPTE information in the deposited PTE fragment.
652 	 * Zero out the content on withdraw.
653 	 */
654 	memset(pgtable, 0, PTE_FRAG_SIZE);
655 	return pgtable;
656 }
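/*
 * Illustrative sketch (not part of this file): the deposit slot for a
 * given pmdp is the matching entry in the otherwise unused second half
 * of the PMD page, so a deposit followed by a withdraw (both done with
 * the page table lock held) round-trips the same pointer:
 *
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);
 *	BUG_ON(pgtable_trans_huge_withdraw(mm, pmdp) != pgtable);
 */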
657 
658 void pmdp_huge_split_prepare(struct vm_area_struct *vma,
659 			     unsigned long address, pmd_t *pmdp)
660 {
661 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
662 	VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
663 
664 	/*
665 	 * We can't mark the pmd none here, because that will cause a race
666 	 * against exit_mmap. We need to continue to mark the pmd TRANS HUGE
667 	 * while we split, but at the same time we want the rest of the ppc64
668 	 * code not to insert a hash pte for this, because we will be modifying
669 	 * the deposited pgtable in the caller of this function. Hence mark
670 	 * the pmd privileged (i.e. clear user access) so that we move the
671 	 * fault handling to a higher level function that serializes against
672 	 * the ptl.  We need to flush existing hash pte entries here even
673 	 * though the translation is still valid, because we will withdraw
674 	 * the pgtable_t after this.
675 	 */
676 	pmd_hugepage_update(vma->vm_mm, address, pmdp, 0, _PAGE_PRIVILEGED);
677 }
678 
679 
680 /*
681  * set a new huge pmd. We should not be called for updating
682  * an existing pmd entry. That should go via pmd_hugepage_update.
683  */
684 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
685 		pmd_t *pmdp, pmd_t pmd)
686 {
687 #ifdef CONFIG_DEBUG_VM
688 	WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
689 	assert_spin_locked(&mm->page_table_lock);
690 	WARN_ON(!pmd_trans_huge(pmd));
691 #endif
692 	trace_hugepage_set_pmd(addr, pmd_val(pmd));
693 	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
694 }
695 
696 /*
697  * We use this to invalidate a pmdp entry before switching from a
698  * hugepte to regular pmd entry.
699  */
700 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
701 		     pmd_t *pmdp)
702 {
703 	pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
704 
705 	/*
706 	 * This ensures that generic code that relies on IRQ disabling
707 	 * to prevent a parallel THP split works as expected.
708 	 */
709 	kick_all_cpus_sync();
710 }
711 
712 /*
713  * A linux hugepage PMD was changed and the corresponding hash table entries
714  * need to be flushed.
715  */
716 void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
717 			    pmd_t *pmdp, unsigned long old_pmd)
718 {
719 	int ssize;
720 	unsigned int psize;
721 	unsigned long vsid;
722 	unsigned long flags = 0;
723 	const struct cpumask *tmp;
724 
725 	/* get the base page size, vsid and segment size */
726 #ifdef CONFIG_DEBUG_VM
727 	psize = get_slice_psize(mm, addr);
728 	BUG_ON(psize == MMU_PAGE_16M);
729 #endif
730 	if (old_pmd & _PAGE_COMBO)
731 		psize = MMU_PAGE_4K;
732 	else
733 		psize = MMU_PAGE_64K;
734 
735 	if (!is_kernel_addr(addr)) {
736 		ssize = user_segment_size(addr);
737 		vsid = get_vsid(mm->context.id, addr, ssize);
738 		WARN_ON(vsid == 0);
739 	} else {
740 		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
741 		ssize = mmu_kernel_ssize;
742 	}
743 
744 	tmp = cpumask_of(smp_processor_id());
745 	if (cpumask_equal(mm_cpumask(mm), tmp))
746 		flags |= HPTE_LOCAL_UPDATE;
747 
748 	return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
749 }
750 
751 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
752 {
753 	return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
754 }
755 
756 pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
757 {
758 	unsigned long pmdv;
759 
760 	pmdv = (pfn << PTE_RPN_SHIFT) & PTE_RPN_MASK;
761 	return pmd_set_protbits(__pmd(pmdv), pgprot);
762 }
763 
764 pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
765 {
766 	return pfn_pmd(page_to_pfn(page), pgprot);
767 }
768 
769 pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
770 {
771 	unsigned long pmdv;
772 
773 	pmdv = pmd_val(pmd);
774 	pmdv &= _HPAGE_CHG_MASK;
775 	return pmd_set_protbits(__pmd(pmdv), newprot);
776 }
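/*
 * Illustrative sketch (not part of this file): building a huge pmd with
 * the helpers above (the pfn value is hypothetical):
 *
 *	pmd_t pmd = pfn_pmd(0x12300UL, PAGE_KERNEL);	// pfn in the PTE_RPN_MASK bits
 *	pmd = pmd_mkhuge(pmd);				// tag it as a huge entry
 */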
777 
778 /*
779  * This is called at the end of handling a user page fault, when the
780  * fault has been handled by updating a HUGE PMD entry in the linux page tables.
781  * We use it to preload an HPTE into the hash table corresponding to
782  * the updated linux HUGE PMD entry.
783  */
784 void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
785 			  pmd_t *pmd)
786 {
787 	return;
788 }
789 
790 pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
791 			      unsigned long addr, pmd_t *pmdp)
792 {
793 	pmd_t old_pmd;
794 	pgtable_t pgtable;
795 	unsigned long old;
796 	pgtable_t *pgtable_slot;
797 
798 	old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
799 	old_pmd = __pmd(old);
800 	/*
801 	 * We have pmd == none and we are holding page_table_lock.
802 	 * So we can safely go and clear the pgtable hash
803 	 * index info.
804 	 */
805 	pgtable_slot = (pgtable_t *)pmdp + PTRS_PER_PMD;
806 	pgtable = *pgtable_slot;
807 	/*
808 	 * Let's zero out the old valid and hash index details;
809 	 * the hash fault code looks at them.
810 	 */
811 	memset(pgtable, 0, PTE_FRAG_SIZE);
812 	/*
813 	 * Serialize against find_linux_pte_or_hugepte which does lock-less
814 	 * lookup in page tables with local interrupts disabled. For huge pages
815 	 * it casts pmd_t to pte_t. Since format of pte_t is different from
816 	 * pmd_t we want to prevent transit from pmd pointing to page table
817 	 * to pmd pointing to huge page (and back) while interrupts are disabled.
818 	 * We clear pmd to possibly replace it with page table pointer in
819 	 * different code paths. So make sure we wait for the parallel
820 	 * find_linux_pte_or_hugepte to finish.
821 	 */
822 	kick_all_cpus_sync();
823 	return old_pmd;
824 }
825 
826 int has_transparent_hugepage(void)
827 {
828 
829 	BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) >= MAX_ORDER,
830 		"hugepages can't be allocated by the buddy allocator");
831 
832 	BUILD_BUG_ON_MSG((PMD_SHIFT - PAGE_SHIFT) < 2,
833 			 "We need more than 2 pages to do deferred thp split");
834 
835 	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
836 		return 0;
837 	/*
838 	 * We support THP only if PMD_SIZE is 16MB.
839 	 */
840 	if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
841 		return 0;
842 	/*
843 	 * We need to make sure that we support 16MB hugepage in a segment
844 	 * with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
845 	 * of 64K.
846 	 */
847 	/*
848 	 * If we have 64K HPTE, we will be using that by default
849 	 */
850 	if (mmu_psize_defs[MMU_PAGE_64K].shift &&
851 	    (mmu_psize_defs[MMU_PAGE_64K].penc[MMU_PAGE_16M] == -1))
852 		return 0;
853 	/*
854 	 * Ok we only have 4K HPTE
855 	 */
856 	if (mmu_psize_defs[MMU_PAGE_4K].penc[MMU_PAGE_16M] == -1)
857 		return 0;
858 
859 	return 1;
860 }
861 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
862