xref: /openbmc/linux/arch/powerpc/mm/nohash/tlb.c (revision f3531d1a)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * This file contains the routines for TLB flushing.
4  * On machines where the MMU does not use a hash table to store virtual to
5  * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
6  * this does -not- include the 603, however, which shares the implementation
7  * with hash-based processors)
8  *
9  *  -- BenH
10  *
11  * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
12  *                     IBM Corp.
13  *
14  *  Derived from arch/ppc/mm/init.c:
15  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
16  *
17  *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
18  *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
19  *    Copyright (C) 1996 Paul Mackerras
20  *
21  *  Derived from "arch/i386/mm/init.c"
22  *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
23  */
24 
25 #include <linux/kernel.h>
26 #include <linux/export.h>
27 #include <linux/mm.h>
28 #include <linux/init.h>
29 #include <linux/highmem.h>
30 #include <linux/pagemap.h>
31 #include <linux/preempt.h>
32 #include <linux/spinlock.h>
33 #include <linux/memblock.h>
34 #include <linux/of_fdt.h>
35 #include <linux/hugetlb.h>
36 
37 #include <asm/pgalloc.h>
38 #include <asm/tlbflush.h>
39 #include <asm/tlb.h>
40 #include <asm/code-patching.h>
41 #include <asm/cputhreads.h>
42 #include <asm/hugetlb.h>
43 #include <asm/paca.h>
44 
45 #include <mm/mmu_decl.h>
46 
47 /*
48  * This struct lists the sw-supported page sizes.  The hardware MMU may support
49  * other sizes not listed here.  The .ind field is only used on MMUs that have
50  * indirect page table entries.
51  */
52 #ifdef CONFIG_PPC_E500
53 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
54 	[MMU_PAGE_4K] = {
55 		.shift	= 12,
56 		.enc	= BOOK3E_PAGESZ_4K,
57 	},
58 	[MMU_PAGE_2M] = {
59 		.shift	= 21,
60 		.enc	= BOOK3E_PAGESZ_2M,
61 	},
62 	[MMU_PAGE_4M] = {
63 		.shift	= 22,
64 		.enc	= BOOK3E_PAGESZ_4M,
65 	},
66 	[MMU_PAGE_16M] = {
67 		.shift	= 24,
68 		.enc	= BOOK3E_PAGESZ_16M,
69 	},
70 	[MMU_PAGE_64M] = {
71 		.shift	= 26,
72 		.enc	= BOOK3E_PAGESZ_64M,
73 	},
74 	[MMU_PAGE_256M] = {
75 		.shift	= 28,
76 		.enc	= BOOK3E_PAGESZ_256M,
77 	},
78 	[MMU_PAGE_1G] = {
79 		.shift	= 30,
80 		.enc	= BOOK3E_PAGESZ_1GB,
81 	},
82 };
83 
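/*
 * Translate a Linux page size index (MMU_PAGE_*) into the BOOK3E_PAGESZ_*
 * encoding that the TLB invalidate helpers expect as their "tsize" argument.
 */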
84 static inline int mmu_get_tsize(int psize)
85 {
86 	return mmu_psize_defs[psize].enc;
87 }
88 #else
89 static inline int mmu_get_tsize(int psize)
90 {
91 	/* This isn't used on !Book3E for now */
92 	return 0;
93 }
94 #endif
95 
96 #ifdef CONFIG_PPC_8xx
97 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
98 	[MMU_PAGE_4K] = {
99 		.shift	= 12,
100 	},
101 	[MMU_PAGE_16K] = {
102 		.shift	= 14,
103 	},
104 	[MMU_PAGE_512K] = {
105 		.shift	= 19,
106 	},
107 	[MMU_PAGE_8M] = {
108 		.shift	= 23,
109 	},
110 };
111 #endif
112 
113 /* The variables below are currently only used on 64-bit Book3E,
114  * though this will probably be made common with other nohash
115  * implementations at some point.
116  */
117 #ifdef CONFIG_PPC64
118 
119 int mmu_pte_psize;		/* Page size used for PTE pages */
120 int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
121 int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
122 unsigned long linear_map_top;	/* Top of linear mapping */
123 
124 
125 /*
126  * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
127  * exceptions.  This is used for bolted and e6500 TLB miss handlers which
128  * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
129  * this is set to zero.
130  */
131 int extlb_level_exc;
132 
133 #endif /* CONFIG_PPC64 */
134 
135 #ifdef CONFIG_PPC_E500
136 /* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
137 DEFINE_PER_CPU(int, next_tlbcam_idx);
138 EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
139 #endif
140 
141 /*
142  * Base TLB flushing operations:
143  *
144  *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
145  *  - flush_tlb_page(vma, vmaddr) flushes one page
146  *  - flush_tlb_range(vma, start, end) flushes a range of pages
147  *  - flush_tlb_kernel_range(start, end) flushes kernel pages
148  *
149  *  - local_* variants of page and mm only apply to the current
150  *    processor
151  */
152 
153 #ifndef CONFIG_PPC_8xx
154 /*
155  * These are the base non-SMP variants of page and mm flushing
156  */
157 void local_flush_tlb_mm(struct mm_struct *mm)
158 {
159 	unsigned int pid;
160 
161 	preempt_disable();
162 	pid = mm->context.id;
163 	if (pid != MMU_NO_CONTEXT)
164 		_tlbil_pid(pid);
165 	preempt_enable();
166 }
167 EXPORT_SYMBOL(local_flush_tlb_mm);
168 
169 void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
170 			    int tsize, int ind)
171 {
172 	unsigned int pid;
173 
174 	preempt_disable();
175 	pid = mm ? mm->context.id : 0;
176 	if (pid != MMU_NO_CONTEXT)
177 		_tlbil_va(vmaddr, pid, tsize, ind);
178 	preempt_enable();
179 }
180 
181 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
182 {
183 	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
184 			       mmu_get_tsize(mmu_virtual_psize), 0);
185 }
186 EXPORT_SYMBOL(local_flush_tlb_page);
187 
188 void local_flush_tlb_page_psize(struct mm_struct *mm,
189 				unsigned long vmaddr, int psize)
190 {
191 	__local_flush_tlb_page(mm, vmaddr, mmu_get_tsize(psize), 0);
192 }
193 EXPORT_SYMBOL(local_flush_tlb_page_psize);
194 
195 #endif
196 
197 /*
198  * And here are the SMP non-local implementations
199  */
200 #ifdef CONFIG_SMP
201 
202 static DEFINE_RAW_SPINLOCK(tlbivax_lock);
203 
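/* Argument block handed to the IPI flush handlers below. */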
204 struct tlb_flush_param {
205 	unsigned long addr;
206 	unsigned int pid;
207 	unsigned int tsize;
208 	unsigned int ind;
209 };
210 
211 static void do_flush_tlb_mm_ipi(void *param)
212 {
213 	struct tlb_flush_param *p = param;
214 
215 	_tlbil_pid(p ? p->pid : 0);
216 }
217 
218 static void do_flush_tlb_page_ipi(void *param)
219 {
220 	struct tlb_flush_param *p = param;
221 
222 	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
223 }
224 
225 
226 /* Note on invalidations and PID:
227  *
228  * We snapshot the PID with preempt disabled. At this point, it can still
229  * change either because:
230  * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
231  * - we are invalidating some target that isn't currently running here
232  *   and is concurrently acquiring a new PID on another CPU
233  * - some other CPU is re-acquiring a lost PID for this mm
234  * etc...
235  *
236  * However, this shouldn't be a problem as we only guarantee
237  * invalidation of TLB entries present prior to this call, so we
238  * don't care about the PID changing, and invalidating a stale PID
239  * is generally harmless.
240  */
241 
242 void flush_tlb_mm(struct mm_struct *mm)
243 {
244 	unsigned int pid;
245 
246 	preempt_disable();
247 	pid = mm->context.id;
248 	if (unlikely(pid == MMU_NO_CONTEXT))
249 		goto no_context;
250 	if (!mm_is_core_local(mm)) {
251 		struct tlb_flush_param p = { .pid = pid };
252 		/* Ignores smp_processor_id() even if set. */
253 		smp_call_function_many(mm_cpumask(mm),
254 				       do_flush_tlb_mm_ipi, &p, 1);
255 	}
256 	_tlbil_pid(pid);
257  no_context:
258 	preempt_enable();
259 }
260 EXPORT_SYMBOL(flush_tlb_mm);
261 
262 void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
263 		      int tsize, int ind)
264 {
265 	struct cpumask *cpu_mask;
266 	unsigned int pid;
267 
268 	/*
269 	 * This function as well as __local_flush_tlb_page() must only be called
270 	 * for user contexts.
271 	 */
272 	if (WARN_ON(!mm))
273 		return;
274 
275 	preempt_disable();
276 	pid = mm->context.id;
277 	if (unlikely(pid == MMU_NO_CONTEXT))
278 		goto bail;
279 	cpu_mask = mm_cpumask(mm);
280 	if (!mm_is_core_local(mm)) {
281 		/* If broadcast tlbivax is supported, use it */
282 		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
283 			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
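			/*
			 * Some implementations require broadcast invalidations
			 * to be serialized (MMU_FTR_LOCK_BCAST_INVAL), hence
			 * the global tlbivax_lock.
			 */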
284 			if (lock)
285 				raw_spin_lock(&tlbivax_lock);
286 			_tlbivax_bcast(vmaddr, pid, tsize, ind);
287 			if (lock)
288 				raw_spin_unlock(&tlbivax_lock);
289 			goto bail;
290 		} else {
291 			struct tlb_flush_param p = {
292 				.pid = pid,
293 				.addr = vmaddr,
294 				.tsize = tsize,
295 				.ind = ind,
296 			};
297 			/* Ignores smp_processor_id() even if set in cpu_mask */
298 			smp_call_function_many(cpu_mask,
299 					       do_flush_tlb_page_ipi, &p, 1);
300 		}
301 	}
302 	_tlbil_va(vmaddr, pid, tsize, ind);
303  bail:
304 	preempt_enable();
305 }
306 
307 void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
308 {
309 #ifdef CONFIG_HUGETLB_PAGE
310 	if (vma && is_vm_hugetlb_page(vma))
311 		flush_hugetlb_page(vma, vmaddr);
312 #endif
313 
314 	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
315 			 mmu_get_tsize(mmu_virtual_psize), 0);
316 }
317 EXPORT_SYMBOL(flush_tlb_page);
318 
319 #endif /* CONFIG_SMP */
320 
321 #ifdef CONFIG_PPC_47x
322 void __init early_init_mmu_47x(void)
323 {
324 #ifdef CONFIG_SMP
325 	unsigned long root = of_get_flat_dt_root();
326 	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
327 		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
328 #endif /* CONFIG_SMP */
329 }
330 #endif /* CONFIG_PPC_47x */
331 
332 /*
333  * Flush kernel TLB entries in the given range
334  */
335 #ifndef CONFIG_PPC_8xx
336 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
337 {
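	/*
	 * The range arguments are not used here: kernel entries are tagged
	 * with PID 0, so we simply flush that whole context (and on SMP, do
	 * so on every CPU).
	 */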
338 #ifdef CONFIG_SMP
339 	preempt_disable();
340 	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
341 	_tlbil_pid(0);
342 	preempt_enable();
343 #else
344 	_tlbil_pid(0);
345 #endif
346 }
347 EXPORT_SYMBOL(flush_tlb_kernel_range);
348 #endif
349 
350 /*
351  * Currently, for range flushing, we just do a full mm flush. This should
352  * be optimized based on a threshold on the size of the range, since
353  * some implementations can stack multiple tlbivax before a tlbsync, but
354  * for now, we keep it that way.
355  */
356 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
357 		     unsigned long end)
358 
359 {
360 	if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
361 		flush_tlb_page(vma, start);
362 	else
363 		flush_tlb_mm(vma->vm_mm);
364 }
365 EXPORT_SYMBOL(flush_tlb_range);
366 
367 void tlb_flush(struct mmu_gather *tlb)
368 {
369 	flush_tlb_mm(tlb->mm);
370 }
371 
372 /*
373  * Below are functions specific to the 64-bit variant of Book3E though that
374  * may change in the future
375  */
376 
377 #ifdef CONFIG_PPC64
378 
379 /*
380  * Handling of virtual linear page tables or indirect TLB entries
381  * flushing when PTE pages are freed
382  */
383 void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
384 {
385 	int tsize = mmu_psize_defs[mmu_pte_psize].enc;
386 
387 	if (book3e_htw_mode != PPC_HTW_NONE) {
388 		unsigned long start = address & PMD_MASK;
389 		unsigned long end = address + PMD_SIZE;
390 		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;
391 
392 		/* This isn't the most optimal; ideally we would factor out the
393 		 * whole preempt & CPU mask mucking around, or even the IPI, but
394 		 * it will do for now.
395 		 */
396 		while (start < end) {
397 			__flush_tlb_page(tlb->mm, start, tsize, 1);
398 			start += size;
399 		}
400 	} else {
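		/*
		 * No HW tablewalk: flush the entry covering this PTE page's
		 * location in the virtual linear page table instead.
		 */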
401 		unsigned long rmask = 0xf000000000000000ul;
402 		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
403 		unsigned long vpte = address & ~rmask;
404 
405 		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
406 		vpte |= rid;
407 		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
408 	}
409 }
410 
411 static void __init setup_page_sizes(void)
412 {
413 	unsigned int tlb0cfg;
414 	unsigned int tlb0ps;
415 	unsigned int eptcfg;
416 	int i, psize;
417 
418 #ifdef CONFIG_PPC_E500
419 	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
420 	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
421 
422 	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
423 		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
424 		unsigned int min_pg, max_pg;
425 
426 		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
427 		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;
428 
429 		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
430 			struct mmu_psize_def *def;
431 			unsigned int shift;
432 
433 			def = &mmu_psize_defs[psize];
434 			shift = def->shift;
435 
436 			if (shift == 0 || shift & 1)
437 				continue;
438 
439 			/* adjust to be in terms of 4^shift KB */
440 			shift = (shift - 10) >> 1;
441 
442 			if ((shift >= min_pg) && (shift <= max_pg))
443 				def->flags |= MMU_PAGE_SIZE_DIRECT;
444 		}
445 
446 		goto out;
447 	}
448 
449 	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
450 		u32 tlb1cfg, tlb1ps;
451 
452 		tlb0cfg = mfspr(SPRN_TLB0CFG);
453 		tlb1cfg = mfspr(SPRN_TLB1CFG);
454 		tlb1ps = mfspr(SPRN_TLB1PS);
455 		eptcfg = mfspr(SPRN_EPTCFG);
456 
457 		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
458 			book3e_htw_mode = PPC_HTW_E6500;
459 
460 		/*
461 		 * We expect 4K subpage size and unrestricted indirect size.
462 		 * The lack of a restriction on indirect size is a Freescale
463 		 * extension, indicated by PSn = 0 but SPSn != 0.
464 		 */
465 		if (eptcfg != 2)
466 			book3e_htw_mode = PPC_HTW_NONE;
467 
468 		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
469 			struct mmu_psize_def *def = &mmu_psize_defs[psize];
470 
471 			if (!def->shift)
472 				continue;
473 
474 			if (tlb1ps & (1U << (def->shift - 10))) {
475 				def->flags |= MMU_PAGE_SIZE_DIRECT;
476 
477 				if (book3e_htw_mode && psize == MMU_PAGE_2M)
478 					def->flags |= MMU_PAGE_SIZE_INDIRECT;
479 			}
480 		}
481 
482 		goto out;
483 	}
484 #endif
485 
486 	tlb0cfg = mfspr(SPRN_TLB0CFG);
487 	tlb0ps = mfspr(SPRN_TLB0PS);
488 	eptcfg = mfspr(SPRN_EPTCFG);
489 
490 	/* Look for supported direct sizes */
491 	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
492 		struct mmu_psize_def *def = &mmu_psize_defs[psize];
493 
494 		if (tlb0ps & (1U << (def->shift - 10)))
495 			def->flags |= MMU_PAGE_SIZE_DIRECT;
496 	}
497 
498 	/* Indirect page sizes supported ? */
499 	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
500 	    (tlb0cfg & TLBnCFG_PT) == 0)
501 		goto out;
502 
503 	book3e_htw_mode = PPC_HTW_IBM;
504 
505 	/* Now, we only deal with one IND page size for each
506 	 * direct size. Hopefully all implementations today are
507 	 * unambiguous, but we might want to be careful in the
508 	 * future.
509 	 */
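	/*
	 * EPTCFG packs up to three (PS, SPS) pairs of 5 bits each; walk them,
	 * marking the indirect page sizes and recording the indirect entry
	 * size (.ind) for each matching direct size.
	 */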
510 	for (i = 0; i < 3; i++) {
511 		unsigned int ps, sps;
512 
513 		sps = eptcfg & 0x1f;
514 		eptcfg >>= 5;
515 		ps = eptcfg & 0x1f;
516 		eptcfg >>= 5;
517 		if (!ps || !sps)
518 			continue;
519 		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
520 			struct mmu_psize_def *def = &mmu_psize_defs[psize];
521 
522 			if (ps == (def->shift - 10))
523 				def->flags |= MMU_PAGE_SIZE_INDIRECT;
524 			if (sps == (def->shift - 10))
525 				def->ind = ps + 10;
526 		}
527 	}
528 
529 out:
530 	/* Cleanup array and print summary */
531 	pr_info("MMU: Supported page sizes\n");
532 	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
533 		struct mmu_psize_def *def = &mmu_psize_defs[psize];
534 		const char *__page_type_names[] = {
535 			"unsupported",
536 			"direct",
537 			"indirect",
538 			"direct & indirect"
539 		};
540 		if (def->flags == 0) {
541 			def->shift = 0;
542 			continue;
543 		}
544 		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
545 			__page_type_names[def->flags & 0x3]);
546 	}
547 }
548 
549 static void __init setup_mmu_htw(void)
550 {
551 	/*
552 	 * If we want to use HW tablewalk, enable it by patching the TLB miss
553 	 * handlers to branch to the one dedicated to it.
554 	 */
555 
556 	switch (book3e_htw_mode) {
557 	case PPC_HTW_IBM:
558 		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
559 		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
560 		break;
561 #ifdef CONFIG_PPC_E500
562 	case PPC_HTW_E6500:
563 		extlb_level_exc = EX_TLB_SIZE;
564 		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
565 		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
566 		break;
567 #endif
568 	}
569 	pr_info("MMU: Book3E HW tablewalk %s\n",
570 		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
571 }
572 
573 /*
574  * Early initialization of the MMU TLB code
575  */
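/*
 * Per-CPU part of the init: called on the boot CPU from early_init_mmu()
 * and on secondaries from early_init_mmu_secondary(). One-time global
 * setup lives in early_init_mmu_global() below.
 */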
576 static void early_init_this_mmu(void)
577 {
578 	unsigned int mas4;
579 
580 	/* Set MAS4 based on page table setting */
581 
582 	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
583 	switch (book3e_htw_mode) {
584 	case PPC_HTW_E6500:
585 		mas4 |= MAS4_INDD;
586 		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
587 		mas4 |= MAS4_TLBSELD(1);
588 		mmu_pte_psize = MMU_PAGE_2M;
589 		break;
590 
591 	case PPC_HTW_IBM:
592 		mas4 |= MAS4_INDD;
593 		mas4 |=	BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
594 		mmu_pte_psize = MMU_PAGE_1M;
595 		break;
596 
597 	case PPC_HTW_NONE:
598 		mas4 |=	BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
599 		mmu_pte_psize = mmu_virtual_psize;
600 		break;
601 	}
602 	mtspr(SPRN_MAS4, mas4);
603 
604 #ifdef CONFIG_PPC_E500
605 	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
606 		unsigned int num_cams;
607 		bool map = true;
608 
609 		/* use a quarter of the TLBCAM for bolted linear map */
610 		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
611 
612 		/*
613 		 * Only do the mapping once per core, or else the
614 		 * transient mapping would cause problems.
615 		 */
616 #ifdef CONFIG_SMP
617 		if (hweight32(get_tensr()) > 1)
618 			map = false;
619 #endif
620 
621 		if (map)
622 			linear_map_top = map_mem_in_cams(linear_map_top,
623 							 num_cams, false, true);
624 	}
625 #endif
626 
627 	/* A sync won't hurt us after mucking around with
628 	 * the MMU configuration
629 	 */
630 	mb();
631 }
632 
633 static void __init early_init_mmu_global(void)
634 {
635 	/* XXX This should be decided at runtime based on supported
636 	 * page sizes in the TLB, but for now let's assume 16M is
637 	 * always there and a good fit (which it probably is)
638 	 *
639 	 * Freescale booke only supports 4K pages in TLB0, so use that.
640 	 */
641 	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
642 		mmu_vmemmap_psize = MMU_PAGE_4K;
643 	else
644 		mmu_vmemmap_psize = MMU_PAGE_16M;
645 
646 	/* XXX This code only checks for TLB 0 capabilities and doesn't
647 	 *     check what page size combos are supported by the HW. It
648 	 *     also doesn't handle the case where a separate array holds
649 	 *     the IND entries from the array loaded by the PT.
650 	 */
651 	/* Look for supported page sizes */
652 	setup_page_sizes();
653 
654 	/* Look for HW tablewalk support */
655 	setup_mmu_htw();
656 
657 #ifdef CONFIG_PPC_E500
658 	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
659 		if (book3e_htw_mode == PPC_HTW_NONE) {
660 			extlb_level_exc = EX_TLB_SIZE;
661 			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
662 			patch_exception(0x1e0,
663 				exc_instruction_tlb_miss_bolted_book3e);
664 		}
665 	}
666 #endif
667 
668 	/* Set the global containing the top of the linear mapping
669 	 * for use by the TLB miss code
670 	 */
671 	linear_map_top = memblock_end_of_DRAM();
672 
673 	ioremap_bot = IOREMAP_BASE;
674 }
675 
676 static void __init early_mmu_set_memory_limit(void)
677 {
678 #ifdef CONFIG_PPC_E500
679 	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
680 		/*
681 		 * Limit memory so we don't have linear faults.
682 		 * Unlike memblock_set_current_limit, which limits
683 		 * memory available during early boot, this permanently
684 		 * reduces the memory available to Linux.  We need to
685 		 * do this because highmem is not supported on 64-bit.
686 		 */
687 		memblock_enforce_memory_limit(linear_map_top);
688 	}
689 #endif
690 
691 	memblock_set_current_limit(linear_map_top);
692 }
693 
694 /* boot cpu only */
695 void __init early_init_mmu(void)
696 {
697 	early_init_mmu_global();
698 	early_init_this_mmu();
699 	early_mmu_set_memory_limit();
700 }
701 
702 void early_init_mmu_secondary(void)
703 {
704 	early_init_this_mmu();
705 }
706 
707 void setup_initial_memory_limit(phys_addr_t first_memblock_base,
708 				phys_addr_t first_memblock_size)
709 {
710 	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
711 	 * the bolted TLB entry. We know for now that only 1G
712 	 * entries are supported though that may eventually
713 	 * change.
714 	 *
715 	 * On FSL Embedded 64-bit, usually all RAM is bolted, but with
716 	 * unusual memory sizes it's possible for some RAM to not be mapped
717 	 * (such RAM is not used at all by Linux, since we don't support
718 	 * highmem on 64-bit).  We limit ppc64_rma_size to what would be
719 	 * mappable if this memblock is the only one.  Additional memblocks
720 	 * can only increase, not decrease, the amount that ends up getting
721 	 * mapped.  We still limit max to 1G even if we'll eventually map
722 	 * more.  This is due to what the early init code is set up to do.
723 	 *
724 	 * We crop it to the size of the first MEMBLOCK to
725 	 * avoid going over total available memory just in case...
726 	 */
727 #ifdef CONFIG_PPC_E500
728 	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
729 		unsigned long linear_sz;
730 		unsigned int num_cams;
731 
732 		/* use a quarter of the TLBCAM for bolted linear map */
733 		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
734 
735 		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
736 					    true, true);
737 
738 		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
739 	} else
740 #endif
741 		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
742 
743 	/* Finally limit subsequent allocations */
744 	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
745 }
746 #else /* ! CONFIG_PPC64 */
747 void __init early_init_mmu(void)
748 {
749 #ifdef CONFIG_PPC_47x
750 	early_init_mmu_47x();
751 #endif
752 }
753 #endif /* CONFIG_PPC64 */
754