// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/pgtable.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

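/*
 * HPTE_LOCK_BIT is a software lock bit in the first doubleword of the
 * HPTE. HPTEs are stored big-endian, while the generic bitops used in
 * native_lock_hpte() act on the native-endian value, so on little-endian
 * the byte swap moves BE bit 3 up to bit 56 + 3 of the loaded doubleword.
 */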
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

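/*
 * Serializes global tlbies on CPUs without MMU_FTR_LOCKLESS_TLBIE, which
 * cannot safely have multiple CPUs issuing tlbie at the same time.
 */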
static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

static inline unsigned long ___tlbie(unsigned long vpn, int psize,
						int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need bits 14 to 65 of the va for a tlbie of a 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * The top two bits are also ignored because we can only
	 * accommodate 76 bits in a 64-bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * Clear the top 16 bits of the 64-bit va for a non-SLS segment.
	 * Older versions of the architecture (2.02 and earlier) require
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
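		/*
		 * va now carries the tlbie RB contents: the effective
		 * address bits in the high part, the segment size in
		 * bits 9:8 (ssize << 8) and the SLLP encoding of the
		 * actual page size in bits 7:5. CPUs without
		 * CPU_FTR_ARCH_206 use the old two-operand tlbie form;
		 * newer ones use PPC_TLBIE with RS = 0, i.e. LPID 0.
		 */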
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * bits 58..64 of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	return va;
}

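/*
 * Workarounds for the POWER9 tlbie errata named by the CPU_FTR_P9_TLBIE_*
 * feature flags: on CPUs with the ERAT bug a dummy radix-format flush is
 * issued (reportedly to avoid stale ERAT entries), and on CPUs with the
 * store-queue bug the last tlbie is simply issued a second time. Both
 * need an extra ptesync so the workaround cannot be reordered before the
 * original tlbie.
 */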
static inline void fixup_tlbie_vpn(unsigned long vpn, int psize,
				   int apsize, int ssize)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		/* Radix flush for a hash guest */

		unsigned long rb, rs, prs, r, ric;

		rb = PPC_BIT(52); /* IS = 2 */
		rs = 0;  /* lpid = 0 */
		prs = 0; /* partition scoped */
		r = 1;   /* radix format */
		ric = 0; /* RIC_FLUSH_TLB */

		/*
		 * Need the extra ptesync to make sure we don't
		 * re-order the tlbie
		 */
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
			     : : "r"(rb), "i"(r), "i"(prs),
			       "i"(ric), "r"(rs) : "memory");
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		/* Need the extra ptesync to ensure we don't reorder tlbie */
		asm volatile("ptesync": : :"memory");
		___tlbie(vpn, psize, apsize, ssize);
	}
}

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long rb;

	rb = ___tlbie(vpn, psize, apsize, ssize);
	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
}

static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * Clear the top 16 bits of the 64-bit va for a non-SLS segment.
	 * Older versions of the architecture (2.02 and earlier) require
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 0), %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * bits 58..64 of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 1), %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	trace_tlbie(0, 1, va, 0, 0, 0, 0);
}

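/*
 * Sequencing used below: ptesync; tlbie; eieio; tlbsync; ptesync for a
 * global invalidate, or ptesync; tlbiel; local barrier for a local one.
 * tlbiel is only used when the flush is for this CPU alone and the page
 * size has a tlbiel variant (mmu_psize_defs[psize].tlbiel).
 */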
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		ppc_after_tlbiel_barrier();
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		fixup_tlbie_vpn(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

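/*
 * The per-HPTE lock is the HPTE_LOCK_BIT software bit in the entry
 * itself: acquire it with a test-and-set, and busy-wait at reduced
 * thread priority (spin_begin()/spin_cpu_relax()) while another CPU
 * holds it. Rewriting the first doubleword with the bit clear both
 * publishes the new entry and releases the lock.
 */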
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		spin_begin();
		while (test_bit(HPTE_LOCK_BIT, word))
			spin_cpu_relax();
		spin_end();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

239 
240 static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
241 			unsigned long pa, unsigned long rflags,
242 			unsigned long vflags, int psize, int apsize, int ssize)
243 {
244 	struct hash_pte *hptep = htab_address + hpte_group;
245 	unsigned long hpte_v, hpte_r;
246 	int i;
247 
248 	if (!(vflags & HPTE_V_BOLTED)) {
249 		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
250 			" rflags=%lx, vflags=%lx, psize=%d)\n",
251 			hpte_group, vpn, pa, rflags, vflags, psize);
252 	}
253 
254 	for (i = 0; i < HPTES_PER_GROUP; i++) {
255 		if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
256 			/* retry with lock held */
257 			native_lock_hpte(hptep);
258 			if (! (be64_to_cpu(hptep->v) & HPTE_V_VALID))
259 				break;
260 			native_unlock_hpte(hptep);
261 		}
262 
263 		hptep++;
264 	}
265 
266 	if (i == HPTES_PER_GROUP)
267 		return -1;
268 
269 	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
270 	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
271 
272 	if (!(vflags & HPTE_V_BOLTED)) {
273 		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
274 			i, hpte_v, hpte_r);
275 	}
276 
277 	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
278 		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
279 		hpte_v = hpte_old_to_new_v(hpte_v);
280 	}
281 
282 	hptep->r = cpu_to_be64(hpte_r);
283 	/* Guarantee the second dword is visible before the valid bit */
284 	eieio();
285 	/*
286 	 * Now set the first dword including the valid bit
287 	 * NOTE: this also unlocks the hpte
288 	 */
289 	hptep->v = cpu_to_be64(hpte_v);
290 
291 	__asm__ __volatile__ ("ptesync" : : : "memory");
292 
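	/*
	 * The low 3 bits of the return value are the slot within the
	 * group; bit 3 is set when the entry went into the secondary
	 * hash bucket.
	 */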
	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;
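	/* The low bits of the timebase are as good as random here. */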

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = hpte_get_old_v(hptep);
	/*
	 * We always need to invalidate the TLB because hpte_remove doesn't
	 * do a tlb invalidate. If a hash bucket gets full, we "evict" a
	 * more or less random entry from it. When we do that we don't
	 * invalidate the TLB (hpte_remove) because we assume the old
	 * translation is still technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PPP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PPP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

static long __native_hpte_find(unsigned long want_v, unsigned long slot)
{
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long i;

	for (i = 0; i < HPTES_PER_GROUP; i++) {

		hptep = htab_address + slot;
		hpte_v = hpte_get_old_v(hptep);
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	unsigned long hpte_group;
	unsigned long want_v;
	unsigned long hash;
	long slot;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/*
	 * We try to keep bolted entries always in the primary hash,
	 * but in some cases we can find them in the secondary too.
	 */
	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	slot = __native_hpte_find(want_v, hpte_group);
	if (slot < 0) {
		/* Try in secondary */
		hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot = __native_hpte_find(want_v, hpte_group);
		if (slot < 0)
			return -1;
	}

	return slot;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PPP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. For bolted entries the base
	 * and actual page sizes are the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

/*
 * Remove a bolted kernel entry. Memory hotplug uses this.
 *
 * No need to lock here because we should be the only user.
 */
static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	hptep = htab_address + slot;

	VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));

	/* Invalidate the hpte */
	hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, psize, psize, ssize, 0);
	return 0;
}

static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	hpte_v = hpte_get_old_v(hptep);

	if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		else
			native_unlock_hpte(hptep);
	}
	/*
	 * We always need to invalidate the TLB because hpte_remove doesn't
	 * do a tlb invalidate. If a hash bucket gets full, we "evict" a
	 * more or less random entry from it. When we do that we don't
	 * invalidate the TLB (hpte_remove) because we assume the old
	 * translation is still technically "valid".
	 */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
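	/*
	 * One PMD maps a 16M hugepage, which may be backed by up to
	 * 1 << (PMD_SHIFT - shift) HPTEs of the base page size.
	 */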
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		hpte_v = hpte_get_old_v(hptep);

		/* Even if we miss, we need to invalidate the TLB */
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
			/* recheck with locks held */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
				/*
				 * Invalidate the hpte. NOTE: this also unlocks it
				 */

				hptep->v = 0;
			} else
				native_unlock_hpte(hptep);
		}
		/*
		 * We need a TLB invalidate for each address: the tlbie
		 * instruction compares the entry's VA in the TLB with
		 * the VA specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
		hpte_r = hpte_new_to_old_r(hpte_r);
	}
	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		size = hpte_page_sizes[lp] & 0xf;
		a_size = hpte_page_sizes[lp] >> 4;
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

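	/*
	 * hpt_hash() is vsid ^ (seg_off >> shift) for 256M segments,
	 * with an extra vsid << 25 folded in for 1T segments, so the
	 * seg_off bits that don't fit in the AVPN can be recovered by
	 * XORing the PTEG number back with the VSID.
	 */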
	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * Clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they take an ISI), and we are the only one left. We rely on
 * our kernel mapping being 0xC0's and the hardware ignoring those two
 * real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock. On POWER5 and newer, not taking the lock is fine. This
 * only gets called during boot before secondary CPUs have come up and
 * during crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static notrace void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * We could lock the pte here, but we are the only cpu
		 * running, right? And for crash dump, we probably
		 * don't want to wait for a possibly bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call ___tlbie() here rather than tlbie() since we can't
		 * take the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			___tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush: we batch the tlbies to avoid taking/releasing
 * the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn = 0;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;
	unsigned int use_local;

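	/*
	 * Mirrors the use_local logic in tlbie(): local tlbiel is only
	 * usable when the flush is for this CPU, the page size has a
	 * tlbiel variant, and no CAPI (cxl) contexts are attached,
	 * presumably because the accelerator is only kept coherent by
	 * global tlbies.
	 */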
	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
		mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				continue;
			/* lock and try again */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;

		} pte_iterate_hashed_end();
	}

	if (use_local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		ppc_after_tlbiel_barrier();
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		/*
		 * Just do one more with the last used values.
		 */
		fixup_tlbie_vpn(vpn, psize, psize, ssize);
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

void __init hpte_init_native(void)
{
	mmu_hash_ops.hpte_invalidate	= native_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	= native_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
	mmu_hash_ops.hpte_removebolted = native_hpte_removebolted;
	mmu_hash_ops.hpte_insert	= native_hpte_insert;
	mmu_hash_ops.hpte_remove	= native_hpte_remove;
	mmu_hash_ops.hpte_clear_all	= native_hpte_clear;
	mmu_hash_ops.flush_hash_range = native_flush_hash_range;
	mmu_hash_ops.hugepage_invalidate   = native_hugepage_invalidate;
}