// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

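/*
 * The HPTE lock bit is a software-use bit (0x8) in the HPTE's first
 * doubleword, which is stored big-endian. Linux bitops number bits within
 * a native-endian long, so on little-endian hosts the same physical bit
 * is found at bit 56+3 of the loaded word.
 */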
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
{
	unsigned long rb;

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));

	asm volatile("tlbiel %0" : : "r" (rb));
}

/*
 * tlbiel instruction for hash, set invalidation
 * i.e., r=0 and is=01 or is=10 or is=11
 */
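/*
 * Operand sketch (a hedged reading of ISA v3.0, not normative): "set"
 * selects the TLB congruence class and "is" the invalidation scope (both
 * placed in RB), "pid" goes in RS, "ric" picks what to invalidate
 * (0 = TLB entries only, 2 = all cached translations, including table
 * entries), and "prs" selects process-scoped rather than partition-scoped
 * invalidation.
 */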
static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
					unsigned int pid,
					unsigned int ric, unsigned int prs)
{
	unsigned long rb;
	unsigned long rs;
	unsigned int r = 0; /* hash format */

	rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
	rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

	asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
		     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
		     : "memory");
}

static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	for (set = 0; set < num_sets; set++)
		tlbiel_hash_set_isa206(set, is);

	asm volatile("ptesync": : :"memory");
}

static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
	unsigned int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and any caching of partition table
	 * entries. Then flush the remaining sets of the TLB. Hash mode uses
	 * partition scoped TLB translations.
	 */
	tlbiel_hash_set_isa300(0, is, 0, 2, 0);
	for (set = 1; set < num_sets; set++)
		tlbiel_hash_set_isa300(set, is, 0, 0, 0);

	/*
	 * Now invalidate the process table cache.
	 *
	 * From ISA v3.0B p. 1078:
	 *     The following forms are invalid.
	 *      * PRS=1, R=0, and RIC!=2 (The only process-scoped
	 *        HPT caching is of the Process Table.)
	 */
	tlbiel_hash_set_isa300(0, is, 0, 2, 1);

	asm volatile("ptesync": : :"memory");

	asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
}

void hash__tlbiel_all(unsigned int action)
{
	unsigned int is;

	switch (action) {
	case TLB_INVAL_SCOPE_GLOBAL:
		is = 3;
		break;
	case TLB_INVAL_SCOPE_LPID:
		is = 2;
		break;
	default:
		BUG();
	}

	if (early_cpu_has_feature(CPU_FTR_ARCH_300))
		tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is);
	else if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
		tlbiel_all_isa206(POWER8_TLB_SETS, is);
	else if (early_cpu_has_feature(CPU_FTR_ARCH_206))
		tlbiel_all_isa206(POWER7_TLB_SETS, is);
	else
		WARN(1, "%s called on pre-POWER7 CPU\n", __func__);
}

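/*
 * Build the tlbie RB operand for a given vpn/page size/segment size and
 * issue the invalidation. Returns the constructed RB value so __tlbie()
 * can pass it to the tracepoint.
 */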
static inline unsigned long ___tlbie(unsigned long vpn, int psize,
						int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of 4K page.
	 * With vpn we ignore the lower VPN_SHIFT bits already.
	 * And top two bits are already ignored because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * bits 58..64 of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	return va;
}

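/*
 * Workaround for a POWER9 tlbie erratum (CPU_FTR_P9_TLBIE_BUG): repeat the
 * last invalidation after an extra ptesync. The exact failure mode is
 * hardware-specific; the ptesync also keeps the repeated tlbie from being
 * reordered with the original one.
 */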
static inline void fixup_tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
		/* Need the extra ptesync to ensure we don't reorder tlbie */
		asm volatile("ptesync": : :"memory");
		___tlbie(vpn, psize, apsize, ssize);
	}
}

static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long rb;

	rb = ___tlbie(vpn, psize, apsize, ssize);
	trace_tlbie(0, 0, rb, 0, 0, 0, 0);
}

static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	if (mmu_has_feature(MMU_FTR_TLBIE_CROP_VA))
		va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = get_sllp_encoding(apsize);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65) and we need
		 * bits 58..64 of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
			     : : "r" (va), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
	trace_tlbie(0, 1, va, 0, 0, 0, 0);
}

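/*
 * Invalidate one translation, choosing between a core-local tlbiel and a
 * global tlbie. A local flush is only safe when the caller asked for one,
 * the CPU supports tlbiel for this page size, and no CXL contexts are in
 * use (a CXL accelerator's MMU only observes global invalidations). CPUs
 * without MMU_FTR_LOCKLESS_TLBIE additionally serialise global tlbies
 * with native_tlbie_lock.
 */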
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		fixup_tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

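/*
 * Per-HPTE lock, taken via a software-use bit in the entry's first
 * doubleword. Storing a new value to hptep->v with the bit clear also
 * releases the lock, which is why the invalidation paths below can simply
 * write 0 instead of calling native_unlock_hpte().
 */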
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		spin_begin();
		while (test_bit(HPTE_LOCK_BIT, word))
			spin_cpu_relax();
		spin_end();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
		hpte_v = hpte_old_to_new_v(hpte_v);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

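	/*
	 * Return the slot within the group; bit 3 tells the caller this
	 * entry was inserted via the secondary hash.
	 */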
	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

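/*
 * Typical caller pattern (a hedged sketch, not code from this file): try
 * the primary group, then the secondary with HPTE_V_SECONDARY set, and
 * evict on double failure before retrying:
 *
 *	slot = native_hpte_insert(primary_group, vpn, pa, rflags, 0,
 *				  psize, apsize, ssize);
 *	if (slot == -1)
 *		slot = native_hpte_insert(secondary_group, vpn, pa, rflags,
 *					  HPTE_V_SECONDARY, psize, apsize,
 *					  ssize);
 *	if (slot == -1)
 *		native_hpte_remove(primary_group);  ... then retry the insert
 */

/*
 * Evict one random, non-bolted entry from a full group. NOTE: this does
 * not invalidate the TLB for the victim translation; callers tolerate the
 * stale entry (see the comments in native_hpte_updatepp() and
 * native_hpte_invalidate()).
 */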
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = hpte_get_old_v(hptep);
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PPP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PPP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

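/*
 * Look up the HPT slot for a bolted kernel mapping. Only the primary hash
 * group is searched, since bolted entries are only ever inserted there.
 * Returns the global slot index, or -1 if no matching entry is found.
 */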
static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {

		hptep = htab_address + slot;
		hpte_v = hpte_get_old_v(hptep);
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
				~(HPTE_R_PPP | HPTE_R_N)) |
			       (newpp & (HPTE_R_PPP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. For bolted entries the base
	 * and actual page sizes are always the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

/*
 * Remove a bolted kernel entry. Memory hotplug uses this.
 *
 * No need to lock here because we should be the only user.
 */
static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		return -ENOENT;

	hptep = htab_address + slot;

	VM_WARN_ON(!(be64_to_cpu(hptep->v) & HPTE_V_BOLTED));

	/* Invalidate the hpte */
	hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, psize, psize, ssize, 0);
	return 0;
}

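/*
 * Invalidate one HPTE and flush the stale translation from the TLB.
 * Interrupts are disabled for the whole lock/invalidate/tlbie sequence to
 * keep the HPTE lock window bounded.
 */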
static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	hpte_v = hpte_get_old_v(hptep);

	if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = hpte_get_old_v(hptep);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		else
			native_unlock_hpte(hptep);
	}
	/*
	 * We need to invalidate the TLB always because hpte_remove doesn't do
	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more or
	 * less random entry from it. When we do that we don't invalidate the
	 * TLB (hpte_remove) because we assume the old translation is still
	 * technically "valid".
	 */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		hpte_v = hpte_get_old_v(hptep);

		/* Even if we miss, we need to invalidate the TLB */
		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
			/* recheck with locks held */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
				/* Invalidate the hpte. NOTE: this also unlocks it */
				hptep->v = 0;
			} else
				native_unlock_hpte(hptep);
		}
		/*
		 * We need a TLB invalidate for each address: the tlbie
		 * instruction compares the entry's VA in the TLB with the
		 * VA specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif

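/*
 * Reconstruct the page sizes, segment size and VPN from a raw HPTE. The
 * AVPN only stores the VA down to bit 23, so for smaller page sizes the
 * missing low bits (vpi) are recovered from the PTEG index by inverting
 * the hash function: for 256M segments, hash = vsid ^ (seg_off >> shift),
 * hence vpi = (vsid ^ pteg) & htab_hash_mask.
 */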
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
		hpte_r = hpte_new_to_old_r(hpte_r);
	}
	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		size = hpte_page_sizes[lp] & 0xf;
		a_size = hpte_page_sizes[lp] >> 4;
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock. On POWER5 and newer, not taking the lock is fine. This
 * only gets called during boot before secondary CPUs have come up and
 * during crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled. Remember, no dynamic memory
 * here, although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * We could lock the pte here, but we are the only cpu
		 * running, right? And for crash dump, we probably
		 * don't want to wait for a maybe-bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call ___tlbie() here rather than tlbie() since we can't
		 * take the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			___tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush: we batch the tlbie's to avoid taking and
 * releasing the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn = 0;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;
	unsigned int use_local;

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
		mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				continue;
			/* lock and try again */
			native_lock_hpte(hptep);
			hpte_v = hpte_get_old_v(hptep);

			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;

		} pte_iterate_hashed_end();
	}

	if (use_local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		/*
		 * Just do one more with the last used values.
		 */
		fixup_tlbie(vpn, psize, psize, ssize);
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

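/*
 * Install the native (bare-metal) hash MMU operations into mmu_hash_ops.
 * Platform setup code calls this early in boot when the kernel manages
 * the hash table directly rather than via hypervisor calls.
 */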
void __init hpte_init_native(void)
{
	mmu_hash_ops.hpte_invalidate	= native_hpte_invalidate;
	mmu_hash_ops.hpte_updatepp	= native_hpte_updatepp;
	mmu_hash_ops.hpte_updateboltedpp = native_hpte_updateboltedpp;
	mmu_hash_ops.hpte_removebolted = native_hpte_removebolted;
	mmu_hash_ops.hpte_insert	= native_hpte_insert;
	mmu_hash_ops.hpte_remove	= native_hpte_remove;
	mmu_hash_ops.hpte_clear_all	= native_hpte_clear;
	mmu_hash_ops.flush_hash_range = native_flush_hash_range;
	mmu_hash_ops.hugepage_invalidate   = native_hugepage_invalidate;
}