#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE, which we
 * need for various slice-related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/processor.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25
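
/*
 * Illustrative sketch (not part of this header): the ESID and VSID words
 * of a bolted kernel SLB entry are assembled from the bits above, roughly
 * as slb.c does it:
 *
 *	vsid_data = (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) |
 *		    SLB_VSID_KERNEL | llp_flags |
 *		    ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
 *	esid_data = (ea & esid_mask) | SLB_ESID_V | slot;
 *
 * llp_flags, esid_mask and slot stand in for the page-size flags, segment
 * mask and SLB index used by the real code.
 */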

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)
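
/*
 * Illustrative sketch: the bits above are how the hash MM code matches an
 * existing HPTE. A lookup builds the expected first dword with
 * hpte_encode_avpn() (defined below) and compares it against each slot in
 * the group, e.g.:
 *
 *	want_v = hpte_encode_avpn(vpn, psize, ssize);
 *	hpte_v = be64_to_cpu(hptep->v);
 *	if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
 *		... found a valid, matching entry ...
 */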

/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, User none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to invalidate */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */

#ifndef __ASSEMBLY__

struct mmu_hash_ops {
	void            (*hpte_invalidate)(unsigned long slot,
					   unsigned long vpn,
					   int bpsize, int apsize,
					   int ssize, int local);
	long		(*hpte_updatepp)(unsigned long slot,
					 unsigned long newpp,
					 unsigned long vpn,
					 int bpsize, int apsize,
					 int ssize, unsigned long flags);
	void            (*hpte_updateboltedpp)(unsigned long newpp,
					       unsigned long ea,
					       int psize, int ssize);
	long		(*hpte_insert)(unsigned long hpte_group,
				       unsigned long vpn,
				       unsigned long prpn,
				       unsigned long rflags,
				       unsigned long vflags,
				       int psize, int apsize,
				       int ssize);
	long		(*hpte_remove)(unsigned long hpte_group);
	int             (*hpte_removebolted)(unsigned long ea,
					     int psize, int ssize);
	void		(*flush_hash_range)(unsigned long number, int local);
	void		(*hugepage_invalidate)(unsigned long vsid,
					       unsigned long addr,
					       unsigned char *hpte_slot_array,
					       int psize, int ssize, int local);
	/*
	 * Special for kexec.
	 * To be called in real mode with interrupts disabled. No locks are
	 * taken; as such, concurrent access on pre-POWER5 hardware could
	 * result in a deadlock.
	 * The linear mapping is destroyed as well.
	 */
	void		(*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;
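
/*
 * Illustrative sketch: a platform installs its implementation in
 * mmu_hash_ops at boot (e.g. via hpte_init_native() or
 * hpte_init_pseries(), declared below); generic hash MM code then calls
 * through the ops, e.g.:
 *
 *	mmu_hash_ops.hpte_invalidate(slot, vpn, psize, apsize, ssize, local);
 */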

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}
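
/*
 * For example, with 64K pages configured, shift_to_mmu_psize(16) returns
 * MMU_PAGE_64K and mmu_psize_to_shift(MMU_PAGE_64K) returns 16;
 * shift_to_mmu_psize() returns -1 for a shift that matches no configured
 * page size.
 */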

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Encoded virtual page number (VPN) shift.
 * In order to fit the 78-bit VA in a 64-bit variable we shift the VA by
 * 12 bits. This enables us to address up to a 76-bit VA.
 * For the HPT hash we can ignore the page-size bits of the VA, and for
 * HPTE encoding we ignore up to 23 bits of the VA. So ignoring the lower
 * 12 bits ensures we work in all cases, including 4k page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE.  The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;

	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize, int ssize)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;

	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K) {
		return pa & HPTE_R_RPN;
	} else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;

		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
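
/*
 * Illustrative sketch: an hpte_insert() implementation combines the two
 * encoders above with caller-supplied flags, along the lines of:
 *
 *	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags |
 *		 HPTE_V_VALID;
 *	hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;
 */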

/*
 * Build a VPN_SHIFT-bit-shifted VA (i.e. a VPN) given the VSID, EA and
 * segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
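
/*
 * Illustrative sketch: the hash selects the primary PTEG; the secondary
 * PTEG comes from its complement. Callers do roughly:
 *
 *	hash = hpt_hash(vpn, shift, ssize);
 *	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 *
 * and, if the primary group is full, retry with
 *
 *	hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
 */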

#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from mmu context id and effective segment id of the address.
 *
 * For user processes the max context id is limited to ((1ul << 19) - 5).
 * For kernel space, we use the top 4 context ids to map addresses as below.
 * NOTE: each context only supports 64TB now.
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
 * bad addresses. This enables us to consolidate bad address handling in
 * hash_page.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff, which would result in VSID 0
 * because of the modulo operation in the vsid scramble. But the vmemmap
 * (which is what uses region 0xf) will never be close to 64TB in size
 * (it's 56 bytes per page of system memory).
 */

#define CONTEXT_BITS		19
#define ESID_BITS		18
#define ESID_BITS_1T		6

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. The top 4 contexts are used for
 * kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
 * (19 == 37 + 28 - 46).
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 5)

/*
 * This should be chosen such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)

#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 * 	- rt and rx must be different registers
 * 	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 * 	  bits may contain other garbage, so you may need to mask the
 * 	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* NOTE: explanation based on VSID_BITS_##size = 36		\
	 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE  (H_PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)
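
/*
 * Illustrative sketch: a protection lookup walks the tree with the
 * shifts above, along the lines of subpage_protection() in
 * hash_utils_64.c (NULL and maxaddr checks omitted):
 *
 *	if (ea < 0x100000000UL)
 *		sbpm = spt->low_prot;
 *	else
 *		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
 *	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
 *	spp = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
 */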

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */
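
/*
 * Worked example of the 2^n - 1 reduction above (illustrative numbers,
 * using VSID_BITS = 4 and modulus 15): for x = 23,
 * (23 >> 4) + (23 & 15) = 1 + 7 = 8 = 23 mod 15. The final
 * (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS step handles the leftover
 * value 15, mapping it to 0 (since 15 == 0 mod 15).
 */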

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address. We return VSID 0 for that.
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 *
 * For kernel space, we use the top 4 context ids to map addresses as below
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/*
	 * The kernel takes the top 4 contexts from the available range.
	 */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}

unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */