xref: /openbmc/linux/arch/powerpc/include/asm/book3s/32/mmu-hash.h (revision 03ab8e6297acd1bc0eedaa050e2a1635c576fd11)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_

/*
 * 32-bit hash table MMU support
 */

/*
 * BATs
 */

/*
 * Block size masks: each value is (size / 128K) - 1, i.e. the mask of
 * effective-address bits ignored by the BAT match for that block size.
 */
#define BL_128K	0x000
#define BL_256K 0x001
#define BL_512K 0x003
#define BL_1M   0x007
#define BL_2M   0x00F
#define BL_4M   0x01F
#define BL_8M   0x03F
#define BL_16M  0x07F
#define BL_32M  0x0FF
#define BL_64M  0x1FF
#define BL_128M 0x3FF
#define BL_256M 0x7FF

/* BAT Access Protection */
#define BPP_XX	0x00		/* No access */
#define BPP_RX	0x01		/* Read only */
#define BPP_RW	0x02		/* Read/write */
31f64e8084SAneesh Kumar K.V 
32f64e8084SAneesh Kumar K.V #ifndef __ASSEMBLY__
/*
 * Contort a phys_addr_t into the right format/bits for a BAT.
 *
 * With CONFIG_PHYS_64BIT, physical address bits 33-35 are folded down to
 * bits 9-11 and bit 32 down to bit 2 of the 32-bit BAT word (the
 * extended-address fields used by parts that support >32-bit physical
 * addressing); PHYS_BAT_ADDR() is the exact inverse.  Without it, the
 * address is used as-is (masked back to the 128K-aligned block base on
 * the way out).
 *
 * Note: every use of the macro argument is parenthesized so compound
 * expressions (e.g. `base + off`) expand correctly.
 */
#ifdef CONFIG_PHYS_64BIT
#define BAT_PHYS_ADDR(x) ((u32)(((x) & 0x00000000fffe0000ULL) | \
				(((x) & 0x0000000e00000000ULL) >> 24) | \
				(((x) & 0x0000000100000000ULL) >> 30)))
#define PHYS_BAT_ADDR(x) (((u64)(x) & 0x00000000fffe0000ULL) | \
			  (((u64)(x) << 24) & 0x0000000e00000000ULL) | \
			  (((u64)(x) << 30) & 0x0000000100000000ULL))
#else
#define BAT_PHYS_ADDR(x) (x)
#define PHYS_BAT_ADDR(x) ((x) & 0xfffe0000)
#endif
45f64e8084SAneesh Kumar K.V 
/* Software image of one BAT pair, as written to the hardware registers. */
struct ppc_bat {
	u32 batu;	/* upper BAT word */
	u32 batl;	/* lower BAT word */
};
#endif /* !__ASSEMBLY__ */
51f64e8084SAneesh Kumar K.V 
/*
 * Hash table
 */

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */

/* Values for Segment Registers */
#define SR_NX	0x10000000	/* No Execute */
#define SR_KP	0x20000000	/* User key */
#define SR_KS	0x40000000	/* Supervisor key */
6631ed2b13SChristophe Leroy 
67526d4a4cSChristophe Leroy #ifdef __ASSEMBLY__
68526d4a4cSChristophe Leroy 
69526d4a4cSChristophe Leroy #include <asm/asm-offsets.h>
70526d4a4cSChristophe Leroy 
/*
 * uus_* = "update user segment" assembly helpers.
 *
 * Emit `addi \reg1, \reg2, \imm` only when segment \sr is a user segment
 * (below NUM_USER_SEGMENTS); otherwise no instruction is generated.
 */
.macro uus_addi sr reg1 reg2 imm
	.if NUM_USER_SEGMENTS > \sr
	addi	\reg1,\reg2,\imm
	.endif
.endm
76526d4a4cSChristophe Leroy 
/* Write \reg1 to segment register \sr, but only for user segments. */
.macro uus_mtsr sr reg1
	.if NUM_USER_SEGMENTS > \sr
	mtsr	\sr, \reg1
	.endif
.endm
82526d4a4cSChristophe Leroy 
/*
 * This isync() shouldn't be necessary as the kernel is not expected to run
 * any instruction in userspace soon after the update of segments and 'rfi'
 * instruction is used to return to userspace, but hash based cores
 * (at least G3) seem to exhibit a random behaviour when the 'isync' is not
 * there. 603 cores don't have this behaviour so don't do the 'isync' as it
 * saves several CPU cycles.
 */
.macro uus_isync
#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
	isync
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif
.endm
98526d4a4cSChristophe Leroy 
/*
 * Load all 16 segment registers, four at a time, from the base value in
 * \tmp1.  Each successive segment register receives a value 0x111 higher
 * than the previous one (the same skew as CTX_TO_VSID), and segments at
 * or above NUM_USER_SEGMENTS are skipped entirely by the uus_* helpers.
 * \tmp2..\tmp4 are clobbered as staging registers.
 */
.macro update_user_segments_by_4 tmp1 tmp2 tmp3 tmp4
	uus_addi	1, \tmp2, \tmp1, 0x111
	uus_addi	2, \tmp3, \tmp1, 0x222
	uus_addi	3, \tmp4, \tmp1, 0x333

	uus_mtsr	0, \tmp1
	uus_mtsr	1, \tmp2
	uus_mtsr	2, \tmp3
	uus_mtsr	3, \tmp4

	/* Bump each staging register by 4 * 0x111 for the next group. */
	uus_addi	4, \tmp1, \tmp1, 0x444
	uus_addi	5, \tmp2, \tmp2, 0x444
	uus_addi	6, \tmp3, \tmp3, 0x444
	uus_addi	7, \tmp4, \tmp4, 0x444

	uus_mtsr	4, \tmp1
	uus_mtsr	5, \tmp2
	uus_mtsr	6, \tmp3
	uus_mtsr	7, \tmp4

	uus_addi	8, \tmp1, \tmp1, 0x444
	uus_addi	9, \tmp2, \tmp2, 0x444
	uus_addi	10, \tmp3, \tmp3, 0x444
	uus_addi	11, \tmp4, \tmp4, 0x444

	uus_mtsr	8, \tmp1
	uus_mtsr	9, \tmp2
	uus_mtsr	10, \tmp3
	uus_mtsr	11, \tmp4

	uus_addi	12, \tmp1, \tmp1, 0x444
	uus_addi	13, \tmp2, \tmp2, 0x444
	uus_addi	14, \tmp3, \tmp3, 0x444
	uus_addi	15, \tmp4, \tmp4, 0x444

	uus_mtsr	12, \tmp1
	uus_mtsr	13, \tmp2
	uus_mtsr	14, \tmp3
	uus_mtsr	15, \tmp4

	uus_isync
.endm
141526d4a4cSChristophe Leroy 
142526d4a4cSChristophe Leroy #else
143f64e8084SAneesh Kumar K.V 
/*
 * This macro defines the mapping from contexts to VSIDs (virtual
 * segment IDs).  We use a skew on both the context and the high 4 bits
 * of the 32-bit virtual address (the "effective segment ID") in order
 * to spread out the entries in the MMU hash table.  Note, if this
 * function is changed then hash functions will have to be
 * changed to correspond.
 *
 * Both arguments are fully parenthesized so compound expressions
 * (e.g. `CTX_TO_VSID(ctx, i + 1)`) expand correctly.
 */
#define CTX_TO_VSID(c, id)	((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)
1537235bb35SChristophe Leroy 
/*
 * Hardware Page Table Entry
 * Note that the xpn and x bitfields are used only by processors that
 * support extended addressing; otherwise, those bits are reserved.
 *
 * NOTE(review): the bitfield order mirrors the hardware PTE layout as
 * laid out by a big-endian compiler — do not reorder or repack.
 */
struct hash_pte {
	unsigned long v:1;	/* Entry is valid */
	unsigned long vsid:24;	/* Virtual segment identifier */
	unsigned long h:1;	/* Hash algorithm indicator */
	unsigned long api:6;	/* Abbreviated page index */
	unsigned long rpn:20;	/* Real (physical) page number */
	unsigned long xpn:3;	/* Real page number bits 0-2, optional */
	unsigned long r:1;	/* Referenced */
	unsigned long c:1;	/* Changed */
	unsigned long w:1;	/* Write-thru cache mode */
	unsigned long i:1;	/* Cache inhibited */
	unsigned long m:1;	/* Memory coherence */
	unsigned long g:1;	/* Guarded */
	unsigned long x:1;	/* Real page number bit 3, optional */
	unsigned long pp:2;	/* Page protection */
};
175f64e8084SAneesh Kumar K.V 
typedef struct {
	unsigned long id;	/* context number, skewed into VSIDs via CTX_TO_VSID() */
	unsigned long sr0;	/* value for user segment register 0 */
	void __user *vdso;	/* user-space address of the vdso mapping */
} mm_context_t;

/* With KUEP, user segments start out No-Execute until explicitly enabled. */
#ifdef CONFIG_PPC_KUEP
#define INIT_MM_CONTEXT(mm) .context.sr0 = SR_NX
#endif
18570428da9SChristophe Leroy 
void update_bats(void);

/* Nothing to tear down per-CPU on hash32. */
static inline void cleanup_cpu_mmu_context(void) { }
1885e04ae85SChristophe Leroy 
/* patch sites: instruction locations rewritten at runtime by code patching */
extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2;
extern s32 patch__hash_page_B, patch__hash_page_C;
extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
extern s32 patch__flush_hash_B;
1949efc74ffSChristophe Leroy 
19591bb3082SChristophe Leroy #include <asm/reg.h>
19691bb3082SChristophe Leroy #include <asm/task_size_32.h>
19791bb3082SChristophe Leroy 
update_user_segment(u32 n,u32 val)198882136fbSChristophe Leroy static __always_inline void update_user_segment(u32 n, u32 val)
199882136fbSChristophe Leroy {
200882136fbSChristophe Leroy 	if (n << 28 < TASK_SIZE)
201882136fbSChristophe Leroy 		mtsr(val + n * 0x111, n << 28);
202882136fbSChristophe Leroy }
20391bb3082SChristophe Leroy 
/*
 * Reload every user segment register with @val, skewed by 0x111 per
 * segment (matching CTX_TO_VSID).  Bits 27-24 of @val are cleared first,
 * keeping the flag nibble and the 24-bit VSID — presumably because those
 * bits are reserved in the segment register format; verify against the
 * architecture documents.
 *
 * NOTE(review): manually unrolled rather than looped, apparently so each
 * __always_inline call sees a constant n and the TASK_SIZE bound check
 * folds away at compile time — TODO confirm intent before restructuring.
 */
static __always_inline void update_user_segments(u32 val)
{
	val &= 0xf0ffffff;

	update_user_segment(0, val);
	update_user_segment(1, val);
	update_user_segment(2, val);
	update_user_segment(3, val);
	update_user_segment(4, val);
	update_user_segment(5, val);
	update_user_segment(6, val);
	update_user_segment(7, val);
	update_user_segment(8, val);
	update_user_segment(9, val);
	update_user_segment(10, val);
	update_user_segment(11, val);
	update_user_segment(12, val);
	update_user_segment(13, val);
	update_user_segment(14, val);
	update_user_segment(15, val);
}
22591bb3082SChristophe Leroy 
/* BAT setup helpers implemented in the hash32 MMU code. */
int __init find_free_bat(void);
unsigned int bat_block_size(unsigned long base, unsigned long top);
#endif /* !__ASSEMBLY__ */

/* We happily ignore the smaller BATs on 601, we don't actually use
 * those definitions on hash32 at the moment anyway
 */
#define mmu_virtual_psize	MMU_PAGE_4K
#define mmu_linear_psize	MMU_PAGE_256M

#endif /* _ASM_POWERPC_BOOK3S_32_MMU_HASH_H_ */
237