1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_NOHASH_32_PTE_44x_H
3 #define _ASM_POWERPC_NOHASH_32_PTE_44x_H
4 #ifdef __KERNEL__
5 
6 /*
7  * Definitions for PPC440
8  *
9  * Because of the 3 word TLB entries to support 36-bit addressing,
10  * the attributes are difficult to map in such a fashion that they
11  * are easily loaded during exception processing.  I decided to
12  * organize the entry so the ERPN is the only portion in the
13  * upper word of the PTE and the attribute bits below are packed
14  * in as sensibly as they can be in the area below a 4KB page size
15  * oriented RPN.  This at least makes it easy to load the RPN and
16  * ERPN fields in the TLB. -Matt
17  *
18  * This isn't entirely true anymore, at least some bits are now
19  * easier to move into the TLB from the PTE. -BenH.
20  *
21  * Note that these bits preclude future use of a page size
22  * less than 4KB.
23  *
24  *
25  * The PPC 440 core has the following TLB attribute fields:
26  *
27  *   TLB1:
28  *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
29  *   RPN.................................  -  -  -  -  -  - ERPN.......
30  *
31  *   TLB2:
32  *   0  1  2  3  4  ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
33  *   -  -  -  -  -    - U0 U1 U2 U3 W  I  M  G  E   - UX UW UR SX SW SR
34  *
35  * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional
36  * TLB2 storage attribute fields. Those are:
37  *
38  *   TLB2:
39  *   0...10    11   12   13   14   15   16...31
40  *   no change WL1  IL1I IL1D IL2I IL2D no change
41  *
42  * There are some constraints and options to decide how to map the
43  * software bits into a TLB entry.
44  *
45  *   - PRESENT *must* be in the bottom three bits because swap cache
46  *     entries use the top 29 bits for TLB2.
47  *
48  *   - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
49  *     because it doesn't support SMP. However, some later 460 variants
50  *     have -some- form of SMP support and so I keep the bit there for
51  *     future use
52  *
53  * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
54  * for memory protection related functions (see PTE structure in
55  * include/asm-ppc/mmu.h).  The _PAGE_XXX definitions in this file map to the
56  * above bits.  Note that the bit values are CPU specific, not architecture
57  * specific.
58  *
59  * The kernel PTE entry can be an ordinary PTE mapping a page or a special swap
60  * PTE. In case of a swap PTE, LSB 2-24 are used to store information regarding
61  * the swap entry. However LSB 0-1 still hold protection values, for example,
62  * to distinguish swap PTEs from ordinary PTEs, and must be used with care.
63  */
64 
/*
 * Legend for the markers below: "H:" bits correspond directly to a
 * hardware TLB2 attribute bit from the layout above (E, G, M, I, W,
 * and execute); "S:" bits have no TLB2 counterpart and appear to be
 * software-managed only — NOTE(review): confirm against the TLB miss
 * handler before relying on this distinction.
 */
65 #define _PAGE_PRESENT	0x00000001		/* S: PTE valid */
66 #define _PAGE_RW	0x00000002		/* S: Write permission */
67 #define _PAGE_EXEC	0x00000004		/* H: Execute permission */
68 #define _PAGE_ACCESSED	0x00000008		/* S: Page referenced */
69 #define _PAGE_DIRTY	0x00000010		/* S: Page dirty */
70 #define _PAGE_SPECIAL	0x00000020		/* S: Special page */
71 #define _PAGE_USER	0x00000040		/* S: User page */
72 #define _PAGE_ENDIAN	0x00000080		/* H: E bit (endianness) */
73 #define _PAGE_GUARDED	0x00000100		/* H: G bit (guarded) */
74 #define _PAGE_COHERENT	0x00000200		/* H: M bit (memory coherence) */
75 #define _PAGE_NO_CACHE	0x00000400		/* H: I bit (cache inhibit) */
76 #define _PAGE_WRITETHRU	0x00000800		/* H: W bit (write-through) */
77 
78 /* No page size encoding in the linux PTE */
79 #define _PAGE_PSIZE		0
80 
/*
 * Canned permission sets for kernel mappings.  RO needs no extra bits
 * beyond the base prot; ROX only adds execute.  The RW variants set
 * _PAGE_DIRTY up front along with _PAGE_RW — presumably so kernel
 * mappings never take a dirty-tracking fault (TODO: confirm).
 */
81 #define _PAGE_KERNEL_RO		0
82 #define _PAGE_KERNEL_ROX	_PAGE_EXEC
83 #define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
84 #define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
85 
86 /* TODO: Add large page lowmem mapping support */
/*
 * PMD helpers: presence is detected by any bits inside PAGE_MASK being
 * set, and a pmd is "bad" when the low (offset) bits are set — i.e. the
 * stored value is not page-aligned.
 */
87 #define _PMD_PRESENT	0
88 #define _PMD_PRESENT_MASK (PAGE_MASK)
89 #define _PMD_BAD	(~PAGE_MASK)
90 #define _PMD_USER	0
91 
92 /* ERPN in a PTE never gets cleared, ignore it */
/* Mask off the entire upper 32-bit word (which holds the ERPN) when
 * testing whether a 64-bit PTE is empty. */
93 #define _PTE_NONE_MASK	0xffffffff00000000ULL
94 
95 /*
96  * We define 2 sets of base prot bits, one for basic pages (ie,
97  * cacheable kernel and user pages) and one for non cacheable
98  * pages. We always set _PAGE_COHERENT when SMP is enabled or
99  * the processor might need it for DMA coherency.
100  */
101 #define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
102 #if defined(CONFIG_SMP)
103 #define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)	/* add M bit on SMP builds */
104 #else
105 #define _PAGE_BASE	(_PAGE_BASE_NC)	/* UP: M bit not needed */
106 #endif
107 
108 /* Permission masks used to generate the __P and __S table */
/*
 * NONE grants no user access (no _PAGE_USER); SHARED is user
 * read/write; COPY and READONLY are defined identically (user access,
 * no _PAGE_RW) — a write therefore faults, which is what copy-on-write
 * relies on for PAGE_COPY.  The _X variants add execute permission.
 */
109 #define PAGE_NONE	__pgprot(_PAGE_BASE)
110 #define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
111 #define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
112 #define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
113 #define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
114 #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
115 #define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
116 
117 #endif /* __KERNEL__ */
118 #endif /*  _ASM_POWERPC_NOHASH_32_PTE_44x_H */
119