/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_MMU_8XX_H_
#define _ASM_POWERPC_MMU_8XX_H_
/*
 * PPC8xx support
 */

/* Control/status registers for the MPC8xx.
 * A write operation to these registers causes serialized access.
 * During software tablewalk, the registers used perform mask/shift-add
 * operations when written/read. A TLB entry is created when the Mx_RPN
 * is written, and the contents of several registers are used to
 * create the entry.
 */
#define SPRN_MI_CTR	784	/* Instruction TLB control register */
#define MI_GPM		0x80000000	/* Set domain manager mode */
#define MI_PPM		0x40000000	/* Set subpage protection */
#define MI_CIDEF	0x20000000	/* Set cache inhibit when MMU dis */
#define MI_RSV4I	0x08000000	/* Reserve 4 TLB entries */
#define MI_PPCS		0x02000000	/* Use MI_RPN prob/priv state */
#define MI_IDXMASK	0x00001f00	/* TLB index to be loaded */

/* These are the Ks and Kp from the PowerPC books.  For proper operation,
 * Ks = 0, Kp = 1.
 */
#define SPRN_MI_AP	786
#define MI_Ks		0x80000000	/* Should not be set */
#define MI_Kp		0x40000000	/* Should always be set */

/*
 * All pages' PP data bits are set to either 001 or 011 by copying _PAGE_EXEC
 * into bit 21 in the ITLBmiss handler (bit 21 is the middle bit), which means
 * respectively NA for All or X for Supervisor and no access for User.
 * Then we use the APG to say whether accesses follow the Page rules or
 * the "all Supervisor" rules (Access to all).
 * _PAGE_ACCESSED is also managed via APG. When _PAGE_ACCESSED is not set,
 * we use the "all User" rules, which lead to NA for all.
 * Therefore, we define 4 APG groups; lsb is _PAGE_ACCESSED:
 * 0 => Kernel          => 11 (all accesses performed as user, in accordance
 *                             with the page definition)
 * 1 => Kernel+Accessed => 01 (all accesses performed according to the page
 *                             definition)
 * 2 => User            => 11 (all accesses performed as user, in accordance
 *                             with the page definition)
 * 3 => User+Accessed   => 00 (all accesses performed as supervisor, in
 *                             accordance with the page definition) for INIT
 *                      => 10 (all accesses performed according to the
 *                             swapped page definition) for KUEP
 * 4-15 => Not Used
 */
#define MI_APG_INIT	0xdc000000
#define MI_APG_KUEP	0xde000000

/* The effective page number register.  When read, contains the information
 * about the last instruction TLB miss.  When MI_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
#define SPRN_MI_EPN	787
#define MI_EPNMASK	0xfffff000	/* Effective page number for entry */
#define MI_EVALID	0x00000200	/* Entry is valid */
#define MI_ASIDMASK	0x0000000f	/* ASID match value */
					/* Reset value is undefined */

/* A "level 1" or "segment" or whatever you want to call it register.
 * For the instruction TLB, it contains bits that get loaded into the
 * TLB entry when the MI_RPN is written.
 */
#define SPRN_MI_TWC	789
#define MI_APG		0x000001e0	/* Access protection group (0) */
#define MI_GUARDED	0x00000010	/* Guarded storage */
#define MI_PSMASK	0x0000000c	/* Mask of page size bits */
#define MI_PS8MEG	0x0000000c	/* 8M page size */
#define MI_PS512K	0x00000004	/* 512K page size */
#define MI_PS4K_16K	0x00000000	/* 4K or 16K page size */
#define MI_SVALID	0x00000001	/* Segment entry is valid */
					/* Reset value is undefined */

/* Real page number.  Defined by the pte.  Writing this register
 * causes a TLB entry to be created for the instruction TLB, using
 * additional information from the MI_EPN and MI_TWC registers.
 */
#define SPRN_MI_RPN	790
#define MI_SPS16K	0x00000008	/* Small page size (0 = 4k, 1 = 16k) */

/* Define an RPN value for mapping kernel memory to large virtual
 * pages for boot initialization.  This has real page number of 0,
 * large page size, shared page, cache enabled, and valid.
 * Also mark all subpages valid and write access.
 */
#define MI_BOOTINIT	0x000001fd
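
/*
 * Illustrative ITLB reload sequence (a minimal sketch only, kept in a
 * comment because this header is also visible to assembly; the real
 * handler is InstructionTLBMiss in head_8xx.S and does more work).
 * MI_EPN is latched by hardware when the miss occurs; software supplies
 * the remaining pieces:
 *
 *	mtspr	SPRN_MI_TWC, r11	# level 1 bits: MI_SVALID, page size
 *	mtspr	SPRN_MI_RPN, r10	# pte value; this write commits the
 *					# entry built from MI_EPN and MI_TWC
 */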

#define SPRN_MD_CTR	792	/* Data TLB control register */
#define MD_GPM		0x80000000	/* Set domain manager mode */
#define MD_PPM		0x40000000	/* Set subpage protection */
#define MD_CIDEF	0x20000000	/* Set cache inhibit when MMU dis */
#define MD_WTDEF	0x10000000	/* Set writethrough when MMU dis */
#define MD_RSV4I	0x08000000	/* Reserve 4 TLB entries */
#define MD_TWAM		0x04000000	/* Use 4K page hardware assist */
#define MD_PPCS		0x02000000	/* Use MD_RPN prob/priv state */
#define MD_IDXMASK	0x00001f00	/* TLB index to be loaded */

#define SPRN_M_CASID	793	/* Address space ID (context) to match */
#define MC_ASIDMASK	0x0000000f	/* Bits used for ASID value */

/* These are the Ks and Kp from the PowerPC books.  For proper operation,
 * Ks = 0, Kp = 1.
 */
#define SPRN_MD_AP	794
#define MD_Ks		0x80000000	/* Should not be set */
#define MD_Kp		0x40000000	/* Should always be set */

/* See the explanation above at the definition of MI_APG_INIT */
#define MD_APG_INIT	0xdc000000
#define MD_APG_KUAP	0xde000000

/* The effective page number register.  When read, contains the information
 * about the last data TLB miss.  When MD_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
#define SPRN_MD_EPN	795
#define MD_EPNMASK	0xfffff000	/* Effective page number for entry */
#define MD_EVALID	0x00000200	/* Entry is valid */
#define MD_ASIDMASK	0x0000000f	/* ASID match value */
					/* Reset value is undefined */

/* The pointer to the base address of the first level page table.
 * During a software tablewalk, reading this register provides the address
 * of the entry associated with MD_EPN.
 */
#define SPRN_M_TWB	796
#define M_L1TB		0xfffff000	/* Level 1 table base address */
#define M_L1INDX	0x00000ffc	/* Level 1 index, when read */
					/* Reset value is undefined */

/* A "level 1" or "segment" or whatever you want to call it register.
 * For the data TLB, it contains bits that get loaded into the TLB entry
 * when the MD_RPN is written.  It also provides the hardware assist
 * for finding the PTE address during software tablewalk.
 */
#define SPRN_MD_TWC	797
#define MD_L2TB		0xfffff000	/* Level 2 table base address */
#define MD_L2INDX	0xfffffe00	/* Level 2 index (*pte), when read */
#define MD_APG		0x000001e0	/* Access protection group (0) */
#define MD_GUARDED	0x00000010	/* Guarded storage */
#define MD_PSMASK	0x0000000c	/* Mask of page size bits */
#define MD_PS8MEG	0x0000000c	/* 8M page size */
#define MD_PS512K	0x00000004	/* 512K page size */
#define MD_PS4K_16K	0x00000000	/* 4K or 16K page size */
#define MD_WT		0x00000002	/* Use writethrough page attribute */
#define MD_SVALID	0x00000001	/* Segment entry is valid */
					/* Reset value is undefined */

/* Real page number.  Defined by the pte.  Writing this register
 * causes a TLB entry to be created for the data TLB, using
 * additional information from the MD_EPN and MD_TWC registers.
 */
#define SPRN_MD_RPN	798
#define MD_SPS16K	0x00000008	/* Small page size (0 = 4k, 1 = 16k) */

/* This is a temporary storage register that could be used to save
 * a processor working register during a tablewalk.
 */
#define SPRN_M_TW	799
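
/*
 * Illustrative data-side tablewalk using the hardware assist (a minimal
 * sketch assuming MD_TWAM is set; the real handler is DataStoreTLBMiss
 * in head_8xx.S and adds fixups, so treat this as an outline only):
 *
 *	mfspr	r10, SPRN_M_TWB		# address of the level 1 entry
 *	lwz	r11, 0(r10)		# fetch the level 1 (segment) entry
 *	mtspr	SPRN_MD_TWC, r11	# load it, then read back to get
 *	mfspr	r10, SPRN_MD_TWC	# the pte address (MD_L2INDX)
 *	lwz	r11, 0(r10)		# fetch the pte
 *	mtspr	SPRN_MD_RPN, r11	# this write creates the DTLB entry
 */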

#if defined(CONFIG_PPC_4K_PAGES)
#define mmu_virtual_psize	MMU_PAGE_4K
#elif defined(CONFIG_PPC_16K_PAGES)
#define mmu_virtual_psize	MMU_PAGE_16K
#define PTE_FRAG_NR		4
#define PTE_FRAG_SIZE_SHIFT	12
#define PTE_FRAG_SIZE		(1UL << PTE_FRAG_SIZE_SHIFT)
#else
#error "Unsupported PAGE_SIZE"
#endif

#define mmu_linear_psize	MMU_PAGE_8M

#define MODULES_VADDR	(PAGE_OFFSET - SZ_256M)
#define MODULES_END	PAGE_OFFSET

#ifndef __ASSEMBLY__

#include <linux/mmdebug.h>
#include <linux/sizes.h>

void mmu_pin_tlb(unsigned long top, bool readonly);

typedef struct {
	unsigned int id;
	unsigned int active;
	void __user *vdso;
	void *pte_frag;
} mm_context_t;

#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))

/* Page size definitions, common between 32 and 64-bit
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    enc   : is the pte encoding mask
 *
 */
struct mmu_psize_def {
	unsigned int	shift;	/* number of bits */
	unsigned int	enc;	/* PTE encoding */
	unsigned int	ind;	/* Corresponding indirect page size shift */
	unsigned int	flags;
#define MMU_PAGE_SIZE_DIRECT	0x1	/* Supported as a direct size */
#define MMU_PAGE_SIZE_INDIRECT	0x2	/* Supported as an indirect size */
};

extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

static inline bool arch_vmap_try_size(unsigned long addr, unsigned long end, u64 pfn,
				      unsigned int max_page_shift, unsigned long size)
{
	if (end - addr < size)
		return false;

	if ((1UL << max_page_shift) < size)
		return false;

	if (!IS_ALIGNED(addr, size))
		return false;

	if (!IS_ALIGNED(PFN_PHYS(pfn), size))
		return false;

	return true;
}

static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K))
		return SZ_512K;
	if (PAGE_SIZE == SZ_16K)
		return SZ_16K;	/* cannot map below the base page size */
	if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K))
		return SZ_16K;
	return PAGE_SIZE;
}
#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size

static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	if (size >= SZ_512K)
		return 19;	/* 512K pages */
	else if (size >= SZ_16K)
		return 14;	/* 16K pages */
	else
		return PAGE_SHIFT;
}
#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
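
/*
 * Worked example (illustrative only): with 4K pages and max_page_shift >= 19,
 * mapping a 512K-aligned range of 512K + 16K yields SZ_512K on the first
 * call, SZ_16K for the 16K-aligned remainder, and PAGE_SIZE otherwise;
 * arch_vmap_pte_supported_shift() reports the matching shifts (19 and 14),
 * so huge vmalloc mappings degrade gracefully to smaller sizes instead of
 * failing.
 */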

/* patch sites */
extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;
extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_8XX_H_ */