/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#ifndef __ASM_CSKY_PGTABLE_H
#define __ASM_CSKY_PGTABLE_H

#include <asm/fixmap.h>
#include <asm/addrspace.h>
#include <abi/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

#define PGDIR_SHIFT		22
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define PKMAP_BASE		(0xff800000)

#define VMALLOC_START		(0xc0008000)
#define VMALLOC_END		(PKMAP_BASE - 2*PAGE_SIZE)

/*
 * C-SKY uses a two-level paging structure:
 */
#define PGD_ORDER	0
#define PTE_ORDER	0

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PMD	1
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Find an entry in the third-level page table. */
#define __pte_offset_t(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	(pmd_page_vaddr(*(dir)) + __pte_offset_t(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address))
#define pmd_page(pmd)	(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define pte_clear(mm, addr, ptep)	set_pte((ptep), \
	(((unsigned int)addr & 0x80000000) ? __pte(1) : __pte(0)))
#define pte_none(pte)		(!(pte_val(pte) & 0xfffffffe))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_pfn(x)	((unsigned long)((x).pte_low >> PAGE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
				| pgprot_val(prot))

#define __READABLE	(_PAGE_READ | _PAGE_VALID | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED)

#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | \
			 _CACHE_MASK)

#define pte_unmap(pte)	((void)(pte))

#define __swp_type(x)			(((x).val >> 4) & 0xff)
#define __swp_offset(x)			((x).val >> 12)
#define __swp_entry(type, offset)	((swp_entry_t) {((type) << 4) | \
					((offset) << 12) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define pte_page(x)			pfn_to_page(pte_pfn(x))
#define __mk_pte(page_nr, pgprot)	__pte(((page_nr) << PAGE_SHIFT) | \
					pgprot_val(pgprot))

/*
 * C-SKY can't do page protection for execute and considers it the same
 * as read.  Also, write permissions imply read permissions.  This is
 * the closest we can get by reasonable means.
 */
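/*
 * Base protections for the __P/__S tables below: __Pxwr entries cover
 * private (copy-on-write) mappings, __Sxwr entries cover shared ones,
 * indexed by the exec/write/read permission bits.  PAGE_COPY is
 * deliberately read-only so private writable mappings fault and get
 * copied on first write.
 */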
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				_CACHE_CACHED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				_PAGE_GLOBAL | _CACHE_CACHED)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				_CACHE_CACHED)

#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern void load_pgd(unsigned long pg_dir);
extern pte_t invalid_pte_table[PTRS_PER_PTE];

static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline void set_pte(pte_t *p, pte_t pte)
{
	*p = pte;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
	/* prevent out-of-order execution */
	smp_mb();
}
#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd);

	return __va(ptr);
}

#define pmd_phys(pmd) pmd_val(pmd)

static inline void set_pmd(pmd_t *p, pmd_t pmd)
{
	*p = pmd;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
	/* prevent speculative execution */
	smp_mb();
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == __pa(invalid_pte_table);
}

#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) != __pa(invalid_pte_table));
}

static inline void pmd_clear(pmd_t *p)
{
	pmd_val(*p) = (__pa(invalid_pte_table));
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
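/*
 * The helpers below keep the ACCESSED/VALID and MODIFIED/DIRTY pairs in
 * sync: _PAGE_DIRTY is only set while both _PAGE_WRITE and
 * _PAGE_MODIFIED are set, and _PAGE_VALID is only set once the page is
 * readable and has been accessed.
 */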
static inline int pte_read(pte_t pte)
{
	return pte.pte_low & _PAGE_READ;
}

static inline int pte_write(pte_t pte)
{
	return (pte).pte_low & _PAGE_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte).pte_low & _PAGE_MODIFIED;
}

static inline int pte_young(pte_t pte)
{
	return (pte).pte_low & _PAGE_ACCESSED;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_DIRTY);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_VALID);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) |= _PAGE_VALID;
	return pte;
}

#define __pgd_offset(address)	pgd_index(address)
#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define pgd_index(address)	((address) >> PGDIR_SHIFT)

/*
 * Macro to mark a page protection value as "uncacheable".  Note that
 * "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot)));
}

/* to find an entry in a page-table-directory */
static inline pgd_t *pgd_offset(struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* Find an entry in the third-level page table. */
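/* Same lookup as the pte_offset_kernel() macro above, in function form. */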
static inline pte_t *pte_offset(pmd_t *dir, unsigned long address)
{
	return (pte_t *) (pmd_page_vaddr(*dir)) +
		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

extern void show_jtlb_table(void);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do {} while (0)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#include <asm-generic/pgtable.h>

#endif /* __ASM_CSKY_PGTABLE_H */