/* SPDX-License-Identifier: GPL-2.0
 *
 * This file contains the functions and defines necessary to modify and
 * use the SuperH page table tree.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 */
#ifndef __ASM_SH_PGTABLE_H
#define __ASM_SH_PGTABLE_H

/* X2 TLB parts use a 3-level layout; everything else is 2-level. */
#ifdef CONFIG_X2TLB
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef __ASSEMBLY__
#include <asm/addrspace.h>
#include <asm/fixmap.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * Effective and physical address definitions, to aid with sign
 * extension.
 */
#define NEFF		32
#define	NEFF_SIGN	(1LL << (NEFF - 1))
#define	NEFF_MASK	(-1LL << NEFF)

/*
 * Sign-extend a 32-bit effective address to 64 bits: if bit (NEFF - 1)
 * is set, fill the upper bits with ones, otherwise leave them zero.
 */
static inline unsigned long long neff_sign_extend(unsigned long val)
{
	unsigned long long extended = val;
	return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
}

/* Physical address width: 29 bits in 29-bit (legacy) mode, else 32. */
#ifdef CONFIG_29BIT
#define NPHYS		29
#else
#define NPHYS		32
#endif

#define	NPHYS_SIGN	(1LL << (NPHYS - 1))
#define	NPHYS_MASK	(-1LL << NPHYS)

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Entries per level */
#define PTRS_PER_PTE	(PAGE_SIZE / (1 << PTE_MAGNITUDE))

#define PHYS_ADDR_MASK29	0x1fffffff
#define PHYS_ADDR_MASK32	0xffffffff

/*
 * Return the physical address mask matching the MMU's current mode,
 * as reported by __in_29bit_mode().
 */
static inline unsigned long phys_addr_mask(void)
{
	/* Is the MMU in 29bit mode? */
	if (__in_29bit_mode())
		return PHYS_ADDR_MASK29;

	return PHYS_ADDR_MASK32;
}

#define PTE_PHYS_MASK		(phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK		(~(PTE_PHYS_MASK) << PAGE_SHIFT)

/* vmalloc area lives in P3, ending just below the fixmap (2-page gap). */
#define VMALLOC_START	(P3SEG)
#define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)

#include <asm/pgtable_32.h>

/*
 * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
 * protection for execute, and considers it the same as a read. Also, write
 * permission implies read permission. This is the closest we can get..
 *
 * SH-X2 (SH7785) and later parts take this to the opposite end of the extreme,
 * not only supporting separate execute, read, and write bits, but having
 * completely separate permission bits for user and kernel space.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXECREAD
#define __P101	PAGE_EXECREAD
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_WRITEONLY
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXECREAD
#define __S101	PAGE_EXECREAD
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX

typedef pte_t *pte_addr_t;

#define kern_addr_valid(addr)	(1)

/* PFN of a PTE: the frame bits of the (low word of the) entry. */
#define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))

struct vm_area_struct;
struct mm_struct;

extern void __update_cache(struct vm_area_struct *vma,
			   unsigned long address, pte_t pte);
extern void __update_tlb(struct vm_area_struct *vma,
			 unsigned long address, pte_t pte);

/*
 * Called after a PTE for @address has been installed: propagate the new
 * entry into the cache and TLB via the helpers above.
 */
static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_cache(vma, address, pte);
	__update_tlb(vma, address, pte);
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
extern void page_table_range_init(unsigned long start,
				  unsigned long end, pgd_t *pgd);

/*
 * True when all bits in @prot are set in the PTE and _PAGE_SPECIAL is
 * clear (folding _PAGE_SPECIAL into the test rejects special mappings).
 */
static inline bool __pte_access_permitted(pte_t pte, u64 prot)
{
	return (pte_val(pte) & (prot | _PAGE_SPECIAL)) == prot;
}

#ifdef CONFIG_X2TLB
/*
 * X2 TLB: extended PTE bits carry separate kernel/user read and write
 * permissions, so require both kernel and user bits for the access.
 */
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	u64 prot = _PAGE_PRESENT;

	prot |= _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_USER_READ);
	if (write)
		prot |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
	return __pte_access_permitted(pte, prot);
}
#else
/*
 * Legacy TLB: a present, user-accessible PTE permits reads; writes
 * additionally require _PAGE_RW.
 */
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	u64 prot = _PAGE_PRESENT | _PAGE_USER;

	if (write)
		prot |= _PAGE_RW;
	return __pte_access_permitted(pte, prot);
}
#endif

#define pte_access_permitted pte_access_permitted

/* arch/sh/mm/mmap.c */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* __ASM_SH_PGTABLE_H */