/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h>.
 *
 * Copyright (C) 1998-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */


#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/types.h>

#define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */

/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPT long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT		0
#define _PAGE_A_BIT		5
#define _PAGE_D_BIT		6

#define _PAGE_P			(1 << _PAGE_P_BIT)	/* page present bit */
#define _PAGE_MA_WB		(0x0 <<  2)	/* write back memory attribute */
#define _PAGE_MA_UC		(0x4 <<  2)	/* uncacheable memory attribute */
#define _PAGE_MA_UCE		(0x5 <<  2)	/* UC exported attribute */
#define _PAGE_MA_WC		(0x6 <<  2)	/* write coalescing memory attribute */
#define _PAGE_MA_NAT		(0x7 <<  2)	/* not-a-thing attribute */
#define _PAGE_MA_MASK		(0x7 <<  2)
#define _PAGE_PL_0		(0 <<  7)	/* privilege level 0 (kernel) */
#define _PAGE_PL_1		(1 <<  7)	/* privilege level 1 (unused) */
#define _PAGE_PL_2		(2 <<  7)	/* privilege level 2 (unused) */
#define _PAGE_PL_3		(3 <<  7)	/* privilege level 3 (user) */
#define _PAGE_PL_MASK		(3 <<  7)
#define _PAGE_AR_R		(0 <<  9)	/* read only */
#define _PAGE_AR_RX		(1 <<  9)	/* read & execute */
#define _PAGE_AR_RW		(2 <<  9)	/* read & write */
#define _PAGE_AR_RWX		(3 <<  9)	/* read, write & execute */
#define _PAGE_AR_R_RW		(4 <<  9)	/* read / read & write */
#define _PAGE_AR_RX_RWX		(5 <<  9)	/* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW		(6 <<  9)	/* read, write & exec / read & write */
#define _PAGE_AR_X_RX		(7 <<  9)	/* exec & promote / read & exec */
#define _PAGE_AR_MASK		(7 <<  9)
#define _PAGE_AR_SHIFT		9
#define _PAGE_A			(1 << _PAGE_A_BIT)	/* page accessed bit */
#define _PAGE_D			(1 << _PAGE_D_BIT)	/* page dirty bit */
#define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED		(__IA64_UL(1) << 52)	/* exception deferral */
#define _PAGE_PROTNONE		(__IA64_UL(1) << 63)

#define _PFN_MASK		_PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK	(_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)

#define _PAGE_SIZE_4K	12
#define _PAGE_SIZE_8K	13
#define _PAGE_SIZE_16K	14
#define _PAGE_SIZE_64K	16
#define _PAGE_SIZE_256K	18
#define _PAGE_SIZE_1M	20
#define _PAGE_SIZE_4M	22
#define _PAGE_SIZE_16M	24
#define _PAGE_SIZE_64M	26
#define _PAGE_SIZE_256M	28
#define _PAGE_SIZE_1G	30
#define _PAGE_SIZE_4G	32

#define __ACCESS_BITS		_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED	_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define __DIRTY_BITS		_PAGE_ED | __DIRTY_BITS_NO_ED

/*
 * Number of pointers a page-table level holds, expressed as a shift:
 */
#define PTRS_PER_PTD_SHIFT	(PAGE_SHIFT-3)

/*
 * Definitions for fourth level:
 */
#define PTRS_PER_PTE	(__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))
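/*
 * Worked example (added for illustration, not part of the original
 * header; assumes the common 16KB page size): PAGE_SHIFT is 14, so
 * PTRS_PER_PTD_SHIFT is 14 - 3 = 11 and each page-table page holds
 * 2^11 = 2048 eight-byte entries.
 */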
/*
 * Definitions for third level:
 *
 * PMD_SHIFT determines the size of the area a third-level page table
 * can map.
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PTRS_PER_PMD	(1UL << (PTRS_PER_PTD_SHIFT))

#if CONFIG_PGTABLE_LEVELS == 4
/*
 * Definitions for second level:
 *
 * PUD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PUD_SHIFT	(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PTRS_PER_PUD	(1UL << (PTRS_PER_PTD_SHIFT))
#endif

/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#if CONFIG_PGTABLE_LEVELS == 4
#define PGDIR_SHIFT		(PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
#else
#define PGDIR_SHIFT		(PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
#endif
#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD_SHIFT	PTRS_PER_PTD_SHIFT
#define PTRS_PER_PGD		(1UL << PTRS_PER_PGD_SHIFT)
#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */

/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed.  They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY_EXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
#define PAGE_KERNEL_UC	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX | \
				 _PAGE_MA_UC)

# ifndef __ASSEMBLY__

#include <linux/sched/mm.h>	/* for mm_struct */
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/*
 * Next come the mappings that determine how mmap() protection bits
 * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
 * _P version is used for private mappings, the _S version for shared
 * (MAP_SHARED) mappings.  For a private mapping, we do a copy-on-write
 * if a task attempts to write to the page.
 */
	/* xwr */
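/*
 * Illustrative note (added; not in the original header): PAGE_COPY
 * carries the same bits as PAGE_READONLY, so a store to a private
 * mapping raises a protection fault and the generic fault path then
 * performs the copy-on-write described above.
 */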
#define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#if CONFIG_PGTABLE_LEVELS == 4
#define pud_ERROR(e)	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pmd_ERROR(e)	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e)	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))


/*
 * Some definitions to translate between mem_map, PTEs, and page addresses:
 */


/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}

/*
 * Now come the defines and routines to manage and access the page table.
 */


#define VMALLOC_START		(RGN_BASE(RGN_GATE) + 0x200000000UL)
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10)))
# define vmemmap		((struct page *)VMALLOC_END)
#else
# define VMALLOC_END		(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
#endif

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v)	((v) - RGN_BASE(RGN_GATE))
#define kc_offset_to_vaddr(o)	((o) + RGN_BASE(RGN_GATE))

#define RGN_MAP_SHIFT	(PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
#define RGN_MAP_LIMIT	((1UL << RGN_MAP_SHIFT) - PAGE_SIZE)	/* per region addr limit */

/*
 * Conversion functions: convert page frame number (pfn) and a protection value to a page
 * table entry (pte).
 */
#define pfn_pte(pfn, pgprot) \
({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })

/* Extract pfn from pte. */
#define pte_pfn(_pte)		((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))

#define pte_none(pte)			(!pte_val(pte))
#define pte_present(pte)		(pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(mm,addr,pte)		(pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte)			virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))

#define pmd_none(pmd)			(!pmd_val(pmd))
#define pmd_bad(pmd)			(!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd)		(pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp)			(pmd_val(*(pmdp)) = 0UL)
#define pmd_page_vaddr(pmd)		((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
#define pmd_pfn(pmd)			((pmd_val(pmd) & _PFN_MASK) >> PAGE_SHIFT)
#define pmd_page(pmd)			virt_to_page((pmd_val(pmd) + PAGE_OFFSET))

#define pud_none(pud)			(!pud_val(pud))
#define pud_bad(pud)			(!ia64_phys_addr_valid(pud_val(pud)))
#define pud_present(pud)		(pud_val(pud) != 0UL)
#define pud_clear(pudp)			(pud_val(*(pudp)) = 0UL)
#define pud_pgtable(pud)		((pmd_t *) __va(pud_val(pud) & _PFN_MASK))
#define pud_page(pud)			virt_to_page((pud_val(pud) + PAGE_OFFSET))

#if CONFIG_PGTABLE_LEVELS == 4
#define p4d_none(p4d)			(!p4d_val(p4d))
#define p4d_bad(p4d)			(!ia64_phys_addr_valid(p4d_val(p4d)))
#define p4d_present(p4d)		(p4d_val(p4d) != 0UL)
#define p4d_clear(p4dp)			(p4d_val(*(p4dp)) = 0UL)
#define p4d_pgtable(p4d)		((pud_t *) __va(p4d_val(p4d) & _PFN_MASK))
#define p4d_page(p4d)			virt_to_page((p4d_val(p4d) + PAGE_OFFSET))
#endif
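/*
 * Worked example (added for illustration): pfn_pte() and pte_pfn()
 * above are inverses for any implemented pfn, because _PFN_MASK covers
 * exactly the bits that (pfn << PAGE_SHIFT) can set below
 * IA64_MAX_PHYS_BITS:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	pte_pfn(pte) == pfn		(always true for a valid pfn)
 */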
/*
 * The following have defined behavior only if pte_present() is true.
 */
#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)

/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkold(pte)		(__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
#define pte_mkhuge(pte)		(__pte(pte_val(pte)))

/*
 * Because ia64's Icache and Dcache are not coherent (on a cpu), we need to
 * sync the icache and dcache when we insert a *new* executable page.
 * __ia64_sync_icache_dcache() checks the PG_arch_1 bit and flushes the
 * icache if necessary.
 *
 * set_pte() is also called by the kernel, but we can expect that the kernel
 * flushes the icache explicitly if necessary.
 */
#define pte_present_exec_user(pte)\
	((pte_val(pte) & (_PAGE_P | _PAGE_PL_MASK | _PAGE_AR_RX)) == \
		(_PAGE_P | _PAGE_PL_3 | _PAGE_AR_RX))

extern void __ia64_sync_icache_dcache(pte_t pteval);
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	/* page is present && page is user && page is executable
	 * && (page swapin or new page or page migration
	 *	|| copy_on_write with page copying.)
	 */
	if (pte_present_exec_user(pteval) &&
	    (!pte_present(*ptep) ||
	     pte_pfn(*ptep) != pte_pfn(pteval)))
		/* load_module() calls flush_icache_range() explicitly */
		__ia64_sync_icache_dcache(pteval);
	*ptep = pteval;
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

/*
 * Make page protection values cacheable, uncacheable, or write-
 * combining.  Note that "protection" is really a misnomer here as the
 * protection value contains the memory attribute bits, dirty bits, and
 * various other bits as well.
 */
#define pgprot_cacheable(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WB)
#define pgprot_noncached(prot)		__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
#define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

static inline unsigned long
pgd_index (unsigned long address)
{
	unsigned long region = address >> 61;
	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	return (region << (PAGE_SHIFT - 6)) | l1index;
}
#define pgd_index pgd_index

/*
 * In the kernel's mapped region we know everything is in region number 5, so
 * as an optimisation its PGD already points to the area for that region.
 * However, this also means that we cannot use pgd_index() and we must
 * never add the region here.
 */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
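/*
 * Worked example (added for illustration, assuming 16KB pages, i.e.
 * PAGE_SHIFT = 14): PTRS_PER_PGD is 2048, so pgd_index() gives each of
 * the eight regions 2048/8 = 256 pgd slots, with the region number (the
 * top three address bits) selecting the block; an address in region 1
 * lands in slots 256..511.  pgd_offset_k(), by contrast, indexes the
 * kernel PGD with the low bits only, per the comment above.
 */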
/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
   resides in the kernel-mapped segment, hence we use pgd_offset_k()
   here.  */
#define pgd_offset_gate(mm, addr)	pgd_offset_k(addr)

/* atomic versions of some of the PTE manipulations: */

static inline int
ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
#endif
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;
	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}

static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#define update_mmu_cache(vma, address, ptep) do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

/*
 * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
 *	 bits in the swap-type field of the swap pte.  It would be nice to
 *	 enforce that, but we can't easily include <linux/swap.h> here.
 *	 (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
 *
 * Format of swap pte:
 *	bit   0   : present bit (must be zero)
 *	bits  1- 7: swap-type
 *	bits  8-62: swap offset
 *	bit  63   : _PAGE_PROTNONE bit
 */
#define __swp_type(entry)		(((entry).val >> 1) & 0x7f)
#define __swp_offset(entry)		(((entry).val << 1) >> 9)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 1) | ((long) (offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
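/*
 * Worked example (added for illustration): __swp_entry(3, 0x1234)
 * yields val = (3 << 1) | (0x1234 << 8) = 0x123406; decoding gives
 * __swp_type = (0x123406 >> 1) & 0x7f = 3 and
 * __swp_offset = (0x123406 << 1) >> 9 = 0x1234.  The shift left then
 * right in __swp_offset discards the _PAGE_PROTNONE bit (bit 63).
 */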
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT	(HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
#endif


#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
 * WRITABLE bits turned on, when the value at PTEP did not.  The
 * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
 *
 * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
 * having to worry about races.  On SMP machines, there are only two
 * cases where this is true:
 *
 *	(1) *PTEP has the PRESENT bit turned OFF
 *	(2) ENTRY has the DIRTY bit turned ON
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop
 * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
 * However, like on x86, we can get a more streamlined version by
 * observing that it is OK to drop ACCESSED bit updates when
 * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed && __safely_writable) {				\
		set_pte(__ptep, __entry);				\
		flush_tlb_page(__vma, __addr);				\
	}								\
	__changed;							\
})
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
({									\
	int __changed = !pte_same(*(__ptep), __entry);			\
	if (__changed) {						\
		set_pte_at((__vma)->vm_mm, (__addr), __ptep, __entry);	\
		flush_tlb_page(__vma, __addr);				\
	}								\
	__changed;							\
})
#endif
# endif /* !__ASSEMBLY__ */

/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT	_PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE	(1 << IA64_GRANULE_SHIFT)
/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT	_PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE	(1 << KERNEL_TR_PAGE_SHIFT)

/* These tell get_user_pages() that the first gate page is accessible from user-level. */
#define FIXADDR_USER_START	GATE_ADDR
#ifdef HAVE_BUGGY_SEGREL
# define FIXADDR_USER_END	(GATE_ADDR + 2*PAGE_SIZE)
#else
# define FIXADDR_USER_END	(GATE_ADDR + 2*PERCPU_PAGE_SIZE)
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PGD_OFFSET_GATE


#if CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#endif
#include <asm-generic/pgtable-nop4d.h>

#endif /* _ASM_IA64_PGTABLE_H */