/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

/*
 * Page table definitions for Qualcomm Hexagon processor.
 */
#include <asm/page.h>
#include <asm-generic/pgtable-nopmd.h>

/* A handy thing to have if one has the RAM. Declared in head.S */
extern unsigned long empty_zero_page;

/*
 * The PTE model described here is that of the Hexagon Virtual Machine,
 * which autonomously walks 2-level page tables. At a lower level, we
 * also describe the RISCish software-loaded TLB entry structure of
 * the underlying Hexagon processor. A kernel built to run on the
 * virtual machine has no need to know about the underlying hardware.
 */
#include <asm/vm_mmu.h>

/*
 * To maximize the comfort level for the PTE manipulation macros,
 * define the "well known" architecture-specific bits.
 */
#define _PAGE_READ	__HVM_PTE_R
#define _PAGE_WRITE	__HVM_PTE_W
#define _PAGE_EXECUTE	__HVM_PTE_X
#define _PAGE_USER	__HVM_PTE_U

/*
 * We have a total of 4 "soft" bits available in the abstract PTE.
 * The two mandatory software bits are Dirty and Accessed.
 * To make nonlinear swap work according to the more recent
 * model, we want a low order "Present" bit to indicate whether
 * the PTE describes MMU programming or swap space.
 */
#define _PAGE_PRESENT	(1<<0)
#define _PAGE_DIRTY	(1<<1)
#define _PAGE_ACCESSED	(1<<2)

/*
 * For now, let's say that Valid and Present are the same thing.
 * Alternatively, we could say that it's the "or" of R, W, and X
 * permissions.
 */
#define _PAGE_VALID	_PAGE_PRESENT

/*
 * We're not defining _PAGE_GLOBAL here, since there's no concept
 * of global pages or ASIDs exposed to the Hexagon Virtual Machine,
 * and we want to use the same page table structures and macros in
 * the native kernel as we do in the virtual machine kernel.
 * So we'll put up with a bit of inefficiency for now...
 */

/* We borrow bit 6 to store the exclusive marker in swap PTEs. */
#define _PAGE_SWP_EXCLUSIVE	(1<<6)

/*
 * Top "FOURTH" level (pgd), which for the Hexagon VM is really
 * only the second from the bottom, pgd and pud both being collapsed.
 * Each entry represents 4MB of virtual address space, 4K of table
 * thus maps the full 4GB.
 */
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024

#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#ifdef CONFIG_PAGE_SIZE_4KB
#define PTRS_PER_PTE 1024
#endif

#ifdef CONFIG_PAGE_SIZE_16KB
#define PTRS_PER_PTE 256
#endif

#ifdef CONFIG_PAGE_SIZE_64KB
#define PTRS_PER_PTE 64
#endif

#ifdef CONFIG_PAGE_SIZE_256KB
#define PTRS_PER_PTE 16
#endif

#ifdef CONFIG_PAGE_SIZE_1MB
#define PTRS_PER_PTE 4
#endif
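
/*
 * In every case above, PTRS_PER_PTE * PAGE_SIZE == PGDIR_SIZE (4 MB),
 * so one last-level page table exactly fills one PGD entry. With 4 KB
 * pages, for example: 1024 PTEs * 4 KB = 4 MB per PGD entry, and
 * 1024 PGD entries * 4 MB = the full 4 GB virtual address space.
 */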

/* Any bigger and the PTE disappears. */
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\
		pgd_val(e))

/*
 * Page Protection Constants. Includes (in this variant) cache attributes.
 */
extern unsigned long _dflt_cache_att;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
				_dflt_cache_att)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY	PAGE_READONLY
#define PAGE_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY_EXEC	PAGE_EXEC
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
				_PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				_PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att)

/*
 * Aliases for mapping mmap() protection bits to page protections.
 * These get used for static initialization, so using the _dflt_cache_att
 * variable for the default cache attribute isn't workable. If the
 * default gets changed at boot time, the boot option code has to
 * update data structures like the protection_map[] array.
 */
#define CACHEDEF	(CACHE_DEFAULT << 6)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* located in head.S */

/* HUGETLB not working currently */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
#endif

/*
 * For now, assume that higher-level code will do TLB/MMU invalidations
 * and don't insert that overhead into this low-level function.
 */
extern void sync_icache_dcache(pte_t pte);

#define pte_present_exec_user(pte) \
	((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
	(_PAGE_EXECUTE | _PAGE_USER))

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	/* should really be using pte_exec, if it weren't declared later. */
	if (pte_present_exec_user(pteval))
		sync_icache_dcache(pteval);

	*ptep = pteval;
}

/*
 * For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid
 * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
 * (Linux PTE), the key is to have bits 11..9 all zero. We'd use 0x7
 * as a universal null entry, but some of those least significant bits
 * are interpreted by software.
 */
#define _NULL_PMD	0x7
#define _NULL_PTE	0x0

static inline void pmd_clear(pmd_t *pmd_entry_ptr)
{
	pmd_val(*pmd_entry_ptr) = _NULL_PMD;
}

/*
 * Conveniently, a null PTE value is invalid.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep)
{
	pte_val(*ptep) = _NULL_PTE;
}

/**
 * pmd_none - check if pmd entry is mapped
 * @pmd: pmd entry
 *
 * MIPS checks it against that "invalid pte table" thing.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _NULL_PMD;
}

/**
 * pmd_present - is there a page table behind this?
 * Essentially the inverse of pmd_none. We maybe
 * save an inline instruction by defining it this
 * way, instead of simply "!pmd_none".
 */
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long)_NULL_PMD;
}

/**
 * pmd_bad - check if a PMD entry is "bad". That might mean swapped out.
 * As we have no known cause of badness, it's null, as it is for many
 * architectures.
 */
static inline int pmd_bad(pmd_t pmd)
{
	return 0;
}

/*
 * pmd_pfn - converts a PMD entry to a page frame number
 */
#define pmd_pfn(pmd)  (pmd_val(pmd) >> PAGE_SHIFT)

/*
 * pmd_page - converts a PMD entry to a page pointer
 */
#define pmd_page(pmd)  (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

/**
 * pte_none - check if pte is mapped
 * @pte: pte_t entry
 */
static inline int pte_none(pte_t pte)
{
	return pte_val(pte) == _NULL_PTE;
}

/*
 * pte_present - check if page is present
 */
static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/* mk_pte - make a PTE out of a page pointer and protection bits */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
#define pte_page(x) pfn_to_page(pte_pfn(x))

/* pte_mkold - mark PTE as not recently accessed */
static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

/* pte_mkyoung - mark PTE as recently accessed */
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

/* pte_mkclean - mark page as in sync with backing store */
static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

/* pte_mkdirty - mark page as modified */
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

/* pte_young - "is PTE marked as accessed"? */
static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

/* pte_dirty - "is PTE dirty?" */
static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

/* pte_modify - set protection bits on PTE */
static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= PAGE_MASK;
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

/* pte_wrprotect - mark page as not writable */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

/* pte_mkwrite - mark page as writable */
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

/* pte_mkexec - mark PTE as executable */
static inline pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= _PAGE_EXECUTE;
	return pte;
}

/* pte_read - "is PTE marked as readable?" */
static inline int pte_read(pte_t pte)
{
	return pte_val(pte) & _PAGE_READ;
}

/* pte_write - "is PTE marked as writable?" */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

/* pte_exec - "is PTE marked as executable?" */
static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXECUTE;
}

/* __pte_to_swp_entry - extract swap entry from PTE */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })

/* __swp_entry_to_pte - extract PTE from swap entry */
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

#define PFN_PTE_SHIFT PAGE_SHIFT
/* pfn_pte - convert page number and protection value to page table entry */
#define pfn_pte(pfn, pgprot) __pte((pfn << PAGE_SHIFT) | pgprot_val(pgprot))

/* pte_pfn - convert pte to page frame number */
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
}

/* ZERO_PAGE - returns the globally shared zero page */
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Swap/file PTE definitions. If _PAGE_PRESENT is zero, the rest of the PTE is
 * interpreted as swap information. The remaining free bits are interpreted as
 * listed below. Rather than have the TLB fill handler test
 * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
 * all zeros for swap entries, which speeds up the miss handler at the cost of
 * 3 bits of offset. That trade-off can be revisited if necessary, but Hexagon
 * processor architecture and target applications suggest a lot of TLB misses
 * and not much swap space.
 *
 * Format of swap PTE:
 *	bit	0:	Present (zero)
 *	bits	1-5:	swap type (arch independent layer uses 5 bits max)
 *	bit	6:	exclusive marker
 *	bits	7-9:	bits 2:0 of offset
 *	bits	10-12:	effectively _PAGE_PROTNONE (all zero)
 *	bits	13-31:	bits 21:3 of swap offset
 *
 * The split offset makes some of the following macros a little gnarly,
 * but there's plenty of precedent for this sort of thing.
 */

/* Used for swap PTEs */
#define __swp_type(swp_pte)	(((swp_pte).val >> 1) & 0x1f)

#define __swp_offset(swp_pte) \
	((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x3ffff8))

#define __swp_entry(type, offset) \
	((swp_entry_t) { \
		(((type & 0x1f) << 1) | \
		 ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
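
/*
 * Purely illustrative worked example of the encoding above:
 * __swp_entry(3, 0x12345) packs type 3 and offset 0x12345 as
 *
 *	type 3           -> (3 & 0x1f) << 1             = 0x00000006
 *	offset bits 2:0  -> (0x12345 & 0x7) << 7        = 0x00000280
 *	offset bits 21:3 -> (0x12345 & 0x3ffff8) << 10  = 0x048d0000
 *
 * giving a swap PTE value of 0x048d0286. Bit 0 (Present), bit 6
 * (exclusive marker) and bits 10-12 (the reserved permission bits) are
 * all zero, and __swp_type()/__swp_offset() recover 3 and 0x12345.
 */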

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

#endif