/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
 */

#ifndef _ASM_RISCV_PAGE_H
#define _ASM_RISCV_PAGE_H

#include <linux/pfn.h>
#include <linux/const.h>

#define PAGE_SHIFT	(12)
#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

#ifdef CONFIG_64BIT
#define HUGE_MAX_HSTATE		2
#else
#define HUGE_MAX_HSTATE		1
#endif
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

/*
 * PAGE_OFFSET -- the first address of the first page of memory.
 * When not using MMU this corresponds to the first free page in
 * physical memory (aligned on a page boundary).
 */
#define PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)

#define KERN_VIRT_SIZE		(-PAGE_OFFSET)

#ifndef __ASSEMBLY__

#define PAGE_UP(addr)	(((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
#define PAGE_DOWN(addr)	((addr)&(~((PAGE_SIZE)-1)))

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr, size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr, size)	((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr, size)	_ALIGN_UP(addr, size)

#define clear_page(pgaddr)	memset((pgaddr), 0, PAGE_SIZE)
#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)

#define clear_user_page(pgaddr, vaddr, page)	memset((pgaddr), 0, PAGE_SIZE)
#define copy_user_page(vto, vfrom, vaddr, topg) \
			memcpy((vto), (vfrom), PAGE_SIZE)

/*
 * Use struct definitions to apply C type checking
 */

/* Page Global Directory entry */
typedef struct {
	unsigned long pgd;
} pgd_t;

/* Page Table entry */
typedef struct {
	unsigned long pte;
} pte_t;

typedef struct {
	unsigned long pgprot;
} pgprot_t;

typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) })
#define __pgd(x)	((pgd_t) { (x) })
#define __pgprot(x)	((pgprot_t) { (x) })

#ifdef CONFIG_64BIT
#define PTE_FMT "%016lx"
#else
#define PTE_FMT "%08lx"
#endif

#ifdef CONFIG_MMU
extern unsigned long va_pa_offset;
#ifdef CONFIG_64BIT
extern unsigned long va_kernel_pa_offset;
#endif
#ifdef CONFIG_XIP_KERNEL
extern unsigned long va_kernel_xip_pa_offset;
#endif
extern unsigned long pfn_base;
#define ARCH_PFN_OFFSET		(pfn_base)
#else
#define va_pa_offset		0
#ifdef CONFIG_64BIT
#define va_kernel_pa_offset	0
#endif
#define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */

extern unsigned long kernel_virt_addr;

#ifdef CONFIG_64BIT
#define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_pa_offset))
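/*
 * With CONFIG_XIP_KERNEL the kernel image is split across two physical
 * regions: text executes in place from flash, while writable data lives
 * in RAM. The macros below pick the mapping accordingly: a physical
 * address at or above CONFIG_PHYS_RAM_BASE is in the RAM half and sits
 * XIP_OFFSET past the start of the kernel virtual mapping; anything
 * below it is in flash and uses the dedicated XIP offset.
 */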
#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_pa_to_va(y)	({					\
	unsigned long _y = y;							\
	(_y >= CONFIG_PHYS_RAM_BASE) ?						\
		(void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) : \
		(void *)((unsigned long)(_y) + va_kernel_xip_pa_offset);	\
	})
#else
#define kernel_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_kernel_pa_offset))
#endif
#define __pa_to_va_nodebug(x)		linear_mapping_pa_to_va(x)

#define linear_mapping_va_to_pa(x)	((unsigned long)(x) - va_pa_offset)
#ifdef CONFIG_XIP_KERNEL
#define kernel_mapping_va_to_pa(y)	({					\
	unsigned long _y = y;							\
	(_y < kernel_virt_addr + XIP_OFFSET) ?					\
		((unsigned long)(_y) - va_kernel_xip_pa_offset) :		\
		((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET);	\
	})
#else
#define kernel_mapping_va_to_pa(x)	((unsigned long)(x) - va_kernel_pa_offset)
#endif
#define __va_to_pa_nodebug(x)	({						\
	unsigned long _x = x;							\
	(_x < kernel_virt_addr) ?						\
		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
	})
#else
#define __pa_to_va_nodebug(x)	((void *)((unsigned long) (x) + va_pa_offset))
#define __va_to_pa_nodebug(x)	((unsigned long)(x) - va_pa_offset)
#endif

#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__va_to_pa_nodebug(x)
#define __phys_addr_symbol(x)	__va_to_pa_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */

#define __pa_symbol(x)	__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa(x)		__virt_to_phys((unsigned long)(x))
#define __va(x)		((void *)__pa_to_va_nodebug((phys_addr_t)(x)))

#define phys_to_pfn(phys)	(PFN_DOWN(phys))
#define pfn_to_phys(pfn)	(PFN_PHYS(pfn))

#define virt_to_pfn(vaddr)	(phys_to_pfn(__pa(vaddr)))
#define pfn_to_virt(pfn)	(__va(pfn_to_phys(pfn)))

#define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
#define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))

#define page_to_phys(page)	(pfn_to_phys(page_to_pfn(page)))
#define page_to_bus(page)	(page_to_phys(page))
#define phys_to_page(paddr)	(pfn_to_page(phys_to_pfn(paddr)))

#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) \
	(((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr))
#endif

#endif /* __ASSEMBLY__ */

#define virt_addr_valid(vaddr)	({						\
	unsigned long _addr = (unsigned long)vaddr;				\
	(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
})

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _ASM_RISCV_PAGE_H */