/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>

#include <asm/cacheflush.h>

#include "highmem-internal.h"

/**
 * kmap - Map a page for long term usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can only be invoked from preemptible task context because on 32bit
 * systems with CONFIG_HIGHMEM enabled this function might sleep.
 *
 * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
 * this returns the virtual address of the direct kernel mapping.
 *
 * The returned virtual address is globally visible and valid up to the
 * point where it is unmapped via kunmap(). The pointer can be handed to
 * other contexts.
 *
 * For highmem pages on 32bit systems this can be slow as the mapping space
 * is limited and protected by a global lock. In case that there is no
 * mapping slot available the function blocks until a slot is released via
 * kunmap().
 */
static inline void *kmap(struct page *page);

/**
 * kunmap - Unmap the virtual address mapped by kmap()
 * @page: Pointer to the page which was mapped by kmap()
 *
 * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
 * pages in the low memory area.
 */
static inline void kunmap(struct page *page);

/**
 * kmap_to_page - Get the page for a kmap'ed address
 * @addr: The address to look up
 *
 * Returns: The page which is mapped to @addr.
 */
static inline struct page *kmap_to_page(void *addr);

/**
 * kmap_flush_unused - Flush all unused kmap mappings in order to
 *		       remove stray mappings
 */
static inline void kmap_flush_unused(void);

/**
 * kmap_local_page - Map a page for temporary usage
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Can be invoked from any context.
 *
 * Requires careful handling when nesting multiple mappings because the map
 * management is stack based. The unmap has to be in the reverse order of
 * the map operation:
 *
 * addr1 = kmap_local_page(page1);
 * addr2 = kmap_local_page(page2);
 * ...
 * kunmap_local(addr2);
 * kunmap_local(addr1);
 *
 * Unmapping addr1 before addr2 is invalid and causes malfunction.
 *
 * Contrary to kmap() mappings the mapping is only valid in the context of
 * the caller and cannot be handed to other contexts.
 *
 * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
 * virtual address of the direct mapping. Only real highmem pages are
 * temporarily mapped.
 *
 * While it is significantly faster than kmap() for the highmem case it
 * comes with restrictions about the pointer validity. Only use when really
 * necessary.
 *
 * On HIGHMEM enabled systems mapping a highmem page has the side effect of
 * disabling migration in order to keep the virtual address stable across
 * preemption. No caller of kmap_local_page() can rely on this side effect.
 */
static inline void *kmap_local_page(struct page *page);
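
/*
 * Example (illustrative sketch, not part of this header's API): reading
 * a word out of a possibly-highmem page through a short lived local
 * mapping. The helper name peek_page() is hypothetical.
 *
 *	static u32 peek_page(struct page *page)
 *	{
 *		u32 *addr = kmap_local_page(page);
 *		u32 val = *addr;
 *
 *		kunmap_local(addr);
 *		return val;
 *	}
 *
 * The pointer is only valid between the map and the unmap and only in
 * the calling context; it must not be stored or handed to other threads.
 */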
/**
 * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
 * @page: Pointer to the page to be mapped
 *
 * Returns: The virtual address of the mapping
 *
 * Effectively a wrapper around kmap_local_page() which disables pagefaults
 * and preemption.
 *
 * Do not use in new code. Use kmap_local_page() instead.
 */
static inline void *kmap_atomic(struct page *page);

/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
 * @addr: Virtual address to be unmapped
 *
 * Counterpart to kmap_atomic().
 *
 * Effectively a wrapper around kunmap_local() which additionally undoes
 * the side effects of kmap_atomic(), i.e. re-enabling pagefaults and
 * preemption.
 */

/* Highmem related interfaces for management code */
static inline unsigned int nr_free_highpages(void);
static inline unsigned long totalhigh_pages(void);

#ifndef ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
}
#endif

#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
}
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
#endif

/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
#ifndef clear_user_highpage
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page);
	clear_user_page(addr, vaddr, page);
	kunmap_atomic(addr);
}
#endif

#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
/**
 * alloc_zeroed_user_highpage_movable - Allocate a zeroed HIGHMEM page for a VMA that the caller knows can move
 * @vma: The VMA the page is to be allocated for
 * @vaddr: The virtual address the page will be inserted into
 *
 * This function will allocate a page for a VMA that the caller knows will
 * be able to migrate in the future using move_pages() or be reclaimed.
 *
 * An architecture may override this function by defining
 * __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE and providing their own
 * implementation.
 */
static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
				   unsigned long vaddr)
{
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

	if (page)
		clear_user_highpage(page, vaddr);

	return page;
}
#endif

static inline void clear_highpage(struct page *page)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE

static inline void tag_clear_highpage(struct page *page)
{
}

#endif
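
/*
 * Conversion sketch (illustrative, not part of this header): the
 * deprecated kmap_atomic() pattern used by the helpers above and its
 * kmap_local_page() replacement in new code. If a caller relied on the
 * implicit pagefault/preempt disable of kmap_atomic(), that has to be
 * made explicit around the kmap_local_page() section.
 *
 *	old:	addr = kmap_atomic(page);
 *		...
 *		kunmap_atomic(addr);
 *
 *	new:	addr = kmap_local_page(page);
 *		...
 *		kunmap_local(addr);
 */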
/*
 * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
 * If we pass in a head page, we can zero up to the size of the compound page.
 */
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2);
#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
static inline void zero_user_segments(struct page *page,
		unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	void *kaddr = kmap_atomic(page);
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (end1 > start1)
		memset(kaddr + start1, 0, end1 - start1);

	if (end2 > start2)
		memset(kaddr + start2, 0, end2 - start2);

	kunmap_atomic(kaddr);
	for (i = 0; i < compound_nr(page); i++)
		flush_dcache_page(page + i);
}
#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */

static inline void zero_user_segment(struct page *page,
	unsigned start, unsigned end)
{
	zero_user_segments(page, start, end, 0, 0);
}

static inline void zero_user(struct page *page,
	unsigned start, unsigned size)
{
	zero_user_segments(page, start, start + size, 0, 0);
}

#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE

static inline void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

#ifndef __HAVE_ARCH_COPY_HIGHPAGE

static inline void copy_highpage(struct page *to, struct page *from)
{
	char *vfrom, *vto;

	vfrom = kmap_atomic(from);
	vto = kmap_atomic(to);
	copy_page(vto, vfrom);
	kunmap_atomic(vto);
	kunmap_atomic(vfrom);
}

#endif

static inline void memcpy_page(struct page *dst_page, size_t dst_off,
			       struct page *src_page, size_t src_off,
			       size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memcpy(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memmove_page(struct page *dst_page, size_t dst_off,
				struct page *src_page, size_t src_off,
				size_t len)
{
	char *dst = kmap_local_page(dst_page);
	char *src = kmap_local_page(src_page);

	VM_BUG_ON(dst_off + len > PAGE_SIZE || src_off + len > PAGE_SIZE);
	memmove(dst + dst_off, src + src_off, len);
	kunmap_local(src);
	kunmap_local(dst);
}

static inline void memset_page(struct page *page, size_t offset, int val,
			       size_t len)
{
	char *addr = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, val, len);
	kunmap_local(addr);
}

static inline void memcpy_from_page(char *to, struct page *page,
				    size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to, from + offset, len);
	kunmap_local(from);
}

static inline void memcpy_to_page(struct page *page, size_t offset,
				  const char *from, size_t len)
{
	char *to = kmap_local_page(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memcpy(to + offset, from, len);
	kunmap_local(to);
}
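
/*
 * Usage sketch (illustrative, not part of this header): updating a small
 * on-page record through the bounce helpers above instead of open coding
 * the map/copy/unmap sequence. struct example_hdr and the zero offset
 * are made up for illustration.
 *
 *	struct example_hdr hdr;
 *
 *	memcpy_from_page((char *)&hdr, page, 0, sizeof(hdr));
 *	hdr.generation++;
 *	memcpy_to_page(page, 0, (const char *)&hdr, sizeof(hdr));
 */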
static inline void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);

	VM_BUG_ON(offset + len > PAGE_SIZE);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

#endif /* _LINUX_HIGHMEM_H */