/* SPDX-License-Identifier: GPL-2.0 */
/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <asm/fixmap.h>

extern pte_t *pkmap_page_table;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
/*
 * With 4K pages we use one full pte table for PKMAP. With 16K/64K/256K
 * pages a pte table covers enough memory (32MB/512MB/2GB respectively)
 * that both FIXMAP and PKMAP fit in a single pte table; with those page
 * sizes we use 512 pages for PKMAP.
 */

#define PKMAP_ORDER	PTE_SHIFT
#define LAST_PKMAP	(1 << PKMAP_ORDER)

#define PKMAP_BASE	((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
								& PMD_MASK)

#define LAST_PKMAP_MASK	(LAST_PKMAP - 1)
#define PKMAP_NR(virt)	(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)	(PKMAP_BASE + ((nr) << PAGE_SHIFT))

#define flush_cache_kmaps()	{ flush_icache(); flush_dcache(); }

#define arch_kmap_local_post_map(vaddr, pteval)	\
	local_flush_tlb_page(NULL, vaddr);
#define arch_kmap_local_post_unmap(vaddr)	\
	local_flush_tlb_page(NULL, vaddr);

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */
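
/*
 * Illustrative usage sketch, not part of this header: a hypothetical
 * caller mapping a possibly-highmem page through the generic kmap_local
 * API from <linux/highmem.h>. For a highmem page the core kmap code
 * installs a temporary pte and invokes the arch_kmap_local_post_map() /
 * arch_kmap_local_post_unmap() hooks defined above to flush the local
 * TLB entry. Only kmap_local_page(), kunmap_local() and memset() are
 * real kernel interfaces; example_clear_page() is made up for the
 * sketch and would live in a .c file, so it is kept under #if 0 here.
 */
#if 0
#include <linux/highmem.h>
#include <linux/string.h>

static void example_clear_page(struct page *page)
{
	/*
	 * Map the page into a temporary per-CPU kernel slot; for a
	 * highmem page this ends up triggering arch_kmap_local_post_map().
	 */
	void *vaddr = kmap_local_page(page);

	memset(vaddr, 0, PAGE_SIZE);

	/*
	 * Drop the temporary mapping again; arch_kmap_local_post_unmap()
	 * flushes the now-stale TLB entry for the slot.
	 */
	kunmap_local(vaddr);
}
#endif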