xref: /openbmc/linux/arch/sparc/include/asm/highmem.h (revision f99e0237)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * highmem.h: virtual kernel memory mappings for high memory
 *
 * Used in CONFIG_HIGHMEM systems for memory pages which
 * are not addressable by direct kernel virtual addresses.
 *
 * Copyright (C) 1999 Gerhard Wichert, Siemens AG
 *		      Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * up to 16 Terabyte physical memory. With current x86 CPUs
 * we now support up to 64 Gigabytes physical RAM.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#ifndef _ASM_HIGHMEM_H
#define _ASM_HIGHMEM_H

#ifdef __KERNEL__

#include <linux/interrupt.h>
#include <linux/pgtable.h>
#include <asm/vaddrs.h>
#include <asm/pgtsrmmu.h>

/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;

#define kmap_prot __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE)
extern pte_t *pkmap_page_table;

/*
 * Right now we initialize only a single pte table. It can be extended
 * easily; subsequent pte tables have to be allocated in one physical
 * chunk of RAM.  Currently the simplest way to do this is to align the
 * pkmap region on a pagetable boundary (4MB).
 */
#define LAST_PKMAP 1024
#define PKMAP_SIZE (LAST_PKMAP << PAGE_SHIFT)
#define PKMAP_BASE PMD_ALIGN(SRMMU_NOCACHE_VADDR + (SRMMU_MAX_NOCACHE_PAGES << PAGE_SHIFT))
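
/*
 * A worked example of the sizing above (a sketch, assuming the usual
 * sparc32 4KB pages, i.e. PAGE_SHIFT == 12): PKMAP_SIZE is
 * 1024 << 12 = 4MB, exactly the one-pagetable chunk the comment above
 * requires, which is why PKMAP_BASE is rounded up with PMD_ALIGN().
 */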

#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_NR(virt)  ((virt - PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))

#define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))
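
/*
 * Example of the index <-> address conversion above (hypothetical
 * values, assuming 4KB pages): PKMAP_ADDR(3) is PKMAP_BASE + 0x3000,
 * and PKMAP_NR(PKMAP_BASE + 0x3000) gives 3 back.  LAST_PKMAP_MASK is
 * what the generic pkmap code uses to wrap its allocation cursor
 * around after LAST_PKMAP entries.
 */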

#define flush_cache_kmaps()	flush_cache_all()

/* FIXME: Use __flush_*_one(vaddr) instead of flush_*_all() -- Anton */
#define arch_kmap_local_pre_map(vaddr, pteval)	flush_cache_all()
#define arch_kmap_local_pre_unmap(vaddr)	flush_cache_all()
#define arch_kmap_local_post_map(vaddr, pteval)	flush_tlb_all()
#define arch_kmap_local_post_unmap(vaddr)	flush_tlb_all()
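
/*
 * These hooks are invoked by the generic kmap_local code in
 * mm/highmem.c around setting up and tearing down a temporary
 * mapping.  A typical caller (a sketch only, not part of this header)
 * looks like:
 *
 *	void *vaddr = kmap_local_page(page);
 *	memcpy(buf, vaddr, len);
 *	kunmap_local(vaddr);
 *
 * kmap_local_page() runs arch_kmap_local_pre_map()/post_map() around
 * installing the pte, and kunmap_local() runs pre_unmap()/post_unmap()
 * around clearing it; for now sparc32 flushes the caches and TLB
 * globally at each step, hence the FIXME above.
 */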

#endif /* __KERNEL__ */

#endif /* _ASM_HIGHMEM_H */