/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/pgtable-nommu.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *  Copyright (C) 2004 Hyok S. Choi
 */
#ifndef _ASMARM_PGTABLE_NOMMU_H
#define _ASMARM_PGTABLE_NOMMU_H

#ifndef __ASSEMBLY__

#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Trivial page table functions.
 */
#define pgd_present(pgd)	(1)
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_clear(pgdp)
#define kern_addr_valid(addr)	(1)
#define pmd_offset(a, b)	((void *)0)
/* FIXME */
/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PGDIR_SHIFT		21

#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
/* FIXME */

#define PAGE_NONE	__pgprot(0)
#define PAGE_SHARED	__pgprot(0)
#define PAGE_COPY	__pgprot(0)
#define PAGE_READONLY	__pgprot(0)
#define PAGE_KERNEL	__pgprot(0)

#define swapper_pg_dir	((pgd_t *) 0)

#define __swp_type(x)		(0)
#define __swp_offset(x)		(0)
#define __swp_entry(typ,off)	((swp_entry_t) { ((typ) | ((off) << 7)) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })


typedef pte_t *pte_addr_t;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr)	(virt_to_page(0))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot)	(prot)
#define pgprot_writecombine(prot)	(prot)
#define pgprot_dmacoherent(prot)	(prot)
#define pgprot_device(prot)	(prot)


/*
 * These would be in other places but having them here reduces the diffs.
 */
extern unsigned int kobjsize(const void *objp);

/*
 * No page table caches to initialise.
 */
#define pgtable_cache_init()	do { } while (0)

/*
 * All 32bit addresses are effectively valid for vmalloc...
 * Sort of meaningless for non-VM targets.
 */
#define VMALLOC_START	0UL
#define VMALLOC_END	0xffffffffUL

#define FIRST_USER_ADDRESS	0UL

#include <asm-generic/pgtable.h>

#else

/*
 * dummy tlb and user structures.
 */
#define v3_tlb_fns	(0)
#define v4_tlb_fns	(0)
#define v4wb_tlb_fns	(0)
#define v4wbi_tlb_fns	(0)
#define v6wbi_tlb_fns	(0)
#define v7wbi_tlb_fns	(0)

#define v3_user_fns	(0)
#define v4_user_fns	(0)
#define v4_mc_user_fns	(0)
#define v4wb_user_fns	(0)
#define v4wt_user_fns	(0)
#define v6_user_fns	(0)
#define xscale_mc_user_fns	(0)

#endif /*__ASSEMBLY__*/

#endif /* _ASMARM_PGTABLE_H */
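/*
 * Editor's illustrative sketch, not part of the upstream header: it
 * assumes a !CONFIG_MMU kernel build, and the helper name below is
 * invented purely for demonstration, which is why it is kept inside a
 * comment rather than compiled.  It shows how the stubs above collapse
 * generic page-table code into constants: pgd_present() is always 1,
 * pgd_none()/pgd_bad() are always 0, and the pgprot_*() modifiers are
 * identity operations.
 *
 *	static inline int nommu_mappings_are_flat(void)
 *	{
 *		pgprot_t prot = PAGE_KERNEL;
 *
 *		// "Uncached" and "write-combining" requests change
 *		// nothing here: cacheability is fixed by the hardware.
 *		prot = pgprot_noncached(prot);
 *		prot = pgprot_writecombine(prot);
 *
 *		// The vmalloc limits cover the whole 32-bit space, so
 *		// any address passes a generic is-vmalloc-range check.
 *		return VMALLOC_START == 0UL &&
 *		       VMALLOC_END == 0xffffffffUL &&
 *		       pgprot_val(prot) == pgprot_val(PAGE_KERNEL);
 *	}
 */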