#ifndef _ASM_X86_PAGE_32_H
#define _ASM_X86_PAGE_32_H

/*
 * This handles the memory map.
 *
 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 950MB.
 *
 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
 * and CONFIG_HIGHMEM64G options in the kernel configuration.
 */
#define __PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)

#ifdef CONFIG_4KSTACKS
#define THREAD_ORDER	0
#else
#define THREAD_ORDER	1
#endif
#define THREAD_SIZE	(PAGE_SIZE << THREAD_ORDER)

#define STACKFAULT_STACK 0
#define DOUBLEFAULT_STACK 1
#define NMI_STACK 0
#define DEBUG_STACK 0
#define MCE_STACK 0
#define N_EXCEPTION_STACKS 1

#ifdef CONFIG_X86_PAE
/* 44=32+12, the limit we can fit into an unsigned long pfn */
#define __PHYSICAL_MASK_SHIFT	44
#define __VIRTUAL_MASK_SHIFT	32
#define PAGETABLE_LEVELS	3

#ifndef __ASSEMBLY__
typedef u64	pteval_t;
typedef u64	pmdval_t;
typedef u64	pudval_t;
typedef u64	pgdval_t;
typedef u64	pgprotval_t;

typedef union {
	struct {
		unsigned long pte_low, pte_high;
	};
	pteval_t pte;
} pte_t;
#endif	/* __ASSEMBLY__ */

#else	/* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT	32
#define __VIRTUAL_MASK_SHIFT	32
#define PAGETABLE_LEVELS	2

#ifndef __ASSEMBLY__
typedef unsigned long	pteval_t;
typedef unsigned long	pmdval_t;
typedef unsigned long	pudval_t;
typedef unsigned long	pgdval_t;
typedef unsigned long	pgprotval_t;

typedef union {
	pteval_t pte;
	pteval_t pte_low;
} pte_t;

#endif	/* __ASSEMBLY__ */
#endif	/* CONFIG_X86_PAE */

#ifndef __ASSEMBLY__
typedef struct page *pgtable_t;
#endif

#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

#ifndef __ASSEMBLY__
#define __phys_addr_nodebug(x)	((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
#else
#define __phys_addr(x)		__phys_addr_nodebug(x)
#endif
#define __phys_reloc_hide(x)	RELOC_HIDE((x), 0)

#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */

extern int nx_enabled;

/*
 * This much address space is reserved for vmalloc() and iomap()
 * as well as fixmap mappings.
 */
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;

extern void find_low_pfn_range(void);
extern unsigned long init_memory_mapping(unsigned long start,
					 unsigned long end);
extern void initmem_init(unsigned long, unsigned long);
extern void free_initmem(void);
extern void setup_bootmem_allocator(void);

#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>

static inline void clear_page(void *page)
{
	mmx_clear_page(page);
}

static inline void copy_page(void *to, void *from)
{
	mmx_copy_page(to, from);
}
#else	/* !CONFIG_X86_USE_3DNOW */
#include <linux/string.h>

static inline void clear_page(void *page)
{
	memset(page, 0, PAGE_SIZE);
}

static inline void copy_page(void *to, void *from)
{
	memcpy(to, from, PAGE_SIZE);
}
#endif	/* CONFIG_X86_USE_3DNOW */
#endif	/* !__ASSEMBLY__ */

#endif /* _ASM_X86_PAGE_32_H */
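
/*
 * Standalone userspace sketch, not part of the header above: it mirrors
 * the PAE pte_t union with <stdint.h> types to show how one 64-bit page
 * table entry splits into the pte_low/pte_high halves on little-endian
 * x86.  The mock_* names and the example PTE value are illustrative
 * assumptions, not kernel definitions.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t mock_pteval_t;

typedef union {
	struct {
		uint32_t pte_low, pte_high;	/* low/high 32 bits of the entry */
	};
	mock_pteval_t pte;			/* whole 64-bit entry */
} mock_pte_t;

int main(void)
{
	/* Frame at physical 0x123456000 (above 4GB) with flag bits 0x067. */
	mock_pte_t pte = { .pte = 0x123456067ULL };

	printf("pte      = 0x%llx\n", (unsigned long long)pte.pte);
	printf("pte_low  = 0x%x\n", pte.pte_low);	/* 0x23456067 */
	printf("pte_high = 0x%x\n", pte.pte_high);	/* 0x1 */
	return 0;
}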
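
/*
 * Standalone userspace sketch, not part of the header above: it
 * illustrates the arithmetic behind __phys_addr_nodebug(), i.e. lowmem
 * virtual-to-physical translation is a constant subtraction of
 * PAGE_OFFSET.  MOCK_PAGE_OFFSET and the sample address are assumptions
 * chosen to match the usual 3G/1G split (CONFIG_PAGE_OFFSET=0xC0000000).
 */
#include <stdio.h>

#define MOCK_PAGE_OFFSET	0xC0000000UL

/* Same shape as __phys_addr_nodebug(x): phys = virt - PAGE_OFFSET. */
#define mock_phys_addr(x)	((x) - MOCK_PAGE_OFFSET)

int main(void)
{
	unsigned long kvaddr = 0xC1000000UL;	/* a lowmem kernel virtual address */

	/* 0xC1000000 - 0xC0000000 = 0x01000000, i.e. physical 16 MB. */
	printf("virt 0x%lx -> phys 0x%lx\n", kvaddr, mock_phys_addr(kvaddr));
	return 0;
}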