#ifndef __ASM_SH_PAGE_H
#define __ASM_SH_PAGE_H

/*
 * Copyright (C) 1999 Niibe Yutaka
 */

#include <linux/const.h>

/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error "Bogus kernel page size?"
#endif

#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PTE_MASK	PAGE_MASK

#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT	16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#define HPAGE_SHIFT	18
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define HPAGE_SHIFT	20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT	22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT	26
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define HPAGE_SHIFT	29
#endif

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE		(1UL << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT-PAGE_SHIFT)
#endif

#ifndef __ASSEMBLY__

extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
extern unsigned long memory_start, memory_end;

/*
 * Returns nonzero when two mappings of the same physical page would
 * land in different alias bins of a virtually indexed cache, i.e.
 * when they differ within the bits covered by shm_align_mask.
 */
static inline unsigned long
pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}

#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, void *from);

struct page;
struct vm_area_struct;

extern void copy_user_highpage(struct page *to, struct page *from,
			       unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
extern void clear_user_highpage(struct page *page, unsigned long vaddr);
#define clear_user_highpage	clear_user_highpage

/*
 * These struct wrappers exist to get C type-checking on page table
 * values. With CONFIG_X2TLB the PTEs are 64 bits wide and are split
 * across two 32-bit words.
 */
#ifdef CONFIG_X2TLB
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) \
	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#elif defined(CONFIG_SUPERH32)
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#else
typedef struct { unsigned long long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#endif

#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

typedef struct page *pgtable_t;

#define pte_pgprot(x)	__pgprot(pte_val(x) & PTE_FLAGS_MASK)

#endif /* !__ASSEMBLY__ */

/*
 * __MEMORY_START and __MEMORY_SIZE are the physical start address and
 * size of RAM.
 */
#define __MEMORY_START		CONFIG_MEMORY_START
#define __MEMORY_SIZE		CONFIG_MEMORY_SIZE

/*
 * PAGE_OFFSET is the virtual address of the start of kernel address
 * space.
 */
#define PAGE_OFFSET		CONFIG_PAGE_OFFSET

/*
 * Virtual to physical RAM address translation.
 *
 * In 29 bit mode, the physical offset of RAM from address 0 is visible in
 * the kernel virtual address space, and thus we don't have to take
 * it into account when translating. However, in 32 bit mode this offset
 * is not visible (it is part of the PMB mapping) and so needs to be
 * added or subtracted as required.
 */
#if defined(CONFIG_PMB_LEGACY)
/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
#define PMB_OFFSET	(PAGE_OFFSET - PXSEG(__MEMORY_START))
#define __pa(x)	((unsigned long)(x) - PMB_OFFSET)
#define __va(x)	((void *)((unsigned long)(x) + PMB_OFFSET))
#elif defined(CONFIG_32BIT)
#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
#else
#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif
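/*
 * Worked example (a sketch, not from the original header; the config
 * values PAGE_OFFSET == 0x80000000 and __MEMORY_START == 0x0c000000
 * are assumed purely for illustration):
 *
 * In 29 bit mode, RAM is already visible in the kernel segment at its
 * physical offset, so translation is a plain shift by PAGE_OFFSET:
 *
 *	__pa(0x8c001000) == 0x8c001000 - 0x80000000 == 0x0c001000
 *	__va(0x0c001000) == (void *)0x8c001000
 *
 * Under CONFIG_32BIT, lowmem starts at PAGE_OFFSET itself and the PMB
 * supplies the offset to RAM, so __MEMORY_START is folded back in:
 *
 *	__pa(0x80001000) == 0x80001000 - 0x80000000 + 0x0c000000
 *			 == 0x0c001000
 */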
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

/*
 * PFN = physical frame number (i.e. PFN 0 == physical address 0).
 * PFN_START is the PFN of the first page of RAM. By defining this we
 * don't have struct page entries for the portion of address space
 * between physical address 0 and the start of RAM.
 */
#define PFN_START		(__MEMORY_START >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET		(PFN_START)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
#endif
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

/* vDSO support */
#ifdef CONFIG_VSYSCALL
#define __HAVE_ARCH_GATE_AREA
#endif

/*
 * Some drivers need to perform DMA into kmalloc'ed buffers
 * and so we have to increase the kmalloc minalign for this.
 */
#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES

#ifdef CONFIG_SUPERH64
/*
 * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
 * happily generate {ld/st}.q pairs, requiring us to have 8-byte
 * alignment to avoid traps. The kmalloc alignment is guaranteed by
 * virtue of L1_CACHE_BYTES, requiring this to only be special cased
 * for slab caches.
 */
#define ARCH_SLAB_MINALIGN	8
#endif

#endif /* __ASM_SH_PAGE_H */
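/*
 * Illustrative footnote, not part of the original header: assuming
 * PAGE_SHIFT == 12 and __MEMORY_START == 0x0c000000 (example values),
 *
 *	PFN_START == 0x0c000000 >> 12 == 0xc000
 *
 * so with ARCH_PFN_OFFSET == 0xc000 the mem_map[] array begins at the
 * first page of RAM rather than wasting struct page entries on the
 * unpopulated range below it, and pfn_valid() accepts only PFNs in
 * [min_low_pfn, max_low_pfn).
 */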