#ifndef __ASM_SH_PAGE_H
#define __ASM_SH_PAGE_H

/*
 * Copyright (C) 1999  Niibe Yutaka
 */

#include <linux/const.h>

/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error "Bogus kernel page size?"
#endif

#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PTE_MASK	PAGE_MASK

#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT	16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#define HPAGE_SHIFT	18
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define HPAGE_SHIFT	20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT	22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT	26
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define HPAGE_SHIFT	29
#endif

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE		(1UL << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT-PAGE_SHIFT)
#endif

#ifndef __ASSEMBLY__
#include <asm/uncached.h>

extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
extern unsigned long memory_start, memory_end, memory_limit;

static inline unsigned long
pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & shm_align_mask;
}

#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, void *from);
#define copy_user_page(to, from, vaddr, pg)	__copy_user(to, from, PAGE_SIZE)

struct page;
struct vm_area_struct;

extern void copy_user_highpage(struct page *to, struct page *from,
			       unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
extern void clear_user_highpage(struct page *page, unsigned long vaddr);
#define clear_user_highpage	clear_user_highpage

/*
 * These are used to make use of C type-checking..
 */
#ifdef CONFIG_X2TLB
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) \
	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#elif defined(CONFIG_SUPERH32)
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#else
typedef struct { unsigned long long pte_low; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#endif

#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

typedef struct page *pgtable_t;

#define pte_pgprot(x)	__pgprot(pte_val(x) & PTE_FLAGS_MASK)

#endif /* !__ASSEMBLY__ */

/*
 * __MEMORY_START and SIZE are the physical addresses and size of RAM.
 */
#define __MEMORY_START		CONFIG_MEMORY_START
#define __MEMORY_SIZE		CONFIG_MEMORY_SIZE

/*
 * PHYSICAL_OFFSET is the offset in physical memory where the base
 * of the kernel is loaded.
 */
#ifdef CONFIG_PHYSICAL_START
#define PHYSICAL_OFFSET	(CONFIG_PHYSICAL_START - __MEMORY_START)
#else
#define PHYSICAL_OFFSET	0
#endif

/*
 * PAGE_OFFSET is the virtual address of the start of kernel address
 * space.
 */
#define PAGE_OFFSET		CONFIG_PAGE_OFFSET

/*
 * Virtual to physical RAM address translation.
 *
 * In 29-bit mode, the physical offset of RAM from address 0 is visible in
 * the kernel virtual address space, and thus we don't have to take
 * this into account when translating. However, in 32-bit mode this offset
 * is not visible (it is part of the PMB mapping) and so needs to be
 * added or subtracted as required.
 */
#ifdef CONFIG_PMB
#define ___pa(x)	((x)-PAGE_OFFSET+__MEMORY_START)
#define ___va(x)	((x)+PAGE_OFFSET-__MEMORY_START)
#else
#define ___pa(x)	((x)-PAGE_OFFSET)
#define ___va(x)	((x)+PAGE_OFFSET)
#endif

#ifndef __ASSEMBLY__
#define __pa(x)		___pa((unsigned long)(x))
#define __va(x)		((void *)___va((unsigned long)(x)))
#endif /* !__ASSEMBLY__ */
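
/*
 * Worked example of the translation above. The addresses here are
 * illustrative values only, not taken from any particular board
 * configuration: assume CONFIG_PAGE_OFFSET = 0x80000000 and
 * CONFIG_MEMORY_START = 0x08000000.
 *
 * With CONFIG_PMB (32-bit mode), RAM's physical offset must be
 * re-applied by hand:
 *
 *	__pa(0x80001000) == 0x80001000 - 0x80000000 + 0x08000000
 *			 == 0x08001000
 *
 * Without CONFIG_PMB (29-bit mode), the offset is already visible
 * through the identity-mapped P1/P2 segments, so only PAGE_OFFSET
 * is subtracted:
 *
 *	__pa(0x88001000) == 0x88001000 - 0x80000000
 *			 == 0x08001000
 */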

#ifdef CONFIG_UNCACHED_MAPPING
#if defined(CONFIG_29BIT)
#define UNCAC_ADDR(addr)	P2SEGADDR(addr)
#define CAC_ADDR(addr)		P1SEGADDR(addr)
#else
#define UNCAC_ADDR(addr)	((addr) - PAGE_OFFSET + uncached_start)
#define CAC_ADDR(addr)		((addr) - uncached_start + PAGE_OFFSET)
#endif
#else
#define UNCAC_ADDR(addr)	((addr))
#define CAC_ADDR(addr)		((addr))
#endif

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

/*
 * PFN = physical frame number (i.e. PFN 0 == physical address 0)
 * PFN_START is the PFN of the first page of RAM. By defining this we
 * don't have struct page entries for the portion of address space
 * between physical address 0 and the start of RAM.
 */
#define PFN_START		(__MEMORY_START >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET		(PFN_START)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
#endif
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

/*
 * Some drivers need to perform DMA into kmalloc'ed buffers
 * and so we have to increase the kmalloc minalign for this.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

#ifdef CONFIG_SUPERH64
/*
 * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
 * happily generate {ld/st}.q pairs, requiring us to have 8-byte
 * alignment to avoid traps. The kmalloc alignment is guaranteed by
 * virtue of L1_CACHE_BYTES, requiring this to only be special cased
 * for slab caches.
 */
#define ARCH_SLAB_MINALIGN	8
#endif

#endif /* __ASM_SH_PAGE_H */