#ifndef __ASM_SH_PAGE_H
#define __ASM_SH_PAGE_H

/*
 * Copyright (C) 1999 Niibe Yutaka
 */

#include <linux/const.h>

/* PAGE_SHIFT determines the page size */
#if defined(CONFIG_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error "Bogus kernel page size?"
#endif

#define PAGE_SIZE	(_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
#define PTE_MASK	PAGE_MASK

#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HPAGE_SHIFT	16
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_256K)
#define HPAGE_SHIFT	18
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define HPAGE_SHIFT	20
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HPAGE_SHIFT	22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT	26
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define HPAGE_SHIFT	29
#endif

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE		(1UL << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE-1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT-PAGE_SHIFT)
#endif

#ifndef __ASSEMBLY__

extern unsigned long shm_align_mask;
extern unsigned long max_low_pfn, min_low_pfn;
extern unsigned long memory_start, memory_end;

extern void clear_page(void *to);
extern void copy_page(void *to, void *from);

#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
	(defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \
	 defined(CONFIG_SH7705_CACHE_32KB))
struct page;
struct vm_area_struct;
extern void clear_user_page(void *to, unsigned long address, struct page *page);
extern void copy_user_page(void *to, void *from, unsigned long address,
			   struct page *page);
#if defined(CONFIG_CPU_SH4)
extern void copy_user_highpage(struct page *to, struct page *from,
			       unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#endif
#else
#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif
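
/*
 * Illustrative sketch (not part of this header): the masks above are
 * typically used to split a kernel virtual address into its page base
 * and in-page offset, e.g.
 *
 *	unsigned long base   = addr & PAGE_MASK;
 *	unsigned long offset = addr & ~PAGE_MASK;
 *
 * With CONFIG_PAGE_SIZE_4KB, PAGE_SHIFT is 12, so PAGE_SIZE is 0x1000
 * and PAGE_MASK is 0xfffff000 on a 32-bit build.
 */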
/*
 * These are used to make use of C type-checking..
 */
#ifdef CONFIG_X2TLB
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) \
	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#elif defined(CONFIG_SUPERH32)
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#else
typedef struct { unsigned long long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x)	((x).pte_low)
#define __pte(x)	((pte_t) { (x) } )
#endif

#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

typedef struct page *pgtable_t;

#define pte_pgprot(x)	__pgprot(pte_val(x) & PTE_FLAGS_MASK)

#endif /* !__ASSEMBLY__ */

/*
 * __MEMORY_START and __MEMORY_SIZE are the physical address and size
 * of RAM.
 */
#define __MEMORY_START		CONFIG_MEMORY_START
#define __MEMORY_SIZE		CONFIG_MEMORY_SIZE

/*
 * PAGE_OFFSET is the virtual address of the start of kernel address
 * space.
 */
#define PAGE_OFFSET		CONFIG_PAGE_OFFSET

/*
 * Virtual to physical RAM address translation.
 *
 * In 29-bit mode, the physical offset of RAM from address 0 is visible in
 * the kernel virtual address space, and thus we don't have to take it
 * into account when translating. However, in 32-bit mode this offset is
 * not visible (it is part of the PMB mapping) and so needs to be added
 * or subtracted as required.
 */
#if defined(CONFIG_PMB_FIXED)
/* phys = virt - PAGE_OFFSET - (__MEMORY_START & 0xe0000000) */
#define PMB_OFFSET	(PAGE_OFFSET - PXSEG(__MEMORY_START))
#define __pa(x)	((unsigned long)(x) - PMB_OFFSET)
#define __va(x)	((void *)((unsigned long)(x) + PMB_OFFSET))
#elif defined(CONFIG_32BIT)
#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET+__MEMORY_START)
#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET-__MEMORY_START))
#else
#define __pa(x)	((unsigned long)(x)-PAGE_OFFSET)
#define __va(x)	((void *)((unsigned long)(x)+PAGE_OFFSET))
#endif

#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
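
/*
 * Worked example (hypothetical values, for illustration only): on a
 * CONFIG_32BIT build with PAGE_OFFSET = 0x80000000 and
 * __MEMORY_START = 0x0c000000, the kernel virtual address 0x80001000
 * translates as
 *
 *	__pa(0x80001000) = 0x80001000 - 0x80000000 + 0x0c000000
 *	                 = 0x0c001000
 *
 * and __va() applies the inverse, so __va(__pa(x)) == x for any address
 * in the directly-mapped range. In the 29-bit case the __MEMORY_START
 * term is absent because RAM's physical offset is already visible
 * through the fixed P1 mapping.
 */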
/*
 * PFN = physical frame number (i.e. PFN 0 == physical address 0)
 * PFN_START is the PFN of the first page of RAM. By defining this we
 * don't have struct page entries for the portion of address space
 * between physical address 0 and the start of RAM.
 */
#define PFN_START		(__MEMORY_START >> PAGE_SHIFT)
#define ARCH_PFN_OFFSET		(PFN_START)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn)		((pfn) >= min_low_pfn && (pfn) < max_low_pfn)
#endif
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>

/* vDSO support */
#ifdef CONFIG_VSYSCALL
#define __HAVE_ARCH_GATE_AREA
#endif

/*
 * Some drivers need to perform DMA into kmalloc'ed buffers
 * and so we have to increase the kmalloc minalign for this.
 */
#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES

#ifdef CONFIG_SUPERH64
/*
 * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
 * happily generate {ld/st}.q pairs, requiring us to have 8-byte
 * alignment to avoid traps. The kmalloc alignment is guaranteed by
 * virtue of L1_CACHE_BYTES, requiring this only to be special-cased
 * for slab caches.
 */
#define ARCH_SLAB_MINALIGN	8
#endif

#endif /* __ASM_SH_PAGE_H */
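
/*
 * Usage sketch (illustrative only, not provided by this header): code
 * that wants the struct page behind a directly-mapped kernel pointer
 * would typically validate it first, e.g.
 *
 *	if (virt_addr_valid(kaddr))
 *		page = virt_to_page(kaddr);
 *
 * On FLATMEM this reduces to checking that __pa(kaddr) >> PAGE_SHIFT
 * falls within [min_low_pfn, max_low_pfn).
 */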