/*
 * include/asm-xtensa/page.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_PAGE_H
#define _XTENSA_PAGE_H

#include <asm/processor.h>
#include <asm/types.h>
#include <asm/cache.h>

/*
 * Fixed TLB translations in the processor.
 */

#define XCHAL_KSEG_CACHED_VADDR	0xd0000000
#define XCHAL_KSEG_BYPASS_VADDR	0xd8000000
#define XCHAL_KSEG_PADDR	0x00000000
#define XCHAL_KSEG_SIZE		0x08000000

/*
 * PAGE_SHIFT determines the page size
 */

#define PAGE_SHIFT		12
#define PAGE_SIZE		(__XTENSA_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE-1))

#define PAGE_OFFSET		XCHAL_KSEG_CACHED_VADDR
#define MAX_MEM_PFN		XCHAL_KSEG_SIZE
#define PGTABLE_START		0x80000000

/*
 * Cache aliasing:
 *
 * If the cache size for one way is greater than the page size, we have to
 * deal with cache aliasing. The cache index is wider than the page size:
 *
 *	|    |cache| cache index
 *	| pfn  |off|	virtual address
 *	|xxxx:X|zzz|
 *	|    : |   |
 *	| \  / |   |
 *	|trans.|   |
 *	| /  \ |   |
 *	|yyyy:Y|zzz|	physical address
 *
 * When the page number is translated to the physical page address, the lowest
 * bit(s) (X) that are part of the cache index are also translated (Y).
 * If this translation changes bit(s) (X), the cache index is also affected,
 * thus resulting in a different cache line than before.
 * The kernel does not provide a mechanism to ensure that the page color
 * (represented by these bits) remains the same when pages are allocated
 * or remapped. When user pages are mapped into kernel space, the color of
 * the page might also change.
 *
 * We use the address space VMALLOC_END ... VMALLOC_END + DCACHE_WAY_SIZE * 2
 * to temporarily map a page so we can match the color.
 */

#if DCACHE_WAY_SIZE > PAGE_SIZE
# define DCACHE_ALIAS_ORDER	(DCACHE_WAY_SHIFT - PAGE_SHIFT)
# define DCACHE_ALIAS_MASK	(PAGE_MASK & (DCACHE_WAY_SIZE - 1))
# define DCACHE_ALIAS(a)	(((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define DCACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
#else
# define DCACHE_ALIAS_ORDER	0
#endif

#if ICACHE_WAY_SIZE > PAGE_SIZE
# define ICACHE_ALIAS_ORDER	(ICACHE_WAY_SHIFT - PAGE_SHIFT)
# define ICACHE_ALIAS_MASK	(PAGE_MASK & (ICACHE_WAY_SIZE - 1))
# define ICACHE_ALIAS(a)	(((a) & ICACHE_ALIAS_MASK) >> PAGE_SHIFT)
# define ICACHE_ALIAS_EQ(a,b)	((((a) ^ (b)) & ICACHE_ALIAS_MASK) == 0)
#else
# define ICACHE_ALIAS_ORDER	0
#endif


#ifdef __ASSEMBLY__

#define __pgprot(x)	(x)

#else

/*
 * These are used to make use of C type-checking.
 */

typedef struct { unsigned long pte; } pte_t;		/* page table entry */
typedef struct { unsigned long pgd; } pgd_t;		/* PGD table entry */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct page *pgtable_t;

#define pte_val(x)	((x).pte)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

/*
 * Pure 2^n version of get_order.
 * Use the 'nsau' instruction if the processor supports it, otherwise fall
 * back to the generic version.
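 *
 * Worked example (illustrative, assuming PAGE_SHIFT == 12): for
 * size == 0x1000, (size - 1) >> PAGE_SHIFT == 0, which has 32 leading
 * zeros, so the order is 0 (one page); size == 0x1001 gives order 1,
 * and size == 0x4000 gives order 2 (four pages).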
 */

#if XCHAL_HAVE_NSA

static inline __attribute_const__ int get_order(unsigned long size)
{
	int lz;

	/* 'nsau' counts leading zero bits; it returns 32 for a zero operand. */
	asm ("nsau %0, %1" : "=r" (lz) : "r" ((size - 1) >> PAGE_SHIFT));
	return 32 - lz;
}

#else

# include <asm-generic/page.h>

#endif

struct page;
extern void clear_page(void *page);
extern void copy_page(void *to, void *from);

/*
 * If we have cache aliasing and writeback caches, we might have to do
 * some extra work.
 */

#if DCACHE_WAY_SIZE > PAGE_SIZE
extern void clear_user_page(void*, unsigned long, struct page*);
extern void copy_user_page(void*, void*, unsigned long, struct page*);
#else
# define clear_user_page(page, vaddr, pg)	clear_page(page)
# define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
#endif

/*
 * This handles the memory map. Pages are mapped at XCHAL_KSEG_CACHED_VADDR
 * for kernels with a 32-bit address space. These macros convert kernel
 * addresses, not user addresses.
 */

#define __pa(x)			((unsigned long) (x) - PAGE_OFFSET)
#define __va(x)			((void *)((unsigned long) (x) + PAGE_OFFSET))
#define pfn_valid(pfn)		((unsigned long)(pfn) < max_mapnr)
#ifdef CONFIG_DISCONTIGMEM
# error CONFIG_DISCONTIGMEM not supported
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)

#define WANT_PAGE_VIRTUAL


#endif /* __ASSEMBLY__ */

#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/memory_model.h>
#endif /* _XTENSA_PAGE_H */
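
/*
 * Illustrative sketch of the cache-aliasing macros above; the configuration
 * is hypothetical (DCACHE_WAY_SIZE == 0x4000, PAGE_SIZE == 0x1000), which
 * gives DCACHE_ALIAS_ORDER == 2 and DCACHE_ALIAS_MASK == 0x3000:
 *
 *	DCACHE_ALIAS(0xd0003000) == 3
 *	DCACHE_ALIAS(0xd0013000) == 3	(DCACHE_ALIAS_EQ(): same cache lines)
 *	DCACHE_ALIAS(0xd0012000) == 2	(different color: different lines)
 */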