#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);		// finish unmap for address space MM
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define IA64_GATHER_BUNDLE	8

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;
	unsigned int		max;
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start, end;
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		**pages;
	struct page		*local[IA64_GATHER_BUNDLE];
};

struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* record for a translation register (tr) entry */

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * region register macros
 */
#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	((val >> 8) & 0xffffff)
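/*
 * Illustrative sketch of how the region-register macros above might be used to
 * pick apart and rebuild a region register value.  The variable names (rr, rid,
 * ps, ve, new_ps) are hypothetical and only for illustration:
 *
 *	u64 rr  = ia64_get_rr(addr);		// rr covering virtual address ADDR
 *	u64 rid = RR_TO_RID(rr);		// 24-bit region ID
 *	u64 ps  = RR_TO_PS(rr);			// log2 of the preferred page size
 *	u64 ve  = RR_TO_VE(rr);			// VHPT walker enable bit
 *
 *	// re-encode with a different preferred page size:
 *	rr = (rr & ~RR_PS_MASK) | RR_PS(new_ps);
 */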

/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	unsigned long i;
	unsigned int nr;

	if (!tlb->need_flush)
		return;
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

	/* lastly, release the freed pages */
	nr = tlb->nr;

	tlb->nr = 0;
	tlb->start_addr = ~0UL;
	for (i = 0; i < nr; ++i)
		free_page_and_swap_cache(tlb->pages[i]);
}

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->start_addr = ~0UL;
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);

	return tlb->max - tlb->nr;
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}
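/*
 * Illustrative sketch of how the batching above fits together; the loop below is
 * hypothetical pseudocode in the spirit of the template at the top of this file,
 * not a real caller:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	for each PTE in [start, end) {
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);	// extends [start_addr, end_addr)
 *		tlb_remove_page(&tlb, page);		// calls tlb_flush_mmu() once the batch is full
 *	}
 *	tlb_finish_mmu(&tlb, start, end);		// final flush, then the gathered pages are freed
 */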

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define pte_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pte_free_tlb(tlb, ptep, address);		\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pmd_free_tlb(tlb, ptep, address);		\
} while (0)

#define pud_free_tlb(tlb, pudp, address)		\
do {							\
	tlb->need_flush = 1;				\
	__pud_free_tlb(tlb, pudp, address);		\
} while (0)

#endif /* _ASM_IA64_TLB_H */