/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *		Modified RID allocation for SMP
 *	Goutham Rao <goutham.rao@intel.com>
 *		IPI based ptc implementation and A-step IPI implementation.
 *	Rohit Seth <rohit.seth@intel.com>
 *	Ken Chen <kenneth.w.chen@intel.com>
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/bootmem.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>

static struct {
	unsigned long mask;	/* mask of supported purge page-sizes */
	unsigned long max_bits;	/* log2 of largest supported purge page-size */
} purge;

struct ia64_ctx ia64_ctx = {
	.lock =		SPIN_LOCK_UNLOCKED,
	.next =		1,
	.max_ctx =	~0U
};

DEFINE_PER_CPU(u8, ia64_need_tlb_flush);

/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has setup ia64_ctx.max_ctx based on
 * the maximum RID that is supported by the boot CPU.
 */
void __init
mmu_context_init (void)
{
	ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
	ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
}

/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	int i, cpu;
	unsigned long flush_bit;

	for (i=0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
		ia64_ctx.bitmap[i] ^= flush_bit;
	}

	/* use offset at 300 to skip daemons */
	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, 300);
	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, ia64_ctx.next);

	/*
	 * can't call flush_tlb_all() here because of race condition
	 * with O(1) scheduler [EF]
	 */
	cpu = get_cpu(); /* prevent preemption/migration */
	for_each_online_cpu(i)
		if (i != cpu)
			per_cpu(ia64_need_tlb_flush, i) = 1;
	put_cpu();
	local_flush_tlb_all();
}

/*
 * Purge a virtual address range from the TLBs of all CPUs using the
 * broadcast ptc.ga instruction.  Falls back to flush_tlb_all() when
 * mm is not the currently active address space.
 */
void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
		       unsigned long end, unsigned long nbits)
{
	static DEFINE_SPINLOCK(ptcg_lock);

	if (mm != current->active_mm) {
		flush_tlb_all();
		return;
	}

	/* HW requires global serialization of ptc.ga. */
	spin_lock(&ptcg_lock);
	{
		do {
			/*
			 * Flush ALAT entries also.
			 */
			ia64_ptcga(start, (nbits<<2));
			ia64_srlz_i();
			start += (1UL << nbits);
		} while (start < end);
	}
	spin_unlock(&ptcg_lock);
}

/*
 * Flush the entire TLB of the local CPU with ptc.e, using the loop
 * counts and strides obtained from PAL at initialization time.
 */
void
local_flush_tlb_all (void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}

/*
 * Flush the TLB entries mapping [start,end) of vma->vm_mm, rounding
 * the purge size up to the nearest supported purge page-size.
 */
void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
		 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

#ifndef CONFIG_SMP
	if (mm != current->active_mm) {
		mm->context = 0;
		return;
	}
#endif

	nbits = ia64_fls(size + 0xfff);
	while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
			(nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	start &= ~((1UL << nbits) - 1);

# ifdef CONFIG_SMP
	platform_global_tlb_purge(mm, start, end, nbits);
# else
	preempt_disable();
	do {
		ia64_ptcl(start, (nbits<<2));
		start += (1UL << nbits);
	} while (start < end);
	preempt_enable();
# endif

	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
EXPORT_SYMBOL(flush_tlb_range);

/*
 * Per-CPU TLB initialization: query PAL for the supported purge
 * page-sizes and for the ptc.e loop parameters, then flush any
 * leftovers from bootstrapping out of the local TLB.
 */
void __devinit
ia64_tlb_init (void)
{
	ia64_ptce_info_t ptce_info;
	unsigned long tr_pgbits;
	long status;

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
		       "defaulting to architected purge page-sizes.\n", status);
		purge.mask = 0x115557000UL;
	}
	purge.max_bits = ia64_fls(purge.mask);

	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();	/* nuke leftovers from bootstrapping... */
}
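
/*
 * Worked example of the purge page-size selection in flush_tlb_range(),
 * assuming the architected fallback purge mask 0x115557000 set up in
 * ia64_tlb_init() (supported purge sizes: 4K, 8K, 16K, 64K, 256K, 1M,
 * 4M, 16M, 64M, 256M and 4G):
 *
 *  - a 20KB range gives nbits = ia64_fls(0x5000 + 0xfff) = 14, i.e. a
 *    16KB purge size; bit 14 is set in the mask, so the loop issues
 *    16KB-aligned purges until the range is covered;
 *
 *  - a 128KB range gives nbits = ia64_fls(0x20000 + 0xfff) = 17; bit 17
 *    is clear in the mask, so nbits is bumped to 18 (256KB), and the
 *    range is covered by one or two 256KB-aligned purges, depending on
 *    its alignment.
 */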