/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *		Modified RID allocation for SMP
 *	Goutham Rao <goutham.rao@intel.com>
 *		IPI based ptc implementation and A-step IPI implementation.
 * Rohit Seth <rohit.seth@intel.com>
 * Ken Chen <kenneth.w.chen@intel.com>
 * Christophe de Dinechin <ddd@hp.com>: Avoid ptc.e on memory allocation
 * Copyright (C) 2007 Intel Corp
 *	Fenghua Yu <fenghua.yu@intel.com>
 *	Add multiple ptc.g/ptc.ga instruction support in global tlb purge.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/tlb.h>

static struct {
	u64 mask;		/* mask of supported purge page-sizes */
	unsigned long max_bits;	/* log2 of largest supported purge page-size */
} purge;

struct ia64_ctx ia64_ctx = {
	.lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
	.next = 1,
	.max_ctx = ~0U
};

DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num);  /* number of TR slots on this processor */
DEFINE_PER_CPU(u8, ia64_tr_used); /* highest TR slot used by the kernel */

struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
 * Called after cpu_init() has set up ia64_ctx.max_ctx based on the
 * maximum RID supported by the boot CPU.
 */
void __init
mmu_context_init (void)
{
	ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
					 SMP_CACHE_BYTES);
	if (!ia64_ctx.bitmap)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      (ia64_ctx.max_ctx + 1) >> 3);
	ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
					   SMP_CACHE_BYTES);
	if (!ia64_ctx.flushmap)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      (ia64_ctx.max_ctx + 1) >> 3);
}

/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	int i, cpu;
	unsigned long flush_bit;

	/* Clear the flushmap and retire flushed contexts from the bitmap. */
	for (i = 0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
		ia64_ctx.bitmap[i] ^= flush_bit;
	}

	/* use offset at 300 to skip daemons */
	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, 300);
	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, ia64_ctx.next);

	/*
	 * can't call flush_tlb_all() here because of race condition
	 * with O(1) scheduler [EF]
	 */
	cpu = get_cpu(); /* prevent preemption/migration */
	for_each_online_cpu(i)
		if (i != cpu)
			per_cpu(ia64_need_tlb_flush, i) = 1;
	put_cpu();
	local_flush_tlb_all();
}
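/*
 * Illustrative sketch (not code in this file): the contract that
 * wrap_mmu_context() re-establishes for the context allocator.  The
 * real fast path is get_mmu_context() in asm/mmu_context.h; under
 * ia64_ctx.lock it behaves roughly like:
 *
 *	if (ia64_ctx.next >= ia64_ctx.limit)	// no free RID below limit
 *		wrap_mmu_context(mm);		// recompute next/limit
 *	mm->context = ia64_ctx.next++;		// hand out a fresh RID
 */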
/*
 * Implement "spinaphores" ... like counting semaphores, but they
 * spin instead of sleeping.  If there are ever any other users for
 * this primitive it can be moved up to a spinaphore.h header.
 */
struct spinaphore {
	unsigned long ticket;
	unsigned long serve;
};

static inline void spinaphore_init(struct spinaphore *ss, int val)
{
	ss->ticket = 0;
	ss->serve = val;
}

static inline void down_spin(struct spinaphore *ss)
{
	unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;

	/* Tickets below "serve" hold the spinaphore without waiting. */
	if (time_before(t, ss->serve))
		return;

	ia64_invala();

	for (;;) {
		/*
		 * (Re)load "serve" and keep spinning until an up_spin()
		 * advances it past our ticket.
		 */
		asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve)
			      : "r"(&ss->serve) : "memory");
		if (time_before(t, serve))
			return;
		cpu_relax();
	}
}

static inline void up_spin(struct spinaphore *ss)
{
	ia64_fetchadd(1, &ss->serve, rel);
}
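/*
 * Worked example (illustrative): a spinaphore initialized with val = 2
 * admits two holders at a time.  If three CPUs call down_spin()
 * concurrently, they draw tickets 0, 1 and 2:
 *
 *	CPU A: t = 0, serve = 2  ->  0 < 2, enters immediately
 *	CPU B: t = 1, serve = 2  ->  1 < 2, enters immediately
 *	CPU C: t = 2, serve = 2  ->  spins
 *	CPU A: up_spin()         ->  serve = 3, CPU C now enters
 *
 * time_before() compares the 64-bit counters modulo wraparound.
 */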
static struct spinaphore ptcg_sem;
static u16 nptcg = 1;
static int need_ptcg_sem = 1;
static int toolatetochangeptcgsem = 0;

/*
 * Kernel parameter "nptcg=" overrides the maximum number of concurrent
 * global TLB purges, which is otherwise reported by either PAL or the
 * SAL PALO table.
 *
 * There is no sanity checking of the nptcg value.  It is the user's
 * responsibility to supply a value that is valid for the platform;
 * otherwise, the kernel may hang in some cases.
 */
static int __init
set_nptcg(char *str)
{
	int value = 0;

	get_option(&str, &value);
	setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);

	return 1;
}

__setup("nptcg=", set_nptcg);

/*
 * The maximum number of simultaneous ptc.g purges in the system can
 * be defined by PAL_VM_SUMMARY (in which case we should take the
 * smallest value for any cpu in the system) or by the PAL override
 * table (in which case we should ignore the value from
 * PAL_VM_SUMMARY).
 *
 * The kernel parameter "nptcg=" overrides the maximum number of
 * simultaneous ptc.g purges defined by either PAL_VM_SUMMARY or the
 * PAL override table; in that case, we ignore both of them.
 *
 * Complicating the logic here is the fact that num_possible_cpus()
 * isn't fully set up until we start bringing cpus online.
 */
void
setup_ptcg_sem(int max_purges, int nptcg_from)
{
	static int kp_override;
	static int palo_override;
	static int firstcpu = 1;

	if (toolatetochangeptcgsem) {
		if (nptcg_from == NPTCG_FROM_PAL && max_purges == 0)
			BUG_ON(1 < nptcg);
		else
			BUG_ON(max_purges < nptcg);
		return;
	}

	if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
		kp_override = 1;
		nptcg = max_purges;
		goto resetsema;
	}
	if (kp_override) {
		need_ptcg_sem = num_possible_cpus() > nptcg;
		return;
	}

	if (nptcg_from == NPTCG_FROM_PALO) {
		palo_override = 1;

		/* In PALO max_purges == 0 really means it! */
		if (max_purges == 0)
			panic("Whoa! Platform does not support global TLB purges.\n");
		nptcg = max_purges;
		if (nptcg == PALO_MAX_TLB_PURGES) {
			need_ptcg_sem = 0;
			return;
		}
		goto resetsema;
	}
	if (palo_override) {
		if (nptcg != PALO_MAX_TLB_PURGES)
			need_ptcg_sem = (num_possible_cpus() > nptcg);
		return;
	}

	/* In PAL_VM_SUMMARY max_purges == 0 actually means 1 */
	if (max_purges == 0)
		max_purges = 1;

	if (firstcpu) {
		nptcg = max_purges;
		firstcpu = 0;
	}
	if (max_purges < nptcg)
		nptcg = max_purges;
	if (nptcg == PAL_MAX_PURGES) {
		need_ptcg_sem = 0;
		return;
	} else
		need_ptcg_sem = (num_possible_cpus() > nptcg);

resetsema:
	spinaphore_init(&ptcg_sem, max_purges);
}
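/*
 * Example (illustrative): on a 16-CPU machine where PAL_VM_SUMMARY
 * reports max_purges = 2 on every CPU and neither "nptcg=" nor a PALO
 * entry is present, each CPU's boot path ends up calling
 *
 *	setup_ptcg_sem(2, NPTCG_FROM_PAL);
 *
 * The first call sets nptcg = 2; later calls only lower nptcg if their
 * CPU reports a smaller value.  The result is need_ptcg_sem = 1
 * (16 > 2) and ptcg_sem initialized so that at most two CPUs issue
 * ptc.g/ptc.ga at once.
 */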
void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
		       unsigned long end, unsigned long nbits)
{
	struct mm_struct *active_mm = current->active_mm;

	toolatetochangeptcgsem = 1;

	if (mm != active_mm) {
		/* Restore region IDs for mm */
		if (mm && active_mm) {
			activate_context(mm);
		} else {
			flush_tlb_all();
			return;
		}
	}

	if (need_ptcg_sem)
		down_spin(&ptcg_sem);

	do {
		/*
		 * Flush ALAT entries also.
		 */
		ia64_ptcga(start, (nbits << 2));
		ia64_srlz_i();
		start += (1UL << nbits);
	} while (start < end);

	if (need_ptcg_sem)
		up_spin(&ptcg_sem);

	if (mm != active_mm) {
		activate_context(active_mm);
	}
}

void
local_flush_tlb_all (void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	/* Walk the PAL-described ptc.e grid: count0 x count1 purges. */
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();		/* srlz.i implies srlz.d */
}

static void
__flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
		   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

#ifndef CONFIG_SMP
	if (mm != current->active_mm) {
		mm->context = 0;
		return;
	}
#endif

	/*
	 * Round the purge size up to the smallest supported purge
	 * page-size that covers it, capped at purge.max_bits.
	 */
	nbits = ia64_fls(size + 0xfff);
	while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
			(nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	start &= ~((1UL << nbits) - 1);

	preempt_disable();
#ifdef CONFIG_SMP
	if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
		platform_global_tlb_purge(mm, start, end, nbits);
		preempt_enable();
		return;
	}
#endif
	do {
		ia64_ptcl(start, (nbits << 2));
		start += (1UL << nbits);
	} while (start < end);
	preempt_enable();
	ia64_srlz_i();		/* srlz.i implies srlz.d */
}
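/*
 * Worked example (illustrative): for a 12KB range, size + 0xfff =
 * 0x3fff, so nbits starts at ia64_fls(0x3fff) = 13.  Bit 13 (8KB) is
 * set in the architected purge.mask (0x115557000), so start is aligned
 * down to 8KB and the loop above issues two 8KB purges to cover the
 * range.
 */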
void flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	if (unlikely(end - start >= 1024*1024*1024*1024UL
			|| REGION_NUMBER(start) != REGION_NUMBER(end - 1))) {
		/*
		 * If we flush more than a terabyte or across regions, we're
		 * probably better off just flushing the entire TLB(s).  This
		 * should be very rare and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/* flush the address range from the tlb */
		__flush_tlb_range(vma, start, end);
		/* flush the virt. page-table area mapping the addr range */
		__flush_tlb_range(vma, ia64_thash(start), ia64_thash(end));
	}
}
EXPORT_SYMBOL(flush_tlb_range);

void ia64_tlb_init(void)
{
	ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
	u64 tr_pgbits;
	long status;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	int cpu = smp_processor_id();

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
		       "defaulting to architected purge page-sizes.\n", status);
		purge.mask = 0x115557000UL;
	}
	purge.max_bits = ia64_fls(purge.mask);

	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();	/* nuke leftovers from bootstrapping... */
	status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);

	if (status) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		per_cpu(ia64_tr_num, cpu) = 8;
		return;
	}
	/* ia64_tr_num is the smaller of the ITR/DTR counts, capped below. */
	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
	if (per_cpu(ia64_tr_num, cpu) >
				(vm_info_1.pal_vm_info_1_s.max_dtr_entry+1))
		per_cpu(ia64_tr_num, cpu) =
				vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
		static int justonce = 1;
		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
		if (justonce) {
			justonce = 0;
			printk(KERN_DEBUG "TR register number exceeds "
			       "IA64_TR_ALLOC_MAX!\n");
		}
	}
}

/*
 * is_tr_overlap
 *
 * Check overlap with inserted TRs.
 */
static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
{
	u64 tr_log_size;
	u64 tr_end;
	u64 va_rr = ia64_get_rr(va);
	u64 va_rid = RR_TO_RID(va_rr);
	u64 va_end = va + (1UL << log_size) - 1;

	/* Different region IDs can never overlap. */
	if (va_rid != RR_TO_RID(p->rr))
		return 0;
	tr_log_size = (p->itir & 0xff) >> 2;
	tr_end = p->ifa + (1UL << tr_log_size) - 1;

	if (va > tr_end || p->ifa > va_end)
		return 0;
	return 1;
}

/*
 * ia64_itr_entry: insert a TR in virtual mode and allocate a TR slot.
 *
 * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
 * va          : virtual address.
 * pte         : pte entry to be inserted.
 * log_size    : log2 of the range to be covered.
 *
 * Return value: < 0  : error number.
 *               >= 0 : slot number allocated for the TR.
 *
 * Must be called with preemption disabled.
 */
int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
{
	int i, r;
	unsigned long psr;
	struct ia64_tr_entry *p;
	int cpu = smp_processor_id();

	if (!ia64_idtrs[cpu]) {
		ia64_idtrs[cpu] = kmalloc_array(2 * IA64_TR_ALLOC_MAX,
						sizeof(struct ia64_tr_entry),
						GFP_KERNEL);
		if (!ia64_idtrs[cpu])
			return -ENOMEM;
	}
	r = -EINVAL;
	/* Check overlap with existing TR entries. */
	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu];
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped entry "
					       "inserted for TR register!\n");
					goto out;
				}
		}
	}
	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
								i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped entry "
					       "inserted for TR register!\n");
					goto out;
				}
		}
	}

	/* Find a free slot (free in both halves for an idtr insert). */
	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
		switch (target_mask & 0x3) {
		case 1:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
				goto found;
			continue;
		case 2:
			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		case 3:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		default:
			r = -EINVAL;
			goto out;
		}
	}
found:
	/* i == ia64_tr_num here if the loop found no free slot. */
	if (i >= per_cpu(ia64_tr_num, cpu))
		return -EBUSY;

	/* Record TR info for use by the MCA handler. */
	if (i > per_cpu(ia64_tr_used, cpu))
		per_cpu(ia64_tr_used, cpu) = i;

	psr = ia64_clear_ic();
	if (target_mask & 0x1) {
		ia64_itr(0x1, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	if (target_mask & 0x2) {
		ia64_itr(0x2, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	ia64_set_psr(psr);
	r = i;
out:
	return r;
}
EXPORT_SYMBOL_GPL(ia64_itr_entry);

/*
 * ia64_ptr_entry: purge a TR and free its slot.
 *
 * target_mask: 0x1 : purge itr, 0x2 : purge dtr, 0x3 : purge idtr.
 * slot: slot number to be freed.
 *
 * Must be called with preemption disabled.
 */
void ia64_ptr_entry(u64 target_mask, int slot)
{
	int cpu = smp_processor_id();
	int i;
	struct ia64_tr_entry *p;

	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
		return;

	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu] + slot;
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x1, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x2, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	/* Shrink ia64_tr_used back down to the highest slot still in use. */
	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
			break;
	}
	per_cpu(ia64_tr_used, cpu) = i;
}
EXPORT_SYMBOL_GPL(ia64_ptr_entry);
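/*
 * Usage sketch (illustrative, not a caller in this file): pinning one
 * granule with both an itr and a dtr, then releasing it.  The values
 * of "va" and "pte" are hypothetical; real callers build the pte
 * themselves (e.g. with pfn_pte()/pte_val()) and commonly use
 * IA64_GRANULE_SHIFT for the size:
 *
 *	int slot;
 *
 *	preempt_disable();
 *	slot = ia64_itr_entry(0x3, va, pte, IA64_GRANULE_SHIFT);
 *	if (slot >= 0) {
 *		...			// use the pinned translation
 *		ia64_ptr_entry(0x3, slot);
 *	}
 *	preempt_enable();
 */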