--- tlb.c	(32032df6c2f6c9c6b2ada2ce42322231824f70c2)
+++ tlb.c	(6c57a332901f851bd092aba7a2b4d8ef4e643829)
 /*
  * TLB support routines.
  *
  * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  *
  * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
  *		Modified RID allocation for SMP
[... 34 unchanged lines hidden ...]
 	.next = 1,
 	.max_ctx = ~0U
 };
 
 DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
 DEFINE_PER_CPU(u8, ia64_tr_num);  /*Number of TR slots in current processor*/
 DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/
 
-struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
  * Called after cpu_init() has setup ia64_ctx.max_ctx based on
  * maximum RID that is supported by boot CPU.
  */
 void __init
 mmu_context_init (void)
[... 364 unchanged lines hidden ...]
  */
 int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 {
 	int i, r;
 	unsigned long psr;
 	struct ia64_tr_entry *p;
 	int cpu = smp_processor_id();
 
+	if (!ia64_idtrs[cpu]) {
+		ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
+				sizeof (struct ia64_tr_entry), GFP_KERNEL);
+		if (!ia64_idtrs[cpu])
+			return -ENOMEM;
+	}
 	r = -EINVAL;
 	/*Check overlap with existing TR entries*/
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][0];
+		p = ia64_idtrs[cpu];
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
 				if (is_tr_overlap(p, va, log_size)) {
 					printk(KERN_DEBUG "Overlapped Entry"
 						"Inserted for TR Reigster!!\n");
 					goto out;
 				}
 		}
 	}
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][0];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
 				if (is_tr_overlap(p, va, log_size)) {
 					printk(KERN_DEBUG "Overlapped Entry"
 						"Inserted for TR Reigster!!\n");
 					goto out;
 				}
 		}
 	}
 
 	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
 		switch (target_mask & 0x3) {
 		case 1:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 2:
-			if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 3:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
-			    !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
+			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		default:
 			r = -EINVAL;
 			goto out;
 		}
 	}
 found:
 	if (i >= per_cpu(ia64_tr_num, cpu))
 		return -EBUSY;
 
 	/*Record tr info for mca hander use!*/
 	if (i > per_cpu(ia64_tr_used, cpu))
 		per_cpu(ia64_tr_used, cpu) = i;
 
 	psr = ia64_clear_ic();
 	if (target_mask & 0x1) {
 		ia64_itr(0x1, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][0][i];
+		p = ia64_idtrs[cpu] + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
 		p->rr = ia64_get_rr(va);
 	}
 	if (target_mask & 0x2) {
 		ia64_itr(0x2, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][1][i];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
 		p->rr = ia64_get_rr(va);
 	}
 	ia64_set_psr(psr);
 	r = i;
 out:
[... 14 unchanged lines hidden ...]
 	int cpu = smp_processor_id();
 	int i;
 	struct ia64_tr_entry *p;
 
 	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
 		return;
 
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][slot];
+		p = ia64_idtrs[cpu] + slot;
 		if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x1, p->ifa, p->itir>>2);
 			ia64_srlz_i();
 		}
 	}
 
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][slot];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
 		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x2, p->ifa, p->itir>>2);
 			ia64_srlz_i();
 		}
 	}
 
 	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
-		if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
-		    (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
+		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 			break;
 	}
 	per_cpu(ia64_tr_used, cpu) = i;
 }
 EXPORT_SYMBOL_GPL(ia64_ptr_entry);
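
Taken together, the hunks above replace the statically sized `__per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX]` array with one lazily kmalloc'ed buffer per CPU (`ia64_idtrs[cpu]`), laid out as the instruction-TR slots at offset 0 followed by the data-TR slots at offset `IA64_TR_ALLOC_MAX`. The user-space sketch below illustrates only that index mapping; the simplified struct, the `tr_slot()` helper, and the `IA64_TR_ALLOC_MAX` value here are illustrative assumptions, not the kernel's definitions:

```c
#include <stdio.h>
#include <stdlib.h>

#define IA64_TR_ALLOC_MAX 64   /* assumed demo value, not the kernel's */

/* simplified stand-in for the kernel's struct ia64_tr_entry */
struct ia64_tr_entry {
	unsigned long ifa, itir, pte, rr;
};

/* One flat buffer per CPU: slots [0, MAX) hold the instruction-TR half,
 * slots [MAX, 2*MAX) hold the data-TR half. */
static struct ia64_tr_entry *idtrs;

/* hypothetical helper: maps the old [0/1][slot] pair onto the flat buffer */
static struct ia64_tr_entry *tr_slot(int is_dtr, int slot)
{
	return idtrs + (is_dtr ? IA64_TR_ALLOC_MAX : 0) + slot;
}

int main(void)
{
	idtrs = calloc(2 * IA64_TR_ALLOC_MAX, sizeof(*idtrs));
	if (!idtrs)
		return 1;

	/* old __per_cpu_idtrs[cpu][1][5] corresponds to base + MAX + 5 */
	tr_slot(1, 5)->pte = 0x1;
	printf("dtr slot 5 sits %ld entries from the base\n",
	       (long)(tr_slot(1, 5) - idtrs));

	free(idtrs);
	return 0;
}
```

The trade-off in the commit is that memory is no longer reserved for every possible CPU at build time: a CPU pays for its 2 * IA64_TR_ALLOC_MAX entries only when it first inserts a TR entry, at the cost that ia64_itr_entry() can now fail with -ENOMEM at that point.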