/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *	2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *		Renamed the modified smp_call_function to
 *		smp_call_function_on_cpu().  Created a function that
 *		conforms to the old calling convention of
 *		smp_call_function().
 *
 *		This is helpful for DCPI.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};

/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __initdata = 0;

/* Which cpu ids came online.  */
cpumask_t cpu_present_mask;
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

/* cpus reported in the hwrpb */
static unsigned long hwrpb_cpu_present_mask __initdata = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */

extern void calibrate_delay(void);


/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going.  */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified.  */
	if (alpha_mv.smp_callin) alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow master to continue only after we've written loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}
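/*
 * The startup handshake between smp_callin above and smp_boot_one_cpu
 * below is driven entirely by smp_secondary_alive.  A sketch of the
 * protocol, reconstructed from the code in this file:
 *
 *	boot CPU				secondary CPU
 *	--------				-------------
 *	smp_secondary_alive = -1;
 *	secondary_cpu_start(...);		smp_callin() begins
 *	smp_secondary_alive = 0;		wait_boot_cpu_to_stop()
 *						spins until the value is 0,
 *						then runs calibrate_delay()
 *	spins until the value is 1		smp_secondary_alive = 1;
 *
 * The mb()/wmb() pairs on both sides order the flag updates against
 * the secondary's write of loops_per_jiffy in smp_store_cpu_info.
 */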
/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __init
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}
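/*
 * For reference, the ipc_buffer layout that send_secondary_console_msg
 * uses when talking to a secondary's console -- a sketch inferred from
 * the code above (the SRM console specification is the authoritative
 * description, and the console-to-kernel direction handled in
 * recv_secondary_console_msg uses a different offset):
 *
 *	ipc_buffer[0]	message byte count, written as an unsigned int
 *	ipc_buffer[1..]	message text
 *
 * e.g. sending "START\r\n" stores 7 into word 0, copies the seven
 * message bytes starting at word 1, then sets the target CPU's bit in
 * hwrpb->rxrdy to tell its console the message is ready.
 */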
292 */ 293 static int __init 294 secondary_cpu_start(int cpuid, struct task_struct *idle) 295 { 296 struct percpu_struct *cpu; 297 struct pcb_struct *hwpcb, *ipcb; 298 unsigned long timeout; 299 300 cpu = (struct percpu_struct *) 301 ((char*)hwrpb 302 + hwrpb->processor_offset 303 + cpuid * hwrpb->processor_size); 304 hwpcb = (struct pcb_struct *) cpu->hwpcb; 305 ipcb = &idle->thread_info->pcb; 306 307 /* Initialize the CPU's HWPCB to something just good enough for 308 us to get started. Immediately after starting, we'll swpctx 309 to the target idle task's pcb. Reuse the stack in the mean 310 time. Precalculate the target PCBB. */ 311 hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16; 312 hwpcb->usp = 0; 313 hwpcb->ptbr = ipcb->ptbr; 314 hwpcb->pcc = 0; 315 hwpcb->asn = 0; 316 hwpcb->unique = virt_to_phys(ipcb); 317 hwpcb->flags = ipcb->flags; 318 hwpcb->res1 = hwpcb->res2 = 0; 319 320 #if 0 321 DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n", 322 hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique)); 323 #endif 324 DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n", 325 cpuid, idle->state, ipcb->flags)); 326 327 /* Setup HWRPB fields that SRM uses to activate secondary CPU */ 328 hwrpb->CPU_restart = __smp_callin; 329 hwrpb->CPU_restart_data = (unsigned long) __smp_callin; 330 331 /* Recalculate and update the HWRPB checksum */ 332 hwrpb_update_checksum(hwrpb); 333 334 /* 335 * Send a "start" command to the specified processor. 336 */ 337 338 /* SRM III 3.4.1.3 */ 339 cpu->flags |= 0x22; /* turn on Context Valid and Restart Capable */ 340 cpu->flags &= ~1; /* turn off Bootstrap In Progress */ 341 wmb(); 342 343 send_secondary_console_msg("START\r\n", cpuid); 344 345 /* Wait 10 seconds for an ACK from the console. */ 346 timeout = jiffies + 10*HZ; 347 while (time_before(jiffies, timeout)) { 348 if (cpu->flags & 1) 349 goto started; 350 udelay(10); 351 barrier(); 352 } 353 printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid); 354 return -1; 355 356 started: 357 DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid)); 358 return 0; 359 } 360 361 /* 362 * Bring one cpu online. 363 */ 364 static int __init 365 smp_boot_one_cpu(int cpuid) 366 { 367 struct task_struct *idle; 368 unsigned long timeout; 369 370 /* Cook up an idler for this guy. Note that the address we 371 give to kernel_thread is irrelevant -- it's going to start 372 where HWRPB.CPU_restart says to start. But this gets all 373 the other task-y sort of data structures set up like we 374 wish. We can't use kernel_thread since we must avoid 375 rescheduling the child. */ 376 idle = fork_idle(cpuid); 377 if (IS_ERR(idle)) 378 panic("failed fork for CPU %d", cpuid); 379 380 DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n", 381 cpuid, idle->state, idle->flags)); 382 383 /* Signal the secondary to wait a moment. */ 384 smp_secondary_alive = -1; 385 386 /* Whirrr, whirrr, whirrrrrrrrr... */ 387 if (secondary_cpu_start(cpuid, idle)) 388 return -1; 389 390 /* Notify the secondary CPU it can run calibrate_delay. */ 391 mb(); 392 smp_secondary_alive = 0; 393 394 /* We've been acked by the console; wait one second for 395 the task to start up for real. */ 396 timeout = jiffies + 1*HZ; 397 while (time_before(jiffies, timeout)) { 398 if (smp_secondary_alive == 1) 399 goto alive; 400 udelay(10); 401 barrier(); 402 } 403 404 /* We failed to boot the CPU. */ 405 406 printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid); 407 return -1; 408 409 alive: 410 /* Another "Red Snapper". 
*/ 411 return 0; 412 } 413 414 /* 415 * Called from setup_arch. Detect an SMP system and which processors 416 * are present. 417 */ 418 void __init 419 setup_smp(void) 420 { 421 struct percpu_struct *cpubase, *cpu; 422 unsigned long i; 423 424 if (boot_cpuid != 0) { 425 printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n", 426 boot_cpuid); 427 } 428 429 if (hwrpb->nr_processors > 1) { 430 int boot_cpu_palrev; 431 432 DBGS(("setup_smp: nr_processors %ld\n", 433 hwrpb->nr_processors)); 434 435 cpubase = (struct percpu_struct *) 436 ((char*)hwrpb + hwrpb->processor_offset); 437 boot_cpu_palrev = cpubase->pal_revision; 438 439 for (i = 0; i < hwrpb->nr_processors; i++) { 440 cpu = (struct percpu_struct *) 441 ((char *)cpubase + i*hwrpb->processor_size); 442 if ((cpu->flags & 0x1cc) == 0x1cc) { 443 smp_num_probed++; 444 /* Assume here that "whami" == index */ 445 hwrpb_cpu_present_mask |= (1UL << i); 446 cpu->pal_revision = boot_cpu_palrev; 447 } 448 449 DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n", 450 i, cpu->flags, cpu->type)); 451 DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n", 452 i, cpu->pal_revision)); 453 } 454 } else { 455 smp_num_probed = 1; 456 hwrpb_cpu_present_mask = (1UL << boot_cpuid); 457 } 458 cpu_present_mask = cpumask_of_cpu(boot_cpuid); 459 460 printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n", 461 smp_num_probed, hwrpb_cpu_present_mask); 462 } 463 464 /* 465 * Called by smp_init prepare the secondaries 466 */ 467 void __init 468 smp_prepare_cpus(unsigned int max_cpus) 469 { 470 int cpu_count, i; 471 472 /* Take care of some initial bookkeeping. */ 473 memset(ipi_data, 0, sizeof(ipi_data)); 474 475 current_thread_info()->cpu = boot_cpuid; 476 477 smp_store_cpu_info(boot_cpuid); 478 smp_setup_percpu_timer(boot_cpuid); 479 480 /* Nothing to do on a UP box, or when told not to. */ 481 if (smp_num_probed == 1 || max_cpus == 0) { 482 cpu_present_mask = cpumask_of_cpu(boot_cpuid); 483 printk(KERN_INFO "SMP mode deactivated.\n"); 484 return; 485 } 486 487 printk(KERN_INFO "SMP starting up secondaries.\n"); 488 489 cpu_count = 1; 490 for (i = 0; (i < NR_CPUS) && (cpu_count < max_cpus); i++) { 491 if (i == boot_cpuid) 492 continue; 493 494 if (((hwrpb_cpu_present_mask >> i) & 1) == 0) 495 continue; 496 497 cpu_set(i, cpu_possible_map); 498 cpu_count++; 499 } 500 501 smp_num_cpus = cpu_count; 502 } 503 504 void __devinit 505 smp_prepare_boot_cpu(void) 506 { 507 /* 508 * Mark the boot cpu (current cpu) as both present and online 509 */ 510 cpu_set(smp_processor_id(), cpu_present_mask); 511 cpu_set(smp_processor_id(), cpu_online_map); 512 } 513 514 int __devinit 515 __cpu_up(unsigned int cpu) 516 { 517 smp_boot_one_cpu(cpu); 518 519 return cpu_online(cpu) ? 0 : -ENOSYS; 520 } 521 522 void __init 523 smp_cpus_done(unsigned int max_cpus) 524 { 525 int cpu; 526 unsigned long bogosum = 0; 527 528 for(cpu = 0; cpu < NR_CPUS; cpu++) 529 if (cpu_online(cpu)) 530 bogosum += cpu_data[cpu].loops_per_jiffy; 531 532 printk(KERN_INFO "SMP: Total of %d processors activated " 533 "(%lu.%02lu BogoMIPS).\n", 534 num_online_cpus(), 535 (bogosum + 2500) / (500000/HZ), 536 ((bogosum + 2500) / (5000/HZ)) % 100); 537 } 538 539 540 void 541 smp_percpu_timer_interrupt(struct pt_regs *regs) 542 { 543 int cpu = smp_processor_id(); 544 unsigned long user = user_mode(regs); 545 struct cpuinfo_alpha *data = &cpu_data[cpu]; 546 547 /* Record kernel PC. 
void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING, regs);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
}

int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}


static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}

/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;

/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially zero.  If retry, spin until free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}
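/*
 * In plain C, the LL/SC sequence in pointer_lock is a compare-and-swap
 * against zero -- roughly this sketch (illustrative only; the asm is
 * what actually runs):
 *
 *	do {
 *		old = *(void **)lock;		// ldq_l: load-locked
 *		if (old != 0)
 *			break;			// bne: already held
 *	} while (!store_conditional(lock, data));	// stq_c may fail
 *
 * where store_conditional is a hypothetical helper standing in for the
 * stq_c instruction, which succeeds only if no other CPU has written
 * the location since the matching ldq_l.
 */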
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing.  */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access.  */
		do {
			unsigned long which;

			which = ops & -ops;
			ops &= ~which;
			which = __ffs(which);

			switch (which) {
			case IPI_RESCHEDULE:
				/* Reschedule callback.  Everything to be done
				   is done by the interrupt return path.  */
				break;

			case IPI_CALL_FUNC:
			    {
				struct smp_call_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				data = smp_call_function_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				/* Notify the sending CPU that the data has been
				   received, and execution is about to begin.  */
				mb();
				atomic_dec (&data->unstarted_count);

				/* At this point the structure may be gone unless
				   wait is true.  */
				(*func)(info);

				/* Notify the sending CPU that the task is done.  */
				mb();
				if (wait) atomic_dec (&data->unfinished_count);
				break;
			    }

			case IPI_CPU_STOP:
				halt();

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
				       this_cpu, which);
				break;
			}
		} while (ops);

		mb();	/* Order data access and bit testing.  */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}
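/*
 * The dispatch loop above peels one message at a time off the pending
 * mask: which = ops & -ops isolates the lowest set bit, and __ffs
 * turns it back into a message number.  For example, per the enum at
 * the top of this file, if ops == 0x5 (IPI_RESCHEDULE and IPI_CPU_STOP
 * both pending):
 *
 *	iteration 1: ops & -ops == 0x1 -> __ffs == 0 == IPI_RESCHEDULE
 *	iteration 2: ops & -ops == 0x4 -> __ffs == 2 == IPI_CPU_STOP
 */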
void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 */

int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, cpumask_t to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), to_whom);
	num_cpus_to_call = cpus_weight(to_whom);

	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	 * timeout period -- if we get a response this time, log a
	 * message saying when we got it.
	 */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = NULL;

	/*
	 * If after both the initial and long timeout periods we still
	 * don't have a response, something is very wrong...
	 */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_online_map);
}
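/*
 * Typical usage of smp_call_function (a hypothetical example, not code
 * from elsewhere in the tree): run a fast, non-blocking function on
 * every other online CPU and wait for all of them to finish:
 *
 *	static void my_drain(void *info)	-- hypothetical callback
 *	{
 *		...
 *	}
 *
 *	if (smp_call_function(my_drain, NULL, 1, 1))
 *		printk(KERN_CRIT "my_drain: timed out\n");
 *
 * The ipi_imb/smp_imb pair below is a real in-file instance of the
 * same pattern, via on_each_cpu.
 */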
static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for other processors to flush their icache before
	   continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}