/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 *
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/bitops.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"


#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif
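
/* Note the double parentheses at DBGS call sites below: the whole
   printk argument list is passed as a single macro argument, so the
   no-op variant compiles it away entirely, e.g.
   DBGS(("smp: cpu %d alive\n", cpuid));  */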

/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];
EXPORT_SYMBOL(cpu_data);

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
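
/* Each message type is a bit number within ipi_data[cpu].bits: the
   sender does set_bit(operation, &ipi_data[cpu].bits) and raises an
   interprocessor interrupt, and the receiver atomically xchg's the
   whole word to collect all pending messages at once.  See
   send_ipi_message() and handle_ipi() below.  */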

/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __devinitdata = 0;

/* Which cpu ids came online.  */
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
EXPORT_SYMBOL(smp_num_cpus);

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}

static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}

/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going. */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin) alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with IRQs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}

/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __devinit
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}

/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}
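
/* ipc_buffer layout written above (quadword indexed):
     ipc_buffer[0]  message length in bytes, in the low 32 bits
     ipc_buffer[1]  start of the message text
   Setting this cpu's bit in hwrpb->rxrdy tells the console there is
   a message waiting in the buffer.  */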

/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		  ((char*)hwrpb
		   + hwrpb->processor_offset
		   + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}

/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &task_thread_info(idle)->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
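
	/* For reference (bit names per the SRM console architecture;
	   the kernel just uses the raw masks above):
	     0x01  BIP, Bootstrap In Progress
	     0x02  RC,  Restart Capable
	     0x20  CV,  Context Valid  */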
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}

/*
 * Bring one cpu online.
 */
static int __cpuinit
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_idle(cpuid);
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}
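
/* Summary of the smp_secondary_alive handshake implemented above and
   in smp_callin():
     -1  boot CPU is busy starting the secondary; the secondary spins
         in wait_boot_cpu_to_stop()
      0  boot CPU has quiesced; the secondary may run calibrate_delay()
      1  secondary has calibrated and stored its cpu info; the boot
         CPU may proceed  */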

/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	unsigned long i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				cpu_set(i, cpu_present_map);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
	}

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
	       smp_num_probed, cpu_present_map.bits[0]);
}

/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_map = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	smp_num_cpus = smp_num_probed;
}

void __devinit
smp_prepare_boot_cpu(void)
{
}

int __cpuinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for(cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

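	/* BogoMIPS == loops_per_jiffy / (500000/HZ); the last two
	   printk arguments below extract the integer part and a
	   two-decimal fractional part, with +2500 as a
	   round-to-nearest term (exact when 500000/HZ == 5000,
	   i.e. HZ == 100).  */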
	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}


void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	old_regs = set_irq_regs(regs);

	/* Record kernel PC.  */
	profile_tick(CPU_PROFILING);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
	set_irq_regs(old_regs);
}

int
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}


static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}
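
/* The first mb() above orders the caller's data (e.g. a pending
   smp_call_function payload) before the message bit becomes visible;
   the second orders the bit before the IPI itself is raised via
   wripir().  handle_ipi() pairs with these using the mb() calls
   around its xchg of the pending mask.  */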

void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

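		/* ops & -ops isolates the lowest set bit, and __ffs
		   converts it to a message number: e.g. for ops == 0x6
		   (IPI_CALL_FUNC and IPI_CALL_FUNC_SINGLE both
		   pending), which becomes 0x2 and then 1, so
		   IPI_CALL_FUNC is handled first.  */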
		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);

		switch (which) {
		case IPI_RESCHEDULE:
			/* Reschedule callback.  Everything to be done
			   is done by the interrupt return path.  */
			break;

		case IPI_CALL_FUNC:
			generic_smp_call_function_interrupt();
			break;

		case IPI_CALL_FUNC_SINGLE:
			generic_smp_call_function_single_interrupt();
			break;

		case IPI_CPU_STOP:
			halt();
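			/* NOTREACHED -- halt() does not return, so we
			   never fall through to the default case.  */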

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}

void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
}

static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for other processors to flush their icaches
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
EXPORT_SYMBOL(smp_imb);

static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}

#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
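			/* Single-threaded mm: instead of an IPI, zap
			   the remote ASNs so that the next CPU to
			   activate this mm allocates a fresh context
			   (the same trick is used in flush_tlb_page
			   and flush_icache_user_range below).  */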
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}

void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}
824