// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>

#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/loongson.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/time.h>

int __cpu_number_map[NR_CPUS];	/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];	/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Representing the threads (siblings) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

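/* Completions used to synchronize the boot CPU with a starting secondary */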
static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

struct secondary_data cpuboot_data;
static DEFINE_PER_CPU(int, cpu_state);

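/*
 * Indices for per-CPU IPI statistics and their printable names; the bits
 * actually sent and received on the wire are the SMP_RESCHEDULE and
 * SMP_CALL_FUNCTION action masks used below.
 */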
enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNCTION,
};

static const char *ipi_types[NR_IPI] __tracepoint_string = {
	[IPI_RESCHEDULE] = "Rescheduling interrupts",
	[IPI_CALL_FUNCTION] = "Function call interrupts",
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
		seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
	}
}

static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	for_each_cpu(i, &cpu_sibling_setup_map) {
		if (cpus_are_siblings(cpu, i)) {
			cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
		}
	}
}

static inline void clear_cpu_sibling_map(int cpu)
{
	int i;

	for_each_cpu(i, &cpu_sibling_setup_map) {
		if (cpus_are_siblings(cpu, i)) {
			cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
			cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
		}
	}

	cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

/* Send mailbox buffer via Mail_Send */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
	uint64_t val;

	/* Send high 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data & IOCSR_MBUF_SEND_H32_MASK);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);

	/* Send low 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
}

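/* Read the pending IPI actions for this CPU and acknowledge them */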
static u32 ipi_read_clear(int cpu)
{
	u32 action;

	/* Load the ipi register to figure out what we're supposed to do */
	action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
	/* Clear the ipi register to clear the interrupt */
	iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
	wbflush();

	return action;
}

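/* Post each requested action bit to the target CPU via IOCSR IPI_SEND */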
static void ipi_write_action(int cpu, u32 action)
{
	unsigned int irq = 0;

	while ((irq = ffs(action))) {
		uint32_t val = IOCSR_IPI_SEND_BLOCKING;

		val |= (irq - 1);
		val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
		iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
		action &= ~BIT(irq - 1);
	}
}

void loongson_send_ipi_single(int cpu, unsigned int action)
{
	ipi_write_action(cpu_logical_map(cpu), (u32)action);
}

void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		ipi_write_action(cpu_logical_map(i), (u32)action);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void arch_smp_send_reschedule(int cpu)
{
	loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);

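/* Common IPI handler: dispatch on the action bits read from IPI_STATUS */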
irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
	unsigned int action;
	unsigned int cpu = smp_processor_id();

	action = ipi_read_clear(cpu_logical_map(cpu));

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	return IRQ_HANDLED;
}

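/*
 * Enumerate CPUs from the devicetree (a no-op without CONFIG_OF). The boot
 * CPU is always mapped to logical CPU 0; every other CPU takes the next
 * free slot in the present mask.
 */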
static void __init fdt_smp_setup(void)
{
#ifdef CONFIG_OF
	unsigned int cpu, cpuid;
	struct device_node *node = NULL;

	for_each_of_cpu_node(node) {
		if (!of_device_is_available(node))
			continue;

		cpuid = of_get_cpu_hwid(node, 0);
		if (cpuid >= nr_cpu_ids)
			continue;

		if (cpuid == loongson_sysconf.boot_cpu_id)
			cpu = 0;
		else
			cpu = cpumask_next_zero(-1, cpu_present_mask);

		num_processors++;
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
		__cpu_number_map[cpuid] = cpu;
		__cpu_logical_map[cpu] = cpuid;

		early_numa_add_cpu(cpu, 0);
		set_cpuid_to_node(cpuid, 0);
	}

	loongson_sysconf.nr_cpus = num_processors;
	set_bit(0, &(loongson_sysconf.cores_io_master));
#endif
}

void __init loongson_smp_setup(void)
{
	fdt_smp_setup();

	cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
	cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;

	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
	pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
}

void __init loongson_prepare_cpus(unsigned int max_cpus)
{
	int i = 0;

	parse_acpi_topology();

	for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
		set_cpu_present(i, true);
		csr_mail_send(0, __cpu_logical_map[i], 0);
		cpu_data[i].global_id = __cpu_logical_map[i];
	}

	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}

/*
 * Set up the PC, SP, and TP of a secondary processor and start it running!
 */
void loongson_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long entry;

	pr_info("Booting CPU#%d...\n", cpu);

	entry = __pa_symbol((unsigned long)&smpboot_entry);
	cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
	cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);

	csr_mail_send(entry, cpu_logical_map(cpu), 0);

	loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
}

/*
 * SMP init and finish on secondary CPUs
 */
void loongson_init_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
			     ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;

	change_csr_ecfg(ECFG0_IM, imask);

	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);

#ifdef CONFIG_NUMA
	numa_add_cpu(cpu);
#endif
	per_cpu(cpu_state, cpu) = CPU_ONLINE;
	cpu_data[cpu].package =
		cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
	cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
		cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
}

void loongson_smp_finish(void)
{
	local_irq_enable();
	iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
	pr_info("CPU#%d finished\n", smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU

int loongson_cpu_disable(void)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (io_master(cpu))
		return -EBUSY;

#ifdef CONFIG_NUMA
	numa_remove_cpu(cpu);
#endif
	set_cpu_online(cpu, false);
	clear_cpu_sibling_map(cpu);
	calculate_cpu_foreign_map();
	local_irq_save(flags);
	irq_migrate_all_off_this_cpu();
	clear_csr_ecfg(ECFG0_IM);
	local_irq_restore(flags);
	local_flush_tlb_all();

	return 0;
}

void loongson_cpu_die(unsigned int cpu)
{
	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	mb();
}

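/*
 * An offlined CPU parks here: it waits in 'idle' until the boot CPU
 * deposits a new entry address in mailbox 0, then jumps to it.
 */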
void __noreturn arch_cpu_idle_dead(void)
{
	register uint64_t addr;
	register void (*init_fn)(void);

	idle_task_exit();
	local_irq_enable();
	set_csr_ecfg(ECFGF_IPI);
	__this_cpu_write(cpu_state, CPU_DEAD);

	__smp_mb();
	do {
		__asm__ __volatile__("idle 0\n\t");
		addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
	} while (addr == 0);

	local_irq_disable();
	init_fn = (void *)TO_CACHE(addr);
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

	init_fn();
	BUG();
}

#endif

/*
 * Power management
 */
#ifdef CONFIG_PM

static int loongson_ipi_suspend(void)
{
	return 0;
}

static void loongson_ipi_resume(void)
{
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
}

static struct syscore_ops loongson_ipi_syscore_ops = {
	.resume = loongson_ipi_resume,
	.suspend = loongson_ipi_suspend,
};

/*
 * Enable boot cpu ipi before enabling nonboot cpus
 * during syscore_resume.
 */
static int __init ipi_pm_init(void)
{
	register_syscore_ops(&loongson_ipi_syscore_ops);
	return 0;
}

core_initcall(ipi_pm_init);
#endif

/* Preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	unsigned int cpu, node, rr_node;

	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	set_my_cpu_offset(per_cpu_offset(0));
	numa_add_cpu(0);

	rr_node = first_node(node_online_map);
	for_each_possible_cpu(cpu) {
		node = early_cpu_to_node(cpu);

		/*
		 * The mapping between present cpus and nodes has been
		 * built during MADT and SRAT parsing.
		 *
		 * If possible cpus = present cpus here, early_cpu_to_node
		 * will return valid node.
		 *
		 * If possible cpus > present cpus here (e.g. some possible
		 * cpus will be added by cpu-hotplug later), for possible but
		 * not present cpus, early_cpu_to_node will return NUMA_NO_NODE,
		 * and we just map them to online nodes in round-robin way.
		 * Once hotplugged, new correct mapping will be built for them.
		 */
		if (node != NUMA_NO_NODE) {
			set_cpu_numa_node(cpu, node);
		} else {
			set_cpu_numa_node(cpu, rr_node);
			rr_node = next_node_in(rr_node, node_online_map);
		}
	}
}

/* Called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	loongson_prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	loongson_boot_secondary(cpu, tidle);

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(5000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);

	return 0;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	sync_counter();
	cpu = raw_smp_processor_id();
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_probe();
	constant_clockevent_init();
	loongson_init_secondary();

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting */
	complete(&cpu_starting);

	/* The CPU is running, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up()
	 */
	complete(&cpu_running);

	/*
	 * IRQs will be enabled in loongson_smp_finish(); enabling them too
	 * early is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	loongson_smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

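/* smp_send_stop() callback: take this CPU offline and spin with IRQs off */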
static void stop_this_cpu(void *dummy)
{
	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (true);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#endif

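/*
 * Cross-CPU TLB flushes are driven by IPIs. For mm-scoped flushes, a remote
 * CPU that is not currently running the mm simply has its ASID invalidated
 * (cpu_context set to 0) instead of receiving an IPI.
 */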
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (atomic_read(&mm->mm_users) == 0)
		return;	/* happens as a result of exit_mmap() */

	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_mm(mm);
	}

	preempt_enable();
}

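/* Arguments marshalled for the ranged TLB-flush IPI handlers */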
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
}
EXPORT_SYMBOL(flush_tlb_one);