// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>

#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/loongson.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/time.h>

int __cpu_number_map[NR_CPUS];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];  /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Representing the threads (siblings) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);
/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

struct secondary_data cpuboot_data;
static DEFINE_PER_CPU(int, cpu_state);

enum ipi_msg_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNCTION,
};

static const char *ipi_types[NR_IPI] __tracepoint_string = {
        [IPI_RESCHEDULE] = "Rescheduling interrupts",
        [IPI_CALL_FUNCTION] = "Function call interrupts",
};

void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
                seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
        }
}

static inline void set_cpu_core_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_core_setup_map);

        for_each_cpu(i, &cpu_core_setup_map) {
                if (cpu_data[cpu].package == cpu_data[i].package) {
                        cpumask_set_cpu(i, &cpu_core_map[cpu]);
                        cpumask_set_cpu(cpu, &cpu_core_map[i]);
                }
        }
}
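
/*
 * Note (added commentary): the mask is set in both directions on purpose,
 * so cpu_core_map stays symmetric: when CPU n joins, every already-set-up
 * CPU in the same package gains n in its map, and n inherits all of them
 * in the same pass. set_cpu_sibling_map() below follows the same pattern.
 */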

static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

        for_each_cpu(i, &cpu_sibling_setup_map) {
                if (cpus_are_siblings(cpu, i)) {
                        cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
                        cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                }
        }
}

static inline void clear_cpu_sibling_map(int cpu)
{
        int i;

        for_each_cpu(i, &cpu_sibling_setup_map) {
                if (cpus_are_siblings(cpu, i)) {
                        cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
                        cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
                }
        }

        cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
        int i, k, core_present;
        cpumask_t temp_foreign_map;

        /* Re-calculate the mask */
        cpumask_clear(&temp_foreign_map);
        for_each_online_cpu(i) {
                core_present = 0;
                for_each_cpu(k, &temp_foreign_map)
                        if (cpus_are_siblings(i, k))
                                core_present = 1;
                if (!core_present)
                        cpumask_set_cpu(i, &temp_foreign_map);
        }

        for_each_online_cpu(i)
                cpumask_andnot(&cpu_foreign_map[i],
                               &temp_foreign_map, &cpu_sibling_map[i]);
}
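
/*
 * Worked example (hypothetical topology): with two online cores of two
 * SMT threads each (CPUs 0/1 siblings, CPUs 2/3 siblings), the loop above
 * leaves temp_foreign_map = {0, 2}, one representative thread per core.
 * Then cpu_foreign_map[0] = cpu_foreign_map[1] = {2} and
 * cpu_foreign_map[2] = cpu_foreign_map[3] = {0}, so a broadcast only has
 * to IPI one thread per foreign core.
 */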

/* Send mailbox buffer via Mail_Send */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
        uint64_t val;

        /* Send high 32 bits */
        val = IOCSR_MBUF_SEND_BLOCKING;
        val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
        val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
        val |= (data & IOCSR_MBUF_SEND_H32_MASK);
        iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);

        /* Send low 32 bits */
        val = IOCSR_MBUF_SEND_BLOCKING;
        val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
        val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
        val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
        iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
}
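
/*
 * The 64-bit payload is delivered as two IOCSR writes because each
 * MBUF_SEND transaction carries a 32-bit half; BOX_HI/BOX_LO select which
 * half of the same mailbox is written. Illustrative (hypothetical) use:
 * csr_mail_send(entry_pa, 1, 0) would deposit a kernel entry address into
 * mailbox 0 of physical CPU 1, as loongson_boot_secondary() does below.
 */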

static u32 ipi_read_clear(int cpu)
{
        u32 action;

        /* Load the ipi register to figure out what we're supposed to do */
        action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
        /* Clear the ipi register to clear the interrupt */
        iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
        wbflush();

        return action;
}

static void ipi_write_action(int cpu, u32 action)
{
        unsigned int irq = 0;

        while ((irq = ffs(action))) {
                uint32_t val = IOCSR_IPI_SEND_BLOCKING;

                val |= (irq - 1);
                val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
                iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
                action &= ~BIT(irq - 1);
        }
}
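
/*
 * Illustrative example: ipi_write_action(cpu, SMP_RESCHEDULE |
 * SMP_CALL_FUNCTION) issues one IOCSR_IPI_SEND write per set bit, each
 * encoding the bit index (irq - 1) plus the target physical CPU, and
 * ffs() drains the action mask bit by bit until it is zero.
 */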

void loongson_send_ipi_single(int cpu, unsigned int action)
{
        ipi_write_action(cpu_logical_map(cpu), (u32)action);
}

void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
        unsigned int i;

        for_each_cpu(i, mask)
                ipi_write_action(cpu_logical_map(i), (u32)action);
}
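
/*
 * Both helpers take *logical* CPU numbers and translate them through
 * cpu_logical_map() before touching the hardware. A sketch of a
 * hypothetical caller:
 *
 *      loongson_send_ipi_mask(cpu_online_mask, SMP_CALL_FUNCTION);
 *
 * which would raise a function-call IPI on every online CPU.
 */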

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void arch_smp_send_reschedule(int cpu)
{
        loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);

irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
        unsigned int action;
        unsigned int cpu = smp_processor_id();

        action = ipi_read_clear(cpu_logical_map(cpu));

        if (action & SMP_RESCHEDULE) {
                scheduler_ipi();
                per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
        }

        if (action & SMP_CALL_FUNCTION) {
                generic_smp_call_function_interrupt();
                per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
        }

        return IRQ_HANDLED;
}
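
/*
 * Because ipi_read_clear() returns the accumulated status bits, one
 * hardware interrupt can dispatch both a reschedule and a function-call
 * IPI in a single pass; the per-CPU ipi_irqs[] counters it bumps are the
 * ones reported by show_ipi_list() above.
 */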

static void __init fdt_smp_setup(void)
{
#ifdef CONFIG_OF
        unsigned int cpu, cpuid;
        struct device_node *node = NULL;

        for_each_of_cpu_node(node) {
                if (!of_device_is_available(node))
                        continue;

                cpuid = of_get_cpu_hwid(node, 0);
                if (cpuid >= nr_cpu_ids)
                        continue;

                if (cpuid == loongson_sysconf.boot_cpu_id)
                        cpu = 0;
                else
                        cpu = cpumask_next_zero(-1, cpu_present_mask);

                num_processors++;
                set_cpu_possible(cpu, true);
                set_cpu_present(cpu, true);
                __cpu_number_map[cpuid] = cpu;
                __cpu_logical_map[cpu] = cpuid;

                early_numa_add_cpu(cpuid, 0);
                set_cpuid_to_node(cpuid, 0);
        }

        loongson_sysconf.nr_cpus = num_processors;
        set_bit(0, &(loongson_sysconf.cores_io_master));
#endif
}
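
/*
 * Illustrative mapping (assumed device tree, parse order matters): if the
 * boot CPU has hwid 4 and its node is seen first, it becomes logical CPU 0
 * (__cpu_number_map[4] = 0, __cpu_logical_map[0] = 4); a later node with
 * hwid 0 then takes the first free present slot and becomes logical CPU 1.
 */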

void __init loongson_smp_setup(void)
{
        fdt_smp_setup();

        cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
        cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;

        iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
        pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
}

void __init loongson_prepare_cpus(unsigned int max_cpus)
{
        int i = 0;

        parse_acpi_topology();

        for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
                set_cpu_present(i, true);
                csr_mail_send(0, __cpu_logical_map[i], 0);
                cpu_data[i].global_id = __cpu_logical_map[i];
        }

        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}

/*
 * Set up the PC, SP, and TP of a secondary processor and start it running!
 */
void loongson_boot_secondary(int cpu, struct task_struct *idle)
{
        unsigned long entry;

        pr_info("Booting CPU#%d...\n", cpu);

        entry = __pa_symbol((unsigned long)&smpboot_entry);
        cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
        cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);

        csr_mail_send(entry, cpu_logical_map(cpu), 0);

        loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
}
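
/*
 * Boot handshake, in order: the physical address of smpboot_entry is
 * deposited in the target CPU's mailbox 0, then an SMP_BOOT_CPU IPI wakes
 * the CPU (parked in firmware, or in idle_play_dead() after a hot-unplug);
 * the woken CPU jumps to the mailbox address and picks up its stack and
 * thread_info from cpuboot_data.
 */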

/*
 * SMP init and finish on secondary CPUs
 */
void loongson_init_secondary(void)
{
        unsigned int cpu = smp_processor_id();
        unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
                             ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;

        change_csr_ecfg(ECFG0_IM, imask);

        iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);

#ifdef CONFIG_NUMA
        numa_add_cpu(cpu);
#endif
        per_cpu(cpu_state, cpu) = CPU_ONLINE;
        cpu_data[cpu].package =
                     cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
        cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
                     cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
}

void loongson_smp_finish(void)
{
        local_irq_enable();
        iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
        pr_info("CPU#%d finished\n", smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU

int loongson_cpu_disable(void)
{
        unsigned long flags;
        unsigned int cpu = smp_processor_id();

        if (io_master(cpu))
                return -EBUSY;

#ifdef CONFIG_NUMA
        numa_remove_cpu(cpu);
#endif
        set_cpu_online(cpu, false);
        clear_cpu_sibling_map(cpu);
        calculate_cpu_foreign_map();
        local_irq_save(flags);
        irq_migrate_all_off_this_cpu();
        clear_csr_ecfg(ECFG0_IM);
        local_irq_restore(flags);
        local_flush_tlb_all();

        return 0;
}

void loongson_cpu_die(unsigned int cpu)
{
        while (per_cpu(cpu_state, cpu) != CPU_DEAD)
                cpu_relax();

        mb();
}

static void __noreturn idle_play_dead(void)
{
        register uint64_t addr;
        register void (*init_fn)(void);

        idle_task_exit();
        local_irq_enable();
        set_csr_ecfg(ECFGF_IPI);
        __this_cpu_write(cpu_state, CPU_DEAD);

        __smp_mb();
        do {
                __asm__ __volatile__("idle 0\n\t");
                addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
        } while (addr == 0);

        local_irq_disable();
        init_fn = (void *)TO_CACHE(addr);
        iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

        init_fn();
        BUG();
}

#ifdef CONFIG_HIBERNATION
static void __noreturn poll_play_dead(void)
{
        register uint64_t addr;
        register void (*init_fn)(void);

        idle_task_exit();
        __this_cpu_write(cpu_state, CPU_DEAD);

        __smp_mb();
        do {
                __asm__ __volatile__("nop\n\t");
                addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
        } while (addr == 0);

        init_fn = (void *)TO_CACHE(addr);
        iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

        init_fn();
        BUG();
}
#endif
437 #endif
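
/*
 * The two play-dead variants differ only in how they wait for the wakeup
 * mailbox: idle_play_dead() sleeps in 'idle 0' with the IPI interrupt
 * armed, while poll_play_dead() busy-spins on 'nop' with interrupts left
 * untouched, so the hibernation path never depends on IPI delivery.
 */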

static void (*play_dead)(void) = idle_play_dead;

void __noreturn arch_cpu_idle_dead(void)
{
        play_dead();
        BUG(); /* play_dead() doesn't return */
}

#ifdef CONFIG_HIBERNATION
int hibernate_resume_nonboot_cpu_disable(void)
{
        int ret;

        play_dead = poll_play_dead;
        ret = suspend_disable_secondary_cpus();
        play_dead = idle_play_dead;

        return ret;
}
#endif

#endif

/*
 * Power management
 */
#ifdef CONFIG_PM

static int loongson_ipi_suspend(void)
{
        return 0;
}

static void loongson_ipi_resume(void)
{
        iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
}

static struct syscore_ops loongson_ipi_syscore_ops = {
        .resume = loongson_ipi_resume,
        .suspend = loongson_ipi_suspend,
};

/*
 * Enable the boot CPU's IPIs before enabling nonboot CPUs
 * during syscore_resume.
 */
static int __init ipi_pm_init(void)
{
        register_syscore_ops(&loongson_ipi_syscore_ops);
        return 0;
}

core_initcall(ipi_pm_init);
#endif

/* Preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
        unsigned int cpu, node, rr_node;

        set_cpu_possible(0, true);
        set_cpu_online(0, true);
        set_my_cpu_offset(per_cpu_offset(0));
        numa_add_cpu(0);

        rr_node = first_node(node_online_map);
        for_each_possible_cpu(cpu) {
                node = early_cpu_to_node(cpu);

                /*
                 * The mapping between present cpus and nodes has been
                 * built during MADT and SRAT parsing.
                 *
                 * If possible cpus = present cpus here, early_cpu_to_node
                 * will return a valid node.
                 *
                 * If possible cpus > present cpus here (e.g. some possible
                 * cpus will be added by cpu-hotplug later), for possible but
                 * not present cpus, early_cpu_to_node will return NUMA_NO_NODE,
                 * and we just map them to online nodes in a round-robin way.
                 * Once hotplugged, a correct mapping will be built for them.
                 */
                if (node != NUMA_NO_NODE)
                        set_cpu_numa_node(cpu, node);
                else {
                        set_cpu_numa_node(cpu, rr_node);
                        rr_node = next_node_in(rr_node, node_online_map);
                }
        }
}
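
/*
 * Round-robin fallback example (hypothetical): with nodes {0, 1} online
 * and possible CPUs 4-7 not yet present, CPUs 4 and 6 get node 0 while
 * CPUs 5 and 7 get node 1, until hotplug establishes their real affinity.
 */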

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        loongson_prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
        set_cpu_core_map(0);
        calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        loongson_boot_secondary(cpu, tidle);

        /* Wait for CPU to start and be ready to sync counters */
        if (!wait_for_completion_timeout(&cpu_starting,
                                         msecs_to_jiffies(5000))) {
                pr_crit("CPU%u: failed to start\n", cpu);
                return -EIO;
        }

        /* Wait for CPU to finish startup & mark itself online before return */
        wait_for_completion(&cpu_running);

        return 0;
}
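
/*
 * Handshake summary: start_secondary() completes cpu_starting right after
 * notify_cpu_starting(), and cpu_running only once it has marked itself
 * online; only the first wait is bounded (5 s), on the theory that a CPU
 * which never signals cpu_starting failed to boot at all.
 */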

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
        unsigned int cpu;

        sync_counter();
        cpu = raw_smp_processor_id();
        set_my_cpu_offset(per_cpu_offset(cpu));

        cpu_probe();
        constant_clockevent_init();
        loongson_init_secondary();

        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);

        notify_cpu_starting(cpu);

        /* Notify boot CPU that we're starting */
        complete(&cpu_starting);

        /* The CPU is running, now mark it online */
        set_cpu_online(cpu, true);

        calculate_cpu_foreign_map();

        /*
         * Notify boot CPU that we're up & online and it can safely return
         * from __cpu_up()
         */
        complete(&cpu_running);

        /*
         * IRQs will be enabled in loongson_smp_finish(); enabling them too
         * early is dangerous.
         */
        WARN_ON_ONCE(!irqs_disabled());
        loongson_smp_finish();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static void stop_this_cpu(void *dummy)
{
        set_cpu_online(smp_processor_id(), false);
        calculate_cpu_foreign_map();
        local_irq_disable();
        while (true);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
#endif

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        if (atomic_read(&mm->mm_users) == 0)
                return; /* happens as a result of exit_mmap() */

        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
                local_flush_tlb_mm(mm);
        }

        preempt_enable();
}
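
/*
 * The single-user fast path above avoids IPIs entirely: rather than
 * flushing remote TLBs, it zeroes the remote CPUs' cpu_context()
 * generation so they must allocate a fresh ASID the next time they
 * activate this mm. flush_tlb_range() and flush_tlb_page() below apply
 * the same trick.
 */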

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
                local_flush_tlb_range(vma, start, end);
        }
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                cpu_context(cpu, vma->vm_mm) = 0;
                }
                local_flush_tlb_page(vma, page);
        }
        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
}
EXPORT_SYMBOL(flush_tlb_one);