// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

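/*
 * Register the platform's SMP operations. Called by the platform setup
 * code; calling it again simply overrides the previous ops with a
 * warning.
 */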
void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

static inline void smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

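/*
 * Boot-CPU side preparation before secondaries are brought up: bind a
 * fresh context for init_mm, record our logical CPU number, and let
 * the platform ops perform any board-level setup.
 */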
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

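/*
 * The boot CPU is always logical CPU 0: seed the physical<->logical
 * maps and mark it online, possible, and CPU_ONLINE.
 */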
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
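/*
 * Wait for a dying CPU to reach CPU_DEAD (set by play_dead_common()),
 * polling up to ten times at 100ms intervals before giving up.
 */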
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

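/*
 * Teardown run on the dying CPU itself: detach the idle task, free the
 * per-CPU IRQ context, publish CPU_DEAD for native_cpu_die() to
 * observe, and disable local interrupts.
 */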
void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__this_cpu_write(cpu_state, CPU_DEAD);
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

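/*
 * Take the current CPU out of service: give the platform ops a chance
 * to veto, then mark the CPU offline, migrate its IRQs away, flush
 * caches and TLBs, and drop it from every task's mm cpumask.
 */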
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline. Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
#ifdef CONFIG_MMU
	local_flush_tlb_all();
#endif

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

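/*
 * Entry point for secondary CPUs, reached from the head.S startup code
 * via stack_start below: enable the MMU, adopt init_mm, run per-CPU
 * trap setup, calibrate the delay loop, and enter the idle loop once
 * the CPU is marked online.
 */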
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	mmgrab(mm);
	mmget(mm);
	current->active_mm = mm;
#ifdef CONFIG_MMU
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
#endif

	per_cpu_trap_init();

	notify_cpu_starting(cpu);

	local_irq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

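/*
 * Parameter block shared with the secondary startup code in head.S,
 * which reads its stack pointer and entry point from here.
 */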
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

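/*
 * Bring one secondary CPU online: publish its stack and entry point
 * in stack_start, kick it via the platform ops, and wait up to one
 * second (HZ jiffies) for it to appear in the online mask.
 */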
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
	unsigned long timeout;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

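/* Report the aggregate BogoMIPS of all online CPUs after bringup. */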
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void arch_smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
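/*
 * Relay a clockevents broadcast to every CPU in the mask via the
 * SMP_MSG_TIMER IPI; ipi_timer() below is the receiving end.
 */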
void tick_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	tick_receive_broadcast();
	irq_exit();
}
#endif

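/*
 * Demultiplex an incoming IPI to the matching generic handler, based
 * on the message type the sender encoded.
 */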
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
#endif
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_MMU

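/*
 * The *_ipi() helpers below run the corresponding local TLB flush on
 * a remote CPU from IPI context, with a struct flush_tlb_data payload
 * where arguments are needed.
 */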
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

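/*
 * Flush a single (ASID, vaddr) translation everywhere. No mm is
 * available at this level, so all other CPUs are IPI'd unconditionally.
 */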
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}

#endif