// SPDX-License-Identifier: GPL-2.0
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/pfault.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/abs_lowcore.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include <asm/vdso.h>
#include <asm/maccess.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
	ec_mcck_pending,
	ec_irq_work,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;
cpumask_t cpu_setup_mask;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

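/*
 * Like __pcpu_sigp_relax(), but inserts a small delay between retries
 * once the target cpu has stayed busy for a few iterations.
 */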
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

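/*
 * Use sigp sense to check whether a cpu is in the stopped or
 * check-stop state.
 */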
static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 status;

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

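/*
 * Use sigp sense running to check whether a cpu is currently
 * dispatched on a physical cpu.
 */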
static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

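/*
 * Post an ec_xxx request bit for a cpu and, unless the bit was already
 * pending, notify the cpu: running cpus get an external call, waiting
 * cpus an emergency signal.
 */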
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}

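/*
 * Allocate the lowcore and kernel stacks for a cpu that is about to be
 * brought up and make the new lowcore the cpu's prefix area.
 */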
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;

	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	async_stack = stack_alloc();
	mcck_stack = stack_alloc();
	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
		goto out;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;
	if (nmi_alloc_mcesa(&lc->mcesad))
		goto out;
	if (abs_lowcore_map(cpu, lc, true))
		goto out_mcesa;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, __pa(lc));
	return 0;

out_mcesa:
	nmi_free_mcesa(&lc->mcesad);
out:
	stack_free(mcck_stack);
	stack_free(async_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
	return -ENOMEM;
}

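/*
 * Counterpart to pcpu_alloc_lowcore(): reset the cpu's prefix area to
 * absolute zero and free its lowcore, stacks and machine check extended
 * save area.
 */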
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;
	int cpu;

	cpu = pcpu - pcpu_devices;
	lc = lowcore_ptr[cpu];
	nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
	async_stack = lc->async_stack - STACK_INIT_OFFSET;
	mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[cpu] = NULL;
	abs_lowcore_unmap(cpu);
	nmi_free_mcesa(&lc->mcesad);
	stack_free(async_stack);
	stack_free(mcck_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
}

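/*
 * Finish the lowcore setup for a cpu that is about to come online:
 * initialize the per-cpu members and inherit the control register
 * contents of the boot cpu from the absolute lowcore copy.
 */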
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc, *abs_lc;

	lc = lowcore_ptr[cpu];
	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->restart_flags = RESTART_FLAG_CTLREGS;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->user_asce = s390_invalid_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer =
		lc->steal_timer = lc->avg_steal_timer = 0;
	abs_lc = get_abs_lowcore();
	memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area));
	put_abs_lowcore(abs_lc);
	lc->cregs_save_area[1] = lc->kernel_asce;
	lc->cregs_save_area[7] = lc->user_asce;
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	arch_spin_lock_setup(cpu);
}

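/*
 * Attach the idle task to a cpu: let the lowcore point to its kernel
 * stack and take over the cpu timer values from the task.
 */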
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc;
	int cpu;

	cpu = pcpu - pcpu_devices;
	lc = lowcore_ptr[cpu];
	lc->kernel_stack = (unsigned long)task_stack_page(tsk) + STACK_INIT_OFFSET;
	lc->current_task = (unsigned long)tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}

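/*
 * Let a stopped cpu start executing func(data): set up the restart
 * members of its lowcore and send it a sigp restart.
 */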
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc;
	int cpu;

	cpu = pcpu - pcpu_devices;
	lc = lowcore_ptr[cpu];
	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1U;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

typedef void (pcpu_delegate_fn)(void *);

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
{
	func(data);	/* should not return */
}

static void pcpu_delegate(struct pcpu *pcpu,
			  pcpu_delegate_fn *func,
			  void *data, unsigned long stack)
{
	struct lowcore *lc, *abs_lc;
	unsigned int source_cpu;

	lc = lowcore_ptr[pcpu - pcpu_devices];
	source_cpu = stap();

	if (pcpu->address == source_cpu) {
		call_on_stack(2, stack, void, __pcpu_delegate,
			      pcpu_delegate_fn *, func, void *, data);
	}
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	pcpu_sigp_retry(pcpu, SIGP_CPU_RESET, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	if (lc) {
		lc->restart_stack = stack;
		lc->restart_fn = (unsigned long)func;
		lc->restart_data = (unsigned long)data;
		lc->restart_source = source_cpu;
	} else {
		abs_lc = get_abs_lowcore();
		abs_lc->restart_stack = stack;
		abs_lc->restart_fn = (unsigned long)func;
		abs_lc->restart_data = (unsigned long)data;
		abs_lc->restart_source = source_cpu;
		put_abs_lowcore(abs_lc);
	}
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = lowcore_ptr[0];

	if (pcpu_devices[0].address == stap())
		lc = &S390_lowcore;

	pcpu_delegate(&pcpu_devices[0], func, data,
		      lc->nodat_stack);
}

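/*
 * Translate a physical cpu address into a logical cpu number. Returns
 * -1 if no present cpu matches.
 */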
int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

void schedule_mcck_handler(void)
{
	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
}

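/*
 * A cpu is considered preempted if it is neither in enabled wait nor
 * currently running on a physical cpu.
 */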
bool notrace arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

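/*
 * Use diagnose 0x9c to voluntarily yield the time slice in favor of the
 * target cpu, if the machine supports it.
 */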
void notrace smp_yield_cpu(int cpu)
{
	if (!MACHINE_HAS_DIAG9C)
		return;
	diag_stat_inc_norecursion(DIAG_STAT_X09C);
	asm volatile("diag %0,0,0x9c"
		     : : "d" (pcpu_devices[cpu].address));
}
EXPORT_SYMBOL_GPL(smp_yield_cpu);

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
	static cpumask_t cpumask;
	u64 end;
	int cpu;

	arch_spin_lock(&lock);
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
	arch_spin_unlock(&lock);
}
NOKPROBE_SYMBOL(smp_emergency_stop);

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu_devices + cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
	if (test_bit(ec_mcck_pending, &bits))
		s390_handle_mcck();
	if (test_bit(ec_irq_work, &bits))
		irq_work_run();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

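/*
 * Raise the call-function-single request on every cpu in the mask.
 */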
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void arch_smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work);
}
#endif

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

static DEFINE_SPINLOCK(ctl_lock);

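/*
 * Set or clear a bit in a control register on all cpus: update the
 * master copy in the absolute lowcore first, so that cpus brought up
 * later inherit the change, then broadcast it to all online cpus.
 */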
void smp_ctl_set_clear_bit(int cr, int bit, bool set)
{
	struct ec_creg_mask_parms parms = { .cr = cr, };
	struct lowcore *abs_lc;
	u64 ctlreg;

	if (set) {
		parms.orval = 1UL << bit;
		parms.andval = -1UL;
	} else {
		parms.orval = 0;
		parms.andval = ~(1UL << bit);
	}
	spin_lock(&ctl_lock);
	abs_lc = get_abs_lowcore();
	ctlreg = abs_lc->cregs_save_area[cr];
	ctlreg = (ctlreg & parms.andval) | parms.orval;
	abs_lc->cregs_save_area[cr] = ctlreg;
	put_abs_lowcore(abs_lc);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
	spin_unlock(&ctl_lock);
}
EXPORT_SYMBOL(smp_ctl_set_clear_bit);

#ifdef CONFIG_CRASH_DUMP

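/*
 * Use sigp to store the register state of a remote cpu for a crash
 * dump: the general/floating point registers via store-status, and the
 * vector and guarded-storage registers via store-additional-status
 * where available.
 */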
int smp_store_status(int cpu)
{
	struct lowcore *lc;
	struct pcpu *pcpu;
	unsigned long pa;

	pcpu = pcpu_devices + cpu;
	lc = lowcore_ptr[cpu];
	pa = __pa(&lc->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = lc->mcesad & MCESA_ORIGIN_MASK;
	if (MACHINE_HAS_GS)
		pa |= lc->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp/nvme dump
 *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static bool dump_available(void)
{
	return oldmem_data.start || is_ipl_type_dump();
}

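/*
 * Save the register state of the boot cpu: copy the register save area
 * of the old system from the absolute lowcore into a freshly allocated
 * save area, plus the vector registers saved during early startup.
 */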
void __init smp_save_dump_ipl_cpu(void)
{
	struct save_area *sa;
	void *regs;

	if (!dump_available())
		return;
	sa = save_area_alloc(true);
	regs = memblock_alloc(512, 8);
	if (!sa || !regs)
		panic("could not allocate memory for boot CPU save area\n");
	copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512);
	save_area_add_regs(sa, regs);
	memblock_free(regs, 512);
	if (MACHINE_HAS_VX)
		save_area_add_vxrs(sa, boot_cpu_vector_save_area);
}

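/*
 * Save the register state of all other cpus of the crashed system by
 * making them store their status into a scratch page, one cpu at a
 * time.
 */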
void __init smp_save_dump_secondary_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	void *page;

	if (!dump_available())
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!page)
		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
		      PAGE_SIZE, 1UL << 31);

	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (addr == boot_cpu_addr)
			continue;
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		sa = save_area_alloc(false);
		if (!sa)
			panic("could not allocate memory for save area\n");
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(page));
		save_area_add_regs(sa, page);
		if (MACHINE_HAS_VX) {
			__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, __pa(page));
			save_area_add_vxrs(sa, page);
		}
	}
	memblock_free(page, PAGE_SIZE);
	diag_amode31_ops.diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

int smp_cpu_get_cpu_address(int cpu)
{
	return pcpu_devices[cpu].address;
}

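/*
 * Fill the core info either from the SCLP read-cpu-info command or, if
 * that is unavailable, by probing all possible cpu addresses with sigp
 * sense.
 */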
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}

static int smp_add_present_cpu(int cpu);

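/*
 * Register all threads of a core as present cpus: assign logical cpu
 * numbers, record the physical addresses and the configuration state,
 * and (except during early boot) create the corresponding cpu devices.
 * Returns the number of cpus that were added.
 */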
static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
			bool configured, bool early)
{
	struct pcpu *pcpu;
	int cpu, nr, i;
	u16 address;

	nr = 0;
	if (sclp.has_core_type && core->type != boot_core_type)
		return nr;
	cpu = cpumask_first(avail);
	address = core->core_id << smp_cpu_mt_shift;
	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
		if (pcpu_find_address(cpu_present_mask, address + i))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = address + i;
		if (configured)
			pcpu->state = CPU_STATE_CONFIGURED;
		else
			pcpu->state = CPU_STATE_STANDBY;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (!early && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpumask_clear_cpu(cpu, avail);
		cpu = cpumask_next(cpu, avail);
	}
	return nr;
}

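/*
 * Add all cores described by the core info that are not yet present.
 * During early boot the IPL core (logical cpu 0) is handled first so
 * that its SMT threads get the lowest logical cpu numbers. Returns the
 * number of cpus that were added.
 */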
static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
	struct sclp_core_entry *core;
	static cpumask_t avail;
	bool configured;
	u16 core_id;
	int nr, i;

	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	/*
	 * Add IPL core first (which got logical CPU number 0) to make sure
	 * that all SMT threads get subsequent logical CPU numbers.
	 */
	if (early) {
		core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
		for (i = 0; i < info->configured; i++) {
			core = &info->core[i];
			if (core->core_id == core_id) {
				nr += smp_add_core(core, &avail, true, early);
				break;
			}
		}
	}
	for (i = 0; i < info->combined; i++) {
		configured = i < info->configured;
		nr += smp_add_core(&info->core[i], &avail, configured, early);
	}
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return nr;
}

void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	if (!info)
		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
		      __func__, sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	__smp_rescan_cpus(info, true);
	memblock_free(info, sizeof(*info));
}

/*
 * Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	int cpu = raw_smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long)restart_stack;
	S390_lowcore.restart_fn = (unsigned long)do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1U;
	S390_lowcore.restart_flags = 0;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	cpu_init();
	rcu_cpu_starting(cpu);
	init_cpu_timer();
	vtime_init();
	vdso_getcpu_init();
	pfault_init();
	cpumask_set_cpu(cpu, &cpu_setup_mask);
	update_cpu_masks();
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	int rc;

	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	/*
	 * Make sure global control register contents do not change
	 * until new CPU has initialized control registers.
	 */
	spin_lock(&ctl_lock);
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	spin_unlock(&ctl_lock);
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

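/*
 * Prepare the current cpu to go offline: flush pending IPI requests,
 * remove it from the online and setup masks, and shut off its external,
 * I/O and most machine check interrupt sources.
 */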
int __cpu_disable(void)
{
	unsigned long cregs[16];
	int cpu;

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	cpu = smp_processor_id();
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, &cpu_setup_mask);
	update_cpu_masks();
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

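/*
 * Called on the surviving cpu: wait until the dying cpu has actually
 * stopped, then free its lowcore and stacks.
 */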
void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

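/*
 * Called on the dying cpu itself: stop the cpu, never to return.
 */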
void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

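/*
 * Set up the cpu_possible_mask: the number of possible cpus is the
 * minimum of the possible_cpus= parameter (nr_cpu_ids by default) and
 * the limit derived from the SCLP core and thread counts.
 */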
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

smp_prepare_cpus(unsigned int max_cpus)10141da177e4SLinus Torvalds void __init smp_prepare_cpus(unsigned int max_cpus)
10151da177e4SLinus Torvalds {
10161dad093bSThomas Huth if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
101799b2d8dfSHeiko Carstens panic("Couldn't request external interrupt 0x1201");
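	/*
	 * CR0 bit 14 (bit 49 in the z/Architecture numbering) is the
	 * emergency-signal subclass mask; set it so 0x1201 interrupts
	 * are accepted.
	 */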
1018*8653d7bdSHeiko Carstens ctl_set_bit(0, 14);
10191dad093bSThomas Huth if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
1020d98e19ccSMartin Schwidefsky panic("Couldn't request external interrupt 0x1202");
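	/*
	 * CR0 bit 13 (bit 50 in the z/Architecture numbering) is the
	 * external-call subclass mask for the 0x1202 interrupt.
	 */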
1021*8653d7bdSHeiko Carstens ctl_set_bit(0, 13);
10221da177e4SLinus Torvalds }
10231da177e4SLinus Torvalds
1024ea1f4eecSHeiko Carstens void __init smp_prepare_boot_cpu(void)
10251da177e4SLinus Torvalds {
10268b646bd7SMartin Schwidefsky struct pcpu *pcpu = pcpu_devices;
10271da177e4SLinus Torvalds
10280861b5a7SHeiko Carstens WARN_ON(!cpu_present(0) || !cpu_online(0));
10298b646bd7SMartin Schwidefsky pcpu->state = CPU_STATE_CONFIGURED;
10308b646bd7SMartin Schwidefsky S390_lowcore.percpu_offset = __per_cpu_offset[0];
103150ab9a9aSHeiko Carstens smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
10321da177e4SLinus Torvalds }
10331da177e4SLinus Torvalds
103402beacccSHeiko Carstens void __init smp_setup_processor_id(void)
103502beacccSHeiko Carstens {
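	/* stap() executes STORE CPU ADDRESS: the physical address of the IPL CPU. */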
10360861b5a7SHeiko Carstens pcpu_devices[0].address = stap();
103702beacccSHeiko Carstens S390_lowcore.cpu_nr = 0;
10386c8cd5bbSPhilipp Hachtmann S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
1039b96f7d88SMartin Schwidefsky S390_lowcore.spinlock_index = 0;
104002beacccSHeiko Carstens }
104102beacccSHeiko Carstens
10421da177e4SLinus Torvalds /*
10431da177e4SLinus Torvalds * The frequency of the profiling timer can be changed
10441da177e4SLinus Torvalds * by writing a multiplier value into /proc/profile.
10451da177e4SLinus Torvalds *
10461da177e4SLinus Torvalds * On s390 this is a stub: the multiplier is accepted but otherwise ignored.
10471da177e4SLinus Torvalds */
10481da177e4SLinus Torvalds int setup_profiling_timer(unsigned int multiplier)
10491da177e4SLinus Torvalds {
10501da177e4SLinus Torvalds return 0;
10511da177e4SLinus Torvalds }
10521da177e4SLinus Torvalds
10538a25a2fdSKay Sievers static ssize_t cpu_configure_show(struct device *dev,
10548a25a2fdSKay Sievers struct device_attribute *attr, char *buf)
105508d07968SHeiko Carstens {
105608d07968SHeiko Carstens ssize_t count;
105708d07968SHeiko Carstens
105808d07968SHeiko Carstens mutex_lock(&smp_cpu_state_mutex);
10598b646bd7SMartin Schwidefsky count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
106008d07968SHeiko Carstens mutex_unlock(&smp_cpu_state_mutex);
106108d07968SHeiko Carstens return count;
106208d07968SHeiko Carstens }
106308d07968SHeiko Carstens
10648a25a2fdSKay Sievers static ssize_t cpu_configure_store(struct device *dev,
10658a25a2fdSKay Sievers struct device_attribute *attr,
10664a0b2b4dSAndi Kleen const char *buf, size_t count)
106708d07968SHeiko Carstens {
10688b646bd7SMartin Schwidefsky struct pcpu *pcpu;
106910ad34bcSMartin Schwidefsky int cpu, val, rc, i;
107008d07968SHeiko Carstens char delim;
107108d07968SHeiko Carstens
107208d07968SHeiko Carstens if (sscanf(buf, "%d %c", &val, &delim) != 1)
107308d07968SHeiko Carstens return -EINVAL;
107408d07968SHeiko Carstens if (val != 0 && val != 1)
107508d07968SHeiko Carstens return -EINVAL;
1076a73de293SSebastian Andrzej Siewior cpus_read_lock();
10770b18d318SHeiko Carstens mutex_lock(&smp_cpu_state_mutex);
107808d07968SHeiko Carstens rc = -EBUSY;
10792c2df118SHeiko Carstens /* disallow configuration changes of online cpus and cpu 0 */
10808b646bd7SMartin Schwidefsky cpu = dev->id;
10815423145fSHeiko Carstens cpu = smp_get_base_cpu(cpu);
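	/*
	 * With SMT, SCLP (de)configures whole cores: work on the core's base
	 * thread and apply the state change to all of its siblings below.
	 */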
108210ad34bcSMartin Schwidefsky if (cpu == 0)
108310ad34bcSMartin Schwidefsky goto out;
108410ad34bcSMartin Schwidefsky for (i = 0; i <= smp_cpu_mtid; i++)
108510ad34bcSMartin Schwidefsky if (cpu_online(cpu + i))
108608d07968SHeiko Carstens goto out;
10878b646bd7SMartin Schwidefsky pcpu = pcpu_devices + cpu;
108808d07968SHeiko Carstens rc = 0;
108908d07968SHeiko Carstens switch (val) {
109008d07968SHeiko Carstens case 0:
10918b646bd7SMartin Schwidefsky if (pcpu->state != CPU_STATE_CONFIGURED)
10928b646bd7SMartin Schwidefsky break;
1093d08d9430SMartin Schwidefsky rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
10948b646bd7SMartin Schwidefsky if (rc)
10958b646bd7SMartin Schwidefsky break;
109610ad34bcSMartin Schwidefsky for (i = 0; i <= smp_cpu_mtid; i++) {
109710ad34bcSMartin Schwidefsky if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
109810ad34bcSMartin Schwidefsky continue;
109910ad34bcSMartin Schwidefsky pcpu[i].state = CPU_STATE_STANDBY;
110010ad34bcSMartin Schwidefsky smp_cpu_set_polarization(cpu + i,
110110ad34bcSMartin Schwidefsky POLARIZATION_UNKNOWN);
110210ad34bcSMartin Schwidefsky }
1103d68bddb7SHeiko Carstens topology_expect_change();
110408d07968SHeiko Carstens break;
110508d07968SHeiko Carstens case 1:
11068b646bd7SMartin Schwidefsky if (pcpu->state != CPU_STATE_STANDBY)
11078b646bd7SMartin Schwidefsky break;
1108d08d9430SMartin Schwidefsky rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
11098b646bd7SMartin Schwidefsky if (rc)
11108b646bd7SMartin Schwidefsky break;
111110ad34bcSMartin Schwidefsky for (i = 0; i <= smp_cpu_mtid; i++) {
111210ad34bcSMartin Schwidefsky if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
111310ad34bcSMartin Schwidefsky continue;
111410ad34bcSMartin Schwidefsky pcpu[i].state = CPU_STATE_CONFIGURED;
111510ad34bcSMartin Schwidefsky smp_cpu_set_polarization(cpu + i,
111610ad34bcSMartin Schwidefsky POLARIZATION_UNKNOWN);
111710ad34bcSMartin Schwidefsky }
1118d68bddb7SHeiko Carstens topology_expect_change();
111908d07968SHeiko Carstens break;
112008d07968SHeiko Carstens default:
112108d07968SHeiko Carstens break;
112208d07968SHeiko Carstens }
112308d07968SHeiko Carstens out:
112408d07968SHeiko Carstens mutex_unlock(&smp_cpu_state_mutex);
1125a73de293SSebastian Andrzej Siewior cpus_read_unlock();
112608d07968SHeiko Carstens return rc ? rc : count;
112708d07968SHeiko Carstens }
11288a25a2fdSKay Sievers static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
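/*
 * Usage example (illustrative cpu number): a standby CPU is configured with
 *	echo 1 > /sys/devices/system/cpu/cpu2/configure
 * and an offline, configured CPU is put into standby with "echo 0".
 */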
112908d07968SHeiko Carstens
11308a25a2fdSKay Sievers static ssize_t show_cpu_address(struct device *dev,
11318a25a2fdSKay Sievers struct device_attribute *attr, char *buf)
113208d07968SHeiko Carstens {
11338b646bd7SMartin Schwidefsky return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
113408d07968SHeiko Carstens }
11358a25a2fdSKay Sievers static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
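/* Example (illustrative): "cat /sys/devices/system/cpu/cpu1/address" shows the physical CPU address. */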
113608d07968SHeiko Carstens
113708d07968SHeiko Carstens static struct attribute *cpu_common_attrs[] = {
11388a25a2fdSKay Sievers &dev_attr_configure.attr,
11398a25a2fdSKay Sievers &dev_attr_address.attr,
114008d07968SHeiko Carstens NULL,
114108d07968SHeiko Carstens };
114208d07968SHeiko Carstens
114308d07968SHeiko Carstens static struct attribute_group cpu_common_attr_group = {
114408d07968SHeiko Carstens .attrs = cpu_common_attrs,
114508d07968SHeiko Carstens };
11461da177e4SLinus Torvalds
114708d07968SHeiko Carstens static struct attribute *cpu_online_attrs[] = {
11488a25a2fdSKay Sievers &dev_attr_idle_count.attr,
11498a25a2fdSKay Sievers &dev_attr_idle_time_us.attr,
1150fae8b22dSHeiko Carstens NULL,
1151fae8b22dSHeiko Carstens };
1152fae8b22dSHeiko Carstens
115308d07968SHeiko Carstens static struct attribute_group cpu_online_attr_group = {
115408d07968SHeiko Carstens .attrs = cpu_online_attrs,
1155fae8b22dSHeiko Carstens };
1156fae8b22dSHeiko Carstens
1157dfbbd86aSSebastian Andrzej Siewior static int smp_cpu_online(unsigned int cpu)
11582fc2d1e9SHeiko Carstens {
11592f859d0dSHeiko Carstens struct device *s = &per_cpu(cpu_device, cpu)->dev;
11602fc2d1e9SHeiko Carstens
1161dfbbd86aSSebastian Andrzej Siewior return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
11622fc2d1e9SHeiko Carstens }
11638e1398f8SHeiko Carstens
1164dfbbd86aSSebastian Andrzej Siewior static int smp_cpu_pre_down(unsigned int cpu)
1165dfbbd86aSSebastian Andrzej Siewior {
1166dfbbd86aSSebastian Andrzej Siewior struct device *s = &per_cpu(cpu_device, cpu)->dev;
1167dfbbd86aSSebastian Andrzej Siewior
1168dfbbd86aSSebastian Andrzej Siewior sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
1169dfbbd86aSSebastian Andrzej Siewior return 0;
11702fc2d1e9SHeiko Carstens }
11712fc2d1e9SHeiko Carstens
1172e2741f17SPaul Gortmaker static int smp_add_present_cpu(int cpu)
117308d07968SHeiko Carstens {
117496619fc1SHeiko Carstens struct device *s;
117596619fc1SHeiko Carstens struct cpu *c;
117608d07968SHeiko Carstens int rc;
117708d07968SHeiko Carstens
117896619fc1SHeiko Carstens c = kzalloc(sizeof(*c), GFP_KERNEL);
117996619fc1SHeiko Carstens if (!c)
118096619fc1SHeiko Carstens return -ENOMEM;
11812f859d0dSHeiko Carstens per_cpu(cpu_device, cpu) = c;
118296619fc1SHeiko Carstens s = &c->dev;
118308d07968SHeiko Carstens c->hotpluggable = 1;
118408d07968SHeiko Carstens rc = register_cpu(c, cpu);
118508d07968SHeiko Carstens if (rc)
118608d07968SHeiko Carstens goto out;
118708d07968SHeiko Carstens rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
118808d07968SHeiko Carstens if (rc)
118908d07968SHeiko Carstens goto out_cpu;
119083a24e32SHeiko Carstens rc = topology_cpu_init(c);
119183a24e32SHeiko Carstens if (rc)
119283a24e32SHeiko Carstens goto out_topology;
119308d07968SHeiko Carstens return 0;
119483a24e32SHeiko Carstens
119583a24e32SHeiko Carstens out_topology:
119608d07968SHeiko Carstens sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
119708d07968SHeiko Carstens out_cpu:
119808d07968SHeiko Carstens unregister_cpu(c);
119908d07968SHeiko Carstens out:
120008d07968SHeiko Carstens return rc;
120108d07968SHeiko Carstens }
120208d07968SHeiko Carstens
smp_rescan_cpus(void)120367060d9cSHeiko Carstens int __ref smp_rescan_cpus(void)
120408d07968SHeiko Carstens {
1205d08d9430SMartin Schwidefsky struct sclp_core_info *info;
12068b646bd7SMartin Schwidefsky int nr;
120708d07968SHeiko Carstens
1208af51160eSHeiko Carstens info = kzalloc(sizeof(*info), GFP_KERNEL);
12098b646bd7SMartin Schwidefsky if (!info)
12108b646bd7SMartin Schwidefsky return -ENOMEM;
1211af51160eSHeiko Carstens smp_get_core_info(info, 0);
121272a81ad9SHeiko Carstens nr = __smp_rescan_cpus(info, false);
12138b646bd7SMartin Schwidefsky kfree(info);
12148b646bd7SMartin Schwidefsky if (nr)
1215c10fde0dSHeiko Carstens topology_schedule_update();
12168b646bd7SMartin Schwidefsky return 0;
12171e489518SHeiko Carstens }
12181e489518SHeiko Carstens
12198a25a2fdSKay Sievers static ssize_t __ref rescan_store(struct device *dev,
12208a25a2fdSKay Sievers struct device_attribute *attr,
1221c9be0a36SAndi Kleen const char *buf,
12221e489518SHeiko Carstens size_t count)
12231e489518SHeiko Carstens {
12241e489518SHeiko Carstens int rc;
12251e489518SHeiko Carstens
1226b7cb707cSGerald Schaefer rc = lock_device_hotplug_sysfs();
1227b7cb707cSGerald Schaefer if (rc)
1228b7cb707cSGerald Schaefer return rc;
12291e489518SHeiko Carstens rc = smp_rescan_cpus();
1230b7cb707cSGerald Schaefer unlock_device_hotplug();
123108d07968SHeiko Carstens return rc ? rc : count;
123208d07968SHeiko Carstens }
12336cbaefb4SJoe Perches static DEVICE_ATTR_WO(rescan);
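/*
 * Usage example: "echo 1 > /sys/devices/system/cpu/rescan" triggers a
 * rescan; rescan_store() never parses the value, so any write works.
 */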
123408d07968SHeiko Carstens
123583a24e32SHeiko Carstens static int __init s390_smp_init(void)
1236c10fde0dSHeiko Carstens {
123731e7c4ccSGreg Kroah-Hartman struct device *dev_root;
1238f4edbcd5SSrivatsa S. Bhat int cpu, rc = 0;
12392fc2d1e9SHeiko Carstens
124031e7c4ccSGreg Kroah-Hartman dev_root = bus_get_dev_root(&cpu_subsys);
124131e7c4ccSGreg Kroah-Hartman if (dev_root) {
124231e7c4ccSGreg Kroah-Hartman rc = device_create_file(dev_root, &dev_attr_rescan);
124331e7c4ccSGreg Kroah-Hartman put_device(dev_root);
124408d07968SHeiko Carstens if (rc)
124508d07968SHeiko Carstens return rc;
124631e7c4ccSGreg Kroah-Hartman }
124731e7c4ccSGreg Kroah-Hartman
124808d07968SHeiko Carstens for_each_present_cpu(cpu) {
124908d07968SHeiko Carstens rc = smp_add_present_cpu(cpu);
1250fae8b22dSHeiko Carstens if (rc)
1251f4edbcd5SSrivatsa S. Bhat goto out;
12521da177e4SLinus Torvalds }
1253f4edbcd5SSrivatsa S. Bhat
1254dfbbd86aSSebastian Andrzej Siewior rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
1255dfbbd86aSSebastian Andrzej Siewior smp_cpu_online, smp_cpu_pre_down);
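	/*
	 * cpuhp_setup_state() returns a positive dynamic state id for
	 * CPUHP_AP_ONLINE_DYN on success, so fold positive values into 0.
	 */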
1256e1108e8fSHeiko Carstens rc = rc <= 0 ? rc : 0;
1257f4edbcd5SSrivatsa S. Bhat out:
1258f4edbcd5SSrivatsa S. Bhat return rc;
12591da177e4SLinus Torvalds }
126083a24e32SHeiko Carstens subsys_initcall(s390_smp_init);
126157892847SAlexander Gordeev
126257892847SAlexander Gordeev static __always_inline void set_new_lowcore(struct lowcore *lc)
126357892847SAlexander Gordeev {
1264ddd38fd2SHeiko Carstens union register_pair dst, src;
126557892847SAlexander Gordeev u32 pfx;
1266ddd38fd2SHeiko Carstens
1267ddd38fd2SHeiko Carstens src.even = (unsigned long) &S390_lowcore;
1268ddd38fd2SHeiko Carstens src.odd = sizeof(S390_lowcore);
1269ddd38fd2SHeiko Carstens dst.even = (unsigned long) lc;
1270ddd38fd2SHeiko Carstens dst.odd = sizeof(*lc);
12714851d226SAlexander Gordeev pfx = __pa(lc);
127257892847SAlexander Gordeev
127357892847SAlexander Gordeev asm volatile(
1274ddd38fd2SHeiko Carstens " mvcl %[dst],%[src]\n"
127557892847SAlexander Gordeev " spx %[pfx]\n"
1276ddd38fd2SHeiko Carstens : [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
1277ddd38fd2SHeiko Carstens : [pfx] "Q" (pfx)
1278ddd38fd2SHeiko Carstens : "memory", "cc");
127957892847SAlexander Gordeev }
128057892847SAlexander Gordeev
12816cbd7cc2SAlexander Gordeev int __init smp_reinit_ipl_cpu(void)
128257892847SAlexander Gordeev {
128357892847SAlexander Gordeev unsigned long async_stack, nodat_stack, mcck_stack;
128457892847SAlexander Gordeev struct lowcore *lc, *lc_ipl;
1285c7ed509bSAlexander Gordeev unsigned long flags, cr0;
1286c7ed509bSAlexander Gordeev u64 mcesad;
128757892847SAlexander Gordeev
128857892847SAlexander Gordeev lc_ipl = lowcore_ptr[0];
128957892847SAlexander Gordeev lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
129057892847SAlexander Gordeev nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
129157892847SAlexander Gordeev async_stack = stack_alloc();
129257892847SAlexander Gordeev mcck_stack = stack_alloc();
1293c7ed509bSAlexander Gordeev if (!lc || !nodat_stack || !async_stack || !mcck_stack || nmi_alloc_mcesa(&mcesad))
129457892847SAlexander Gordeev panic("Couldn't allocate memory");
129557892847SAlexander Gordeev
129657892847SAlexander Gordeev local_irq_save(flags);
129757892847SAlexander Gordeev local_mcck_disable();
129857892847SAlexander Gordeev set_new_lowcore(lc);
129957892847SAlexander Gordeev S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
130057892847SAlexander Gordeev S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
130157892847SAlexander Gordeev S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
1302c7ed509bSAlexander Gordeev __ctl_store(cr0, 0, 0);
1303c7ed509bSAlexander Gordeev __ctl_clear_bit(0, 28); /* disable lowcore protection */
1304c7ed509bSAlexander Gordeev S390_lowcore.mcesad = mcesad;
1305c7ed509bSAlexander Gordeev __ctl_load(cr0, 0, 0);
13064df29d2bSAlexander Gordeev if (abs_lowcore_map(0, lc, false))
13074df29d2bSAlexander Gordeev panic("Couldn't remap absolute lowcore");
130857892847SAlexander Gordeev lowcore_ptr[0] = lc;
130957892847SAlexander Gordeev local_mcck_enable();
131057892847SAlexander Gordeev local_irq_restore(flags);
131157892847SAlexander Gordeev
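	/*
	 * The IPL-time stacks and lowcore were allocated from memblock before
	 * the buddy allocator was available; memblock_free_late() hands the
	 * memory to the page allocator now that it is.
	 */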
13124851d226SAlexander Gordeev memblock_free_late(__pa(lc_ipl->mcck_stack - STACK_INIT_OFFSET), THREAD_SIZE);
131360afa6d1SHeiko Carstens memblock_free_late(__pa(lc_ipl->async_stack - STACK_INIT_OFFSET), THREAD_SIZE);
1314944c7837SHeiko Carstens memblock_free_late(__pa(lc_ipl->nodat_stack - STACK_INIT_OFFSET), THREAD_SIZE);
13154851d226SAlexander Gordeev memblock_free_late(__pa(lc_ipl), sizeof(*lc_ipl));
131657892847SAlexander Gordeev return 0;
131757892847SAlexander Gordeev }
1318