xref: /openbmc/linux/arch/s390/kernel/smp.c (revision e368cd72)
// SPDX-License-Identifier: GPL-2.0
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers; only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include <asm/vdso.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
	ec_mcck_pending,
	ec_irq_work,
};
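
/*
 * Each ec_* value is a bit number in the per-cpu ec_mask. A sender sets
 * the bit and raises a sigp external call or emergency signal; the
 * receiving cpu clears the whole mask with xchg() in smp_handle_ext_call()
 * and acts on every bit that was set, so several requests are folded
 * into a single interrupt.
 */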

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}
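
/*
 * sigp sets one of four condition codes: order accepted (0), status
 * stored (1), busy (2) or not operational (3). The two helpers above
 * spin only while the condition code is SIGP_CC_BUSY and hand any
 * other result back to the caller; smp_send_stop() below, for example,
 * issues pcpu_sigp_retry(pcpu, SIGP_STOP, 0) and then polls
 * pcpu_stopped() until the target has actually stopped.
 */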

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 status;

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}
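
/*
 * The order used above depends on the state of the target: a running
 * cpu is signalled with sigp external call (interrupt code 0x1202),
 * a cpu that is not running gets sigp emergency signal (0x1201).
 * test_and_set_bit() suppresses a second sigp for the same event
 * while the first one is still pending in ec_mask.
 */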

static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;

	lc = (struct lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	async_stack = stack_alloc();
	mcck_stack = stack_alloc();
	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
		goto out;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;
	if (nmi_alloc_per_cpu(lc))
		goto out;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;

out:
	stack_free(mcck_stack);
	stack_free(async_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
	return -ENOMEM;
}
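
/*
 * The sigp set-prefix at the end of pcpu_alloc_lowcore() is what makes
 * the new lowcore visible to the hardware: prefixing redirects the
 * target cpu's real addresses 0-8191 to the block at the prefix
 * address, so every cpu sees its own struct lowcore at address 0.
 * Only the first 512 bytes are inherited from the boot cpu; the rest
 * is cleared here and filled in by pcpu_prepare_secondary().
 */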

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc;
	int cpu;

	cpu = pcpu - pcpu_devices;
	lc = lowcore_ptr[cpu];
	nodat_stack = lc->nodat_stack - STACK_INIT_OFFSET;
	async_stack = lc->async_stack - STACK_INIT_OFFSET;
	mcck_stack = lc->mcck_stack - STACK_INIT_OFFSET;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[cpu] = NULL;
	nmi_free_per_cpu(lc);
	stack_free(async_stack);
	stack_free(mcck_stack);
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages((unsigned long) lc, LC_ORDER);
}

static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = lowcore_ptr[cpu];

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->restart_flags = RESTART_FLAG_CTLREGS;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->user_asce = s390_invalid_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer =
		lc->steal_timer = lc->avg_steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	lc->cregs_save_area[1] = lc->kernel_asce;
	lc->cregs_save_area[7] = lc->user_asce;
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	arch_spin_lock_setup(cpu);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc;
	int cpu;

	cpu = pcpu - pcpu_devices;
	lc = lowcore_ptr[cpu];
	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}

static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc;
	int cpu;

	cpu = pcpu - pcpu_devices;
	lc = lowcore_ptr[cpu];
	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1U;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

typedef void (pcpu_delegate_fn)(void *);

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
{
	func(data);	/* should not return */
}

static void pcpu_delegate(struct pcpu *pcpu,
			  pcpu_delegate_fn *func,
			  void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned int source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	if (pcpu->address == source_cpu) {
		call_on_stack(2, stack, void, __pcpu_delegate,
			      pcpu_delegate_fn *, func, void *, data);
	}
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	__bpon();
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}
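
/*
 * If the target of pcpu_delegate() is the current cpu, func runs
 * directly on the new stack via call_on_stack() and must not return.
 * Otherwise the current cpu parks the target with sigp stop, plants
 * func, data and stack in the target's restart area through the
 * absolute lowcore (mem_assign_absolute) and triggers sigp restart;
 * the final sigp stop then halts the sending cpu itself.
 */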

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}
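
/*
 * smp_cpu_mt_shift is the number of cpu address bits reserved for the
 * thread id. A worked example: with mtid = 1 (two threads per core)
 * the shift becomes 1 and core n owns the cpu addresses 2n and 2n + 1;
 * with mtid = 0 the shift is 0 and core and cpu addresses coincide.
 */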

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = lowcore_ptr[0];

	if (pcpu_devices[0].address == stap())
		lc = &S390_lowcore;

	pcpu_delegate(&pcpu_devices[0], func, data,
		      lc->nodat_stack);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

void schedule_mcck_handler(void)
{
	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
}

bool notrace arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

void notrace smp_yield_cpu(int cpu)
{
	if (!MACHINE_HAS_DIAG9C)
		return;
	diag_stat_inc_norecursion(DIAG_STAT_X09C);
	asm volatile("diag %0,0,0x9c"
		     : : "d" (pcpu_devices[cpu].address));
}
EXPORT_SYMBOL_GPL(smp_yield_cpu);
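
/*
 * Diagnose 0x9c is a hint to the hypervisor to preferentially
 * schedule the (guest) cpu with the given physical address; it is
 * used e.g. by the arch spinlock code when the lock holder is
 * currently not running.
 */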

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
	static cpumask_t cpumask;
	u64 end;
	int cpu;

	arch_spin_lock(&lock);
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
	arch_spin_unlock(&lock);
}
NOKPROBE_SYMBOL(smp_emergency_stop);
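
/*
 * The timeout used above is one second: bit 51 of the TOD clock ticks
 * every microsecond, so shifting a microsecond count left by 12
 * converts it to TOD clock units (1000000UL << 12).
 */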

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu_devices + cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
	if (test_bit(ec_mcck_pending, &bits))
		__s390_handle_mcck();
	if (test_bit(ec_irq_work, &bits))
		irq_work_run();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_irq_work);
}
#endif

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

static DEFINE_SPINLOCK(ctl_lock);
static unsigned long ctlreg;

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	spin_lock(&ctl_lock);
	memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
	__set_bit(bit, &ctlreg);
	memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
	spin_unlock(&ctl_lock);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	spin_lock(&ctl_lock);
	memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
	__clear_bit(bit, &ctlreg);
	memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
	spin_unlock(&ctl_lock);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
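
/*
 * Both helpers update the control register copy in the lowcore save
 * area at absolute address zero before broadcasting the change with
 * on_each_cpu(), so that code which (re)loads control registers from
 * that save area, e.g. a cpu restarted with RESTART_FLAG_CTLREGS,
 * should observe the new value as well. ctl_lock serializes only this
 * read-modify-write; the live registers are updated locally in
 * smp_ctl_bit_callback().
 */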

#ifdef CONFIG_CRASH_DUMP

int smp_store_status(int cpu)
{
	struct lowcore *lc;
	struct pcpu *pcpu;
	unsigned long pa;

	pcpu = pcpu_devices + cpu;
	lc = lowcore_ptr[cpu];
	pa = __pa(&lc->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = __pa(lc->mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_GS)
		pa |= lc->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp/nvme dump
 *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(oldmem_data.start || is_ipl_type_dump()))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
	if (!page)
		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
		      PAGE_SIZE, 1UL << 31);

	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp/nvme dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || oldmem_data.start)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag_amode31_ops.diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

int smp_cpu_get_cpu_address(int cpu)
{
	return pcpu_devices[cpu].address;
}

static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}

static int smp_add_present_cpu(int cpu);

static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
			bool configured, bool early)
{
	struct pcpu *pcpu;
	int cpu, nr, i;
	u16 address;

	nr = 0;
	if (sclp.has_core_type && core->type != boot_core_type)
		return nr;
	cpu = cpumask_first(avail);
	address = core->core_id << smp_cpu_mt_shift;
	for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
		if (pcpu_find_address(cpu_present_mask, address + i))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = address + i;
		if (configured)
			pcpu->state = CPU_STATE_CONFIGURED;
		else
			pcpu->state = CPU_STATE_STANDBY;
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (!early && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpumask_clear_cpu(cpu, avail);
		cpu = cpumask_next(cpu, avail);
	}
	return nr;
}
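
/*
 * A core entry delivered by SCLP describes a whole core; with SMT
 * enabled it expands to smp_cpu_mtid + 1 logical cpus whose physical
 * addresses run from core_id << smp_cpu_mt_shift up to that value
 * plus smp_cpu_mtid. Each thread gets the next free logical cpu
 * number from *avail.
 */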

static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
	struct sclp_core_entry *core;
	static cpumask_t avail;
	bool configured;
	u16 core_id;
	int nr, i;

	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	/*
	 * Add IPL core first (which got logical CPU number 0) to make sure
	 * that all SMT threads get subsequent logical CPU numbers.
	 */
	if (early) {
		core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
		for (i = 0; i < info->configured; i++) {
			core = &info->core[i];
			if (core->core_id == core_id) {
				nr += smp_add_core(core, &avail, true, early);
				break;
			}
		}
	}
	for (i = 0; i < info->combined; i++) {
		configured = i < info->configured;
		nr += smp_add_core(&info->core[i], &avail, configured, early);
	}
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return nr;
}

void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	if (!info)
		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
		      __func__, sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	__smp_rescan_cpus(info, true);
	memblock_free_early((unsigned long)info, sizeof(*info));
}

/*
 *	Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	int cpu = raw_smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long)restart_stack;
	S390_lowcore.restart_fn = (unsigned long)do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1U;
	S390_lowcore.restart_flags = 0;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	cpu_init();
	rcu_cpu_starting(cpu);
	init_cpu_timer();
	vtime_init();
	vdso_getcpu_init();
	pfault_init();
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	update_cpu_masks();
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	int rc;

	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
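
/*
 * Bring-up sequence for a secondary cpu: initial cpu reset via sigp,
 * fresh lowcore and stacks, lowcore initialization from the current
 * values of the requesting cpu, attaching the idle task and finally
 * a sigp restart that enters smp_start_secondary() on the target.
 * The loop at the end simply waits for the new cpu to mark itself
 * online.
 */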

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	update_cpu_masks();
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0]  &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6]  &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	__bpon();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}
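
/*
 * Worked example: with sclp.max_cores = 8, two threads per core
 * (sclp_max = 2) and no "possible_cpus=" parameter this marks cpus
 * 0..15 as possible, capped by nr_cpu_ids. The ?: fallbacks keep
 * nr_cpu_ids as the limit when SCLP reports no core count or the
 * parameter was not given.
 */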

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}

/*
 * The frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * Usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
	&dev_attr_configure.attr,
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
	unregister_cpu(c);
out:
	return rc;
}

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	nr = __smp_rescan_cpus(info, false);
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus();
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);

static __always_inline void set_new_lowcore(struct lowcore *lc)
{
	union register_pair dst, src;
	u32 pfx;

	src.even = (unsigned long) &S390_lowcore;
	src.odd  = sizeof(S390_lowcore);
	dst.even = (unsigned long) lc;
	dst.odd  = sizeof(*lc);
	pfx = (unsigned long) lc;

	asm volatile(
		"	mvcl	%[dst],%[src]\n"
		"	spx	%[pfx]\n"
		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
		: [pfx] "Q" (pfx)
		: "memory", "cc");
}
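
/*
 * set_new_lowcore() copies the entire struct lowcore with mvcl and
 * then issues set-prefix (spx), so from that instruction on the real
 * addresses 0-8191 of this cpu map to the new lowcore. It is marked
 * __always_inline so that the copy and the prefix switch happen
 * without an intervening out-of-line call.
 */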

static int __init smp_reinit_ipl_cpu(void)
{
	unsigned long async_stack, nodat_stack, mcck_stack;
	struct lowcore *lc, *lc_ipl;
	unsigned long flags;

	lc_ipl = lowcore_ptr[0];
	lc = (struct lowcore *)	__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	async_stack = stack_alloc();
	mcck_stack = stack_alloc();
	if (!lc || !nodat_stack || !async_stack || !mcck_stack)
		panic("Couldn't allocate memory");

	local_irq_save(flags);
	local_mcck_disable();
	set_new_lowcore(lc);
	S390_lowcore.nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	S390_lowcore.async_stack = async_stack + STACK_INIT_OFFSET;
	S390_lowcore.mcck_stack = mcck_stack + STACK_INIT_OFFSET;
	lowcore_ptr[0] = lc;
	local_mcck_enable();
	local_irq_restore(flags);

	free_pages(lc_ipl->async_stack - STACK_INIT_OFFSET, THREAD_SIZE_ORDER);
	memblock_free_late(lc_ipl->mcck_stack - STACK_INIT_OFFSET, THREAD_SIZE);
	memblock_free_late((unsigned long) lc_ipl, sizeof(*lc_ipl));

	return 0;
}
early_initcall(smp_reinit_ipl_cpu);