/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <linux/memblock.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include "entry.h"

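/*
 * Bits in the pcpu ec_mask; they select the action a CPU performs
 * when it receives an emergency-signal or external-call interrupt
 * (see smp_handle_ext_call below).
 */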
enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

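/*
 * Issue a sigp order and retry while the target CPU is busy; after
 * the first few attempts back off for 10 microseconds per retry.
 */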
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

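/*
 * Return non-zero if the CPU identified by pcpu is in the stopped or
 * check-stop state according to sigp sense.
 */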
static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

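/*
 * Make a remote CPU execute one of the ec_xxx actions: set the bit in
 * its ec_mask and wake it with a sigp external call if it is running,
 * or with a sigp emergency signal if it is not.
 */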
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}

#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)

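/*
 * Allocate the lowcore, async stack and panic stack for a CPU and
 * make the lowcore current with the sigp set-prefix order. The boot
 * CPU (pcpu_devices[0]) reuses the areas that are already in place.
 */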
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, panic_stack;
	struct lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !panic_stack || !async_stack)
			goto out;
	} else {
		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	if (MACHINE_HAS_VX)
		lc->vector_save_area_addr =
			(unsigned long) &lc->vector_save_area;
	if (vdso_alloc_per_cpu(lc))
		goto out;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(panic_stack);
		free_pages(async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	if (pcpu == &pcpu_devices[0])
		return;
	free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
	free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}

#endif /* CONFIG_HOTPLUG_CPU */

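/*
 * Finish the lowcore of a secondary CPU before it is started: attach
 * it to init_mm and copy the per-cpu offset, control and access
 * registers and the facility list from the boot CPU.
 */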
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = pcpu->lowcore;

	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	atomic_inc(&init_mm.context.attach_count);
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}

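/*
 * Start a stopped CPU: point its restart PSW environment at func and
 * issue a sigp restart; the target begins executing func on its
 * kernel stack.
 */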
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading. mtid is the
 * requested maximum thread id, i.e. mtid + 1 threads per core; on
 * success smp_cpu_mt_shift becomes the smallest shift that covers
 * mtid (e.g. mtid = 1, two threads per core, gives a shift of 1).
 */
static int pcpu_set_smt(unsigned int mtid)
{
	register unsigned long reg1 asm ("1") = (unsigned long) mtid;
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	asm volatile(
		"	sigp	%1,0,%2	# sigp set multi-threading\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
		: "cc");
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->lowcore->panic_stack -
		      PANIC_FRAME_OFFSET + PAGE_SIZE);
}

int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

int smp_vcpu_scheduled(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
}

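/*
 * Yield the processor to the hypervisor: diag 0x9c is a directed
 * yield in favor of the target CPU, diag 0x44 simply gives up the
 * remaining time slice.
 */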
void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C) {
		diag_stat_inc_norecursion(DIAG_STAT_X09C);
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	} else if (MACHINE_HAS_DIAG44) {
		diag_stat_inc_norecursion(DIAG_STAT_X044);
		asm volatile("diag 0,0,0x44");
	}
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
static void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

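	/* Allow roughly one second; a microsecond is 1 << 12 TOD clock units. */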
	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_CRASH_DUMP

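/*
 * Store the register state of a remote CPU into the save areas of its
 * lowcore: first the general register state via sigp store-status,
 * then, if the machine has vector registers, the vector state via
 * sigp store-additional-status.
 */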
int smp_store_status(int cpu)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	unsigned long pa;

	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX)
		return 0;
	pa = __pa(pcpu->lowcore->vector_save_area_addr);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
	if (!page)
		panic("could not allocate memory for save area\n");
	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

static struct sclp_core_info *smp_get_core_info(void)
{
	static int use_sigp_detection;
	struct sclp_core_info *info;
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_core_info(info))) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
	return info;
}

static int smp_add_present_cpu(int cpu);

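/*
 * Register the cores listed in info as present logical CPUs. Each
 * core is expanded into smp_cpu_mtid + 1 logical CPUs; CPUs beyond
 * the configured count start out in the standby state. Returns the
 * number of CPUs that were added.
 */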
static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i, j;
	u16 address;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (sclp.has_core_type && info->core[i].type != boot_core_type)
			continue;
		address = info->core[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
				continue;
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			else
				nr++;
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
				break;
		}
	}
	return nr;
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = smp_get_core_info();
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");

	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	kfree(info);
}

/*
 *	Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_ONLINE);
}

/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = cpu - (cpu % (smp_cpu_mtid + 1));
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu) || !cpu_active(cpu))
		cpu_relax();
	return 0;
}

static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0]  &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6]  &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	atomic_dec(&init_mm.context.attach_count);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

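/*
 * Size the possible cpu mask: the smaller of the "possible_cpus="
 * kernel parameter (if given) and the maximum number of logical CPUs
 * supported by the hardware (max cores times threads per core),
 * further limited by nr_cpu_ids.
 */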
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = sclp.max_cores * sclp_max ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	smp_detect_cpus();
}

void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = stap();
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
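/*
 * The per-cpu "configure" sysfs attribute: reading it returns the
 * configuration state, writing 0 takes a configured core to standby
 * via SCLP, writing 1 configures a standby core. Changes are rejected
 * for online CPUs and for core 0.
 */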
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu -= cpu % (smp_cpu_mtid + 1);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct device *s = &per_cpu(cpu_device, cpu)->dev;
	int err = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

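/*
 * Rescan for CPUs that were added via SCLP since boot; new CPUs
 * become present and get their sysfs files. Triggered by writing to
 * /sys/devices/system/cpu/rescan.
 */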
int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = smp_get_core_info();
	if (!info)
		return -ENOMEM;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	cpu_notifier_register_begin();
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	__hotcpu_notifier(smp_cpu_notify, 0);

out:
	cpu_notifier_register_done();
	return rc;
}
subsys_initcall(s390_smp_init);