xref: /openbmc/linux/arch/powerpc/kernel/rtas.c (revision 04b3c795)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  * Procedures for interfacing to the RTAS on CHRP machines.
5  *
6  * Peter Bergner, IBM	March 2001.
7  * Copyright (C) 2001 IBM.
8  */
9 
10 #include <stdarg.h>
11 #include <linux/kernel.h>
12 #include <linux/types.h>
13 #include <linux/spinlock.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/capability.h>
17 #include <linux/delay.h>
18 #include <linux/cpu.h>
19 #include <linux/sched.h>
20 #include <linux/smp.h>
21 #include <linux/completion.h>
22 #include <linux/cpumask.h>
23 #include <linux/memblock.h>
24 #include <linux/slab.h>
25 #include <linux/reboot.h>
26 #include <linux/syscalls.h>
27 
28 #include <asm/prom.h>
29 #include <asm/rtas.h>
30 #include <asm/hvcall.h>
31 #include <asm/machdep.h>
32 #include <asm/firmware.h>
33 #include <asm/page.h>
34 #include <asm/param.h>
35 #include <asm/delay.h>
36 #include <linux/uaccess.h>
37 #include <asm/udbg.h>
38 #include <asm/syscalls.h>
39 #include <asm/smp.h>
40 #include <linux/atomic.h>
41 #include <asm/time.h>
42 #include <asm/mmu.h>
43 #include <asm/topology.h>
44 #include <asm/paca.h>
45 
46 /* This is here deliberately so it's only used in this file */
47 void enter_rtas(unsigned long);
48 
49 struct rtas_t rtas = {
50 	.lock = __ARCH_SPIN_LOCK_UNLOCKED
51 };
52 EXPORT_SYMBOL(rtas);
53 
54 DEFINE_SPINLOCK(rtas_data_buf_lock);
55 EXPORT_SYMBOL(rtas_data_buf_lock);
56 
57 char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
58 EXPORT_SYMBOL(rtas_data_buf);
59 
60 unsigned long rtas_rmo_buf;
61 
62 /*
63  * If non-NULL, this gets called when the kernel terminates.
64  * This is done like this so rtas_flash can be a module.
65  */
66 void (*rtas_flash_term_hook)(int);
67 EXPORT_SYMBOL(rtas_flash_term_hook);
68 
69 /* RTAS uses home-made raw locking instead of spin_lock_irqsave()
70  * because these routines can be called from really nasty contexts,
71  * such as with the timebase stopped, which would lock up with
72  * normal locks and spinlock debugging enabled.
73  */
74 static unsigned long lock_rtas(void)
75 {
76 	unsigned long flags;
77 
78 	local_irq_save(flags);
79 	preempt_disable();
80 	arch_spin_lock(&rtas.lock);
81 	return flags;
82 }
83 
84 static void unlock_rtas(unsigned long flags)
85 {
86 	arch_spin_unlock(&rtas.lock);
87 	local_irq_restore(flags);
88 	preempt_enable();
89 }
90 
91 /*
92  * call_rtas_display_status and call_rtas_display_status_delay
93  * are designed only for very early low-level debugging, which
94  * is why the token is hard-coded to 10.
95  */
96 static void call_rtas_display_status(unsigned char c)
97 {
98 	unsigned long s;
99 
100 	if (!rtas.base)
101 		return;
102 
103 	s = lock_rtas();
104 	rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c);
105 	unlock_rtas(s);
106 }
107 
108 static void call_rtas_display_status_delay(char c)
109 {
110 	static int pending_newline = 0;  /* did last write end with unprinted newline? */
111 	static int width = 16;
112 
113 	if (c == '\n') {
114 		while (width-- > 0)
115 			call_rtas_display_status(' ');
116 		width = 16;
117 		mdelay(500);
118 		pending_newline = 1;
119 	} else {
120 		if (pending_newline) {
121 			call_rtas_display_status('\r');
122 			call_rtas_display_status('\n');
123 		}
124 		pending_newline = 0;
125 		if (width--) {
126 			call_rtas_display_status(c);
127 			udelay(10000);
128 		}
129 	}
130 }
131 
132 void __init udbg_init_rtas_panel(void)
133 {
134 	udbg_putc = call_rtas_display_status_delay;
135 }
136 
137 #ifdef CONFIG_UDBG_RTAS_CONSOLE
138 
139 /* If you think you're dying before early_init_dt_scan_rtas() does its
140  * work, you can hard code the token values for your firmware here and
141  * hardcode rtas.base/entry etc.
142  */
143 static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE;
144 static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE;
145 
146 static void udbg_rtascon_putc(char c)
147 {
148 	int tries;
149 
150 	if (!rtas.base)
151 		return;
152 
153 	/* Add CRs before LFs */
154 	if (c == '\n')
155 		udbg_rtascon_putc('\r');
156 
157 	/* the firmware may still be busy with the previous character; retry a few times */
158 	for (tries = 0; tries < 16; tries++) {
159 		if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0)
160 			break;
161 		udelay(1000);
162 	}
163 }
164 
165 static int udbg_rtascon_getc_poll(void)
166 {
167 	int c;
168 
169 	if (!rtas.base)
170 		return -1;
171 
172 	if (rtas_call(rtas_getchar_token, 0, 2, &c))
173 		return -1;
174 
175 	return c;
176 }
177 
178 static int udbg_rtascon_getc(void)
179 {
180 	int c;
181 
182 	while ((c = udbg_rtascon_getc_poll()) == -1)
183 		;
184 
185 	return c;
186 }
187 
188 
189 void __init udbg_init_rtas_console(void)
190 {
191 	udbg_putc = udbg_rtascon_putc;
192 	udbg_getc = udbg_rtascon_getc;
193 	udbg_getc_poll = udbg_rtascon_getc_poll;
194 }
195 #endif /* CONFIG_UDBG_RTAS_CONSOLE */
196 
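/*
 * Write a progress/status string to the operator panel using the
 * "display-character" RTAS call.  Panel geometry (line length, number of
 * lines, per-row truncation length and the form-feed character) is read once
 * from the /rtas device-tree node.  When "display-character" is not
 * implemented, the 16-bit @hex value is shown on the hex display via
 * "set-indicator" instead.
 */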
197 void rtas_progress(char *s, unsigned short hex)
198 {
199 	struct device_node *root;
200 	int width;
201 	const __be32 *p;
202 	char *os;
203 	static int display_character, set_indicator;
204 	static int display_width, display_lines, form_feed;
205 	static const int *row_width;
206 	static DEFINE_SPINLOCK(progress_lock);
207 	static int current_line;
208 	static int pending_newline = 0;  /* did last write end with unprinted newline? */
209 
210 	if (!rtas.base)
211 		return;
212 
213 	if (display_width == 0) {
214 		display_width = 0x10;
215 		if ((root = of_find_node_by_path("/rtas"))) {
216 			if ((p = of_get_property(root,
217 					"ibm,display-line-length", NULL)))
218 				display_width = be32_to_cpu(*p);
219 			if ((p = of_get_property(root,
220 					"ibm,form-feed", NULL)))
221 				form_feed = be32_to_cpu(*p);
222 			if ((p = of_get_property(root,
223 					"ibm,display-number-of-lines", NULL)))
224 				display_lines = be32_to_cpu(*p);
225 			row_width = of_get_property(root,
226 					"ibm,display-truncation-length", NULL);
227 			of_node_put(root);
228 		}
229 		display_character = rtas_token("display-character");
230 		set_indicator = rtas_token("set-indicator");
231 	}
232 
233 	if (display_character == RTAS_UNKNOWN_SERVICE) {
234 		/* use hex display if available */
235 		if (set_indicator != RTAS_UNKNOWN_SERVICE)
236 			rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
237 		return;
238 	}
239 
240 	spin_lock(&progress_lock);
241 
242 	/*
243 	 * Last write ended with newline, but we didn't print it since
244 	 * it would just clear the bottom line of output. Print it now
245 	 * instead.
246 	 *
247 	 * If no newline is pending and form feed is supported, clear the
248 	 * display with a form feed; otherwise, print a CR to start output
249 	 * at the beginning of the line.
250 	 */
251 	if (pending_newline) {
252 		rtas_call(display_character, 1, 1, NULL, '\r');
253 		rtas_call(display_character, 1, 1, NULL, '\n');
254 		pending_newline = 0;
255 	} else {
256 		current_line = 0;
257 		if (form_feed)
258 			rtas_call(display_character, 1, 1, NULL,
259 				  (char)form_feed);
260 		else
261 			rtas_call(display_character, 1, 1, NULL, '\r');
262 	}
263 
264 	if (row_width)
265 		width = row_width[current_line];
266 	else
267 		width = display_width;
268 	os = s;
269 	while (*os) {
270 		if (*os == '\n' || *os == '\r') {
271 			/* If newline is the last character, save it
272 			 * until next call to avoid bumping up the
273 			 * display output.
274 			 */
275 			if (*os == '\n' && !os[1]) {
276 				pending_newline = 1;
277 				current_line++;
278 				if (current_line > display_lines-1)
279 					current_line = display_lines-1;
280 				spin_unlock(&progress_lock);
281 				return;
282 			}
283 
284 			/* RTAS wants CR-LF, not just LF */
285 
286 			if (*os == '\n') {
287 				rtas_call(display_character, 1, 1, NULL, '\r');
288 				rtas_call(display_character, 1, 1, NULL, '\n');
289 			} else {
290 				/* CR might be used to re-draw a line, so we'll
291 				 * leave it alone and not add LF.
292 				 */
293 				rtas_call(display_character, 1, 1, NULL, *os);
294 			}
295 
296 			if (row_width)
297 				width = row_width[current_line];
298 			else
299 				width = display_width;
300 		} else {
301 			width--;
302 			rtas_call(display_character, 1, 1, NULL, *os);
303 		}
304 
305 		os++;
306 
307 		/* if we've run past the line width, skip ahead to the next line break */
308 		if (width <= 0)
309 			while ((*os != 0) && (*os != '\n') && (*os != '\r'))
310 				os++;
311 	}
312 
313 	spin_unlock(&progress_lock);
314 }
315 EXPORT_SYMBOL(rtas_progress);		/* needed by rtas_flash module */
316 
317 int rtas_token(const char *service)
318 {
319 	const __be32 *tokp;
320 	if (rtas.dev == NULL)
321 		return RTAS_UNKNOWN_SERVICE;
322 	tokp = of_get_property(rtas.dev, service, NULL);
323 	return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
324 }
325 EXPORT_SYMBOL(rtas_token);
326 
327 int rtas_service_present(const char *service)
328 {
329 	return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
330 }
331 EXPORT_SYMBOL(rtas_service_present);
332 
333 #ifdef CONFIG_RTAS_ERROR_LOGGING
334 /*
335  * Return the firmware-specified size of the error log buffer
336  *  for all rtas calls that require an error buffer argument.
337  *  This includes 'check-exception' and 'rtas-last-error'.
338  */
339 int rtas_get_error_log_max(void)
340 {
341 	static int rtas_error_log_max;
342 	if (rtas_error_log_max)
343 		return rtas_error_log_max;
344 
345 	rtas_error_log_max = rtas_token ("rtas-error-log-max");
346 	if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) ||
347 	    (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) {
348 		printk (KERN_WARNING "RTAS: bad log buffer size %d\n",
349 			rtas_error_log_max);
350 		rtas_error_log_max = RTAS_ERROR_LOG_MAX;
351 	}
352 	return rtas_error_log_max;
353 }
354 EXPORT_SYMBOL(rtas_get_error_log_max);
355 
356 
357 static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
358 static int rtas_last_error_token;
359 
360 /** Return a copy of the detailed error text associated with the
361  *  most recent failed call to rtas.  Because the error text
362  *  might go stale if there are any other intervening rtas calls,
363  *  this routine must be called atomically with whatever produced
364  *  the error (i.e. with rtas.lock still held from the previous call).
365  */
366 static char *__fetch_rtas_last_error(char *altbuf)
367 {
368 	struct rtas_args err_args, save_args;
369 	u32 bufsz;
370 	char *buf = NULL;
371 
372 	if (rtas_last_error_token == -1)
373 		return NULL;
374 
375 	bufsz = rtas_get_error_log_max();
376 
377 	err_args.token = cpu_to_be32(rtas_last_error_token);
378 	err_args.nargs = cpu_to_be32(2);
379 	err_args.nret = cpu_to_be32(1);
380 	err_args.args[0] = cpu_to_be32(__pa(rtas_err_buf));
381 	err_args.args[1] = cpu_to_be32(bufsz);
382 	err_args.args[2] = 0;
383 
384 	save_args = rtas.args;
385 	rtas.args = err_args;
386 
387 	enter_rtas(__pa(&rtas.args));
388 
389 	err_args = rtas.args;
390 	rtas.args = save_args;
391 
392 	/* Log the error in the unlikely case that there was one. */
393 	if (unlikely(err_args.args[2] == 0)) {
394 		if (altbuf) {
395 			buf = altbuf;
396 		} else {
397 			buf = rtas_err_buf;
398 			if (slab_is_available())
399 				buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
400 		}
401 		if (buf)
402 			memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
403 	}
404 
405 	return buf;
406 }
407 
408 #define get_errorlog_buffer()	kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)
409 
410 #else /* CONFIG_RTAS_ERROR_LOGGING */
411 #define __fetch_rtas_last_error(x)	NULL
412 #define get_errorlog_buffer()		NULL
413 #endif
414 
415 
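/*
 * Marshal an RTAS call into @args: the token, argument and return counts and
 * the input arguments are stored big-endian, the return slots are zeroed,
 * and firmware is entered with the physical address of the block.  The
 * caller is responsible for serializing access to @args (rtas.lock for the
 * global buffer).
 */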
416 static void
417 va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
418 		      va_list list)
419 {
420 	int i;
421 
422 	args->token = cpu_to_be32(token);
423 	args->nargs = cpu_to_be32(nargs);
424 	args->nret  = cpu_to_be32(nret);
425 	args->rets  = &(args->args[nargs]);
426 
427 	for (i = 0; i < nargs; ++i)
428 		args->args[i] = cpu_to_be32(va_arg(list, __u32));
429 
430 	for (i = 0; i < nret; ++i)
431 		args->rets[i] = 0;
432 
433 	enter_rtas(__pa(args));
434 }
435 
436 void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
437 {
438 	va_list list;
439 
440 	va_start(list, nret);
441 	va_rtas_call_unlocked(args, token, nargs, nret, list);
442 	va_end(list);
443 }
444 
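/*
 * Make a synchronous RTAS call using the global rtas.args buffer, serialized
 * by the RTAS lock.  Returns -1 if the token is unknown, otherwise the first
 * return word (or 0 when nret == 0); any additional return words are copied
 * into @outputs.  A minimal usage sketch, mirroring rtas_set_power_level()
 * below (the token and argument counts depend on the service being called):
 *
 *	int setlevel, rc;
 *
 *	rc = rtas_call(rtas_token("set-power-level"), 2, 2, &setlevel,
 *		       powerdomain, level);
 */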
445 int rtas_call(int token, int nargs, int nret, int *outputs, ...)
446 {
447 	va_list list;
448 	int i;
449 	unsigned long s;
450 	struct rtas_args *rtas_args;
451 	char *buff_copy = NULL;
452 	int ret;
453 
454 	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
455 		return -1;
456 
457 	s = lock_rtas();
458 
459 	/* We use the global rtas args buffer */
460 	rtas_args = &rtas.args;
461 
462 	va_start(list, outputs);
463 	va_rtas_call_unlocked(rtas_args, token, nargs, nret, list);
464 	va_end(list);
465 
466 	/* A -1 return code indicates that the last command couldn't
467 	   be completed due to a hardware error. */
468 	if (be32_to_cpu(rtas_args->rets[0]) == -1)
469 		buff_copy = __fetch_rtas_last_error(NULL);
470 
471 	if (nret > 1 && outputs != NULL)
472 		for (i = 0; i < nret-1; ++i)
473 			outputs[i] = be32_to_cpu(rtas_args->rets[i+1]);
474 	ret = (nret > 0)? be32_to_cpu(rtas_args->rets[0]): 0;
475 
476 	unlock_rtas(s);
477 
478 	if (buff_copy) {
479 		log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
480 		if (slab_is_available())
481 			kfree(buff_copy);
482 	}
483 	return ret;
484 }
485 EXPORT_SYMBOL(rtas_call);
486 
487 /* For RTAS_BUSY (-2), delay for 1 millisecond.  For an extended busy status
488  * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
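 * For example, a status of 9903 maps to a suggested delay of 10^3 = 1000 ms.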
489  */
490 unsigned int rtas_busy_delay_time(int status)
491 {
492 	int order;
493 	unsigned int ms = 0;
494 
495 	if (status == RTAS_BUSY) {
496 		ms = 1;
497 	} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
498 		   status <= RTAS_EXTENDED_DELAY_MAX) {
499 		order = status - RTAS_EXTENDED_DELAY_MIN;
500 		for (ms = 1; order > 0; order--)
501 			ms *= 10;
502 	}
503 
504 	return ms;
505 }
506 EXPORT_SYMBOL(rtas_busy_delay_time);
507 
508 /* For an RTAS busy status code, perform the hinted delay. */
509 unsigned int rtas_busy_delay(int status)
510 {
511 	unsigned int ms;
512 
513 	might_sleep();
514 	ms = rtas_busy_delay_time(status);
515 	if (ms && need_resched())
516 		msleep(ms);
517 
518 	return ms;
519 }
520 EXPORT_SYMBOL(rtas_busy_delay);
521 
522 static int rtas_error_rc(int rtas_rc)
523 {
524 	int rc;
525 
526 	switch (rtas_rc) {
527 		case -1: 		/* Hardware Error */
528 			rc = -EIO;
529 			break;
530 		case -3:		/* Bad indicator/domain/etc */
531 			rc = -EINVAL;
532 			break;
533 		case -9000:		/* Isolation error */
534 			rc = -EFAULT;
535 			break;
536 		case -9001:		/* Outstanding TCE/PTE */
537 			rc = -EEXIST;
538 			break;
539 		case -9002:		/* No usable slot */
540 			rc = -ENODEV;
541 			break;
542 		default:
543 			printk(KERN_ERR "%s: unexpected RTAS error %d\n",
544 					__func__, rtas_rc);
545 			rc = -ERANGE;
546 			break;
547 	}
548 	return rc;
549 }
550 
551 int rtas_get_power_level(int powerdomain, int *level)
552 {
553 	int token = rtas_token("get-power-level");
554 	int rc;
555 
556 	if (token == RTAS_UNKNOWN_SERVICE)
557 		return -ENOENT;
558 
559 	while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
560 		udelay(1);
561 
562 	if (rc < 0)
563 		return rtas_error_rc(rc);
564 	return rc;
565 }
566 EXPORT_SYMBOL(rtas_get_power_level);
567 
568 int rtas_set_power_level(int powerdomain, int level, int *setlevel)
569 {
570 	int token = rtas_token("set-power-level");
571 	int rc;
572 
573 	if (token == RTAS_UNKNOWN_SERVICE)
574 		return -ENOENT;
575 
576 	do {
577 		rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
578 	} while (rtas_busy_delay(rc));
579 
580 	if (rc < 0)
581 		return rtas_error_rc(rc);
582 	return rc;
583 }
584 EXPORT_SYMBOL(rtas_set_power_level);
585 
586 int rtas_get_sensor(int sensor, int index, int *state)
587 {
588 	int token = rtas_token("get-sensor-state");
589 	int rc;
590 
591 	if (token == RTAS_UNKNOWN_SERVICE)
592 		return -ENOENT;
593 
594 	do {
595 		rc = rtas_call(token, 2, 2, state, sensor, index);
596 	} while (rtas_busy_delay(rc));
597 
598 	if (rc < 0)
599 		return rtas_error_rc(rc);
600 	return rc;
601 }
602 EXPORT_SYMBOL(rtas_get_sensor);
603 
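/*
 * Like rtas_get_sensor(), but never sleeps: a busy or extended-delay status
 * from firmware triggers a WARN instead of a retry, presumably so this can
 * be used from contexts that cannot sleep.
 */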
604 int rtas_get_sensor_fast(int sensor, int index, int *state)
605 {
606 	int token = rtas_token("get-sensor-state");
607 	int rc;
608 
609 	if (token == RTAS_UNKNOWN_SERVICE)
610 		return -ENOENT;
611 
612 	rc = rtas_call(token, 2, 2, state, sensor, index);
613 	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
614 				    rc <= RTAS_EXTENDED_DELAY_MAX));
615 
616 	if (rc < 0)
617 		return rtas_error_rc(rc);
618 	return rc;
619 }
620 
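/*
 * Scan the "rtas-indicators" device-tree property, which is a list of
 * (token, maxindex) pairs, for @token.  Returns true, and fills in
 * *maxindex when non-NULL, if the indicator is listed; false otherwise.
 */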
621 bool rtas_indicator_present(int token, int *maxindex)
622 {
623 	int proplen, count, i;
624 	const struct indicator_elem {
625 		__be32 token;
626 		__be32 maxindex;
627 	} *indicators;
628 
629 	indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
630 	if (!indicators)
631 		return false;
632 
633 	count = proplen / sizeof(struct indicator_elem);
634 
635 	for (i = 0; i < count; i++) {
636 		if (__be32_to_cpu(indicators[i].token) != token)
637 			continue;
638 		if (maxindex)
639 			*maxindex = __be32_to_cpu(indicators[i].maxindex);
640 		return true;
641 	}
642 
643 	return false;
644 }
645 EXPORT_SYMBOL(rtas_indicator_present);
646 
647 int rtas_set_indicator(int indicator, int index, int new_value)
648 {
649 	int token = rtas_token("set-indicator");
650 	int rc;
651 
652 	if (token == RTAS_UNKNOWN_SERVICE)
653 		return -ENOENT;
654 
655 	do {
656 		rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
657 	} while (rtas_busy_delay(rc));
658 
659 	if (rc < 0)
660 		return rtas_error_rc(rc);
661 	return rc;
662 }
663 EXPORT_SYMBOL(rtas_set_indicator);
664 
665 /*
666  * Like rtas_set_indicator(), but warns on a busy/extended delay status instead of retrying
667  */
668 int rtas_set_indicator_fast(int indicator, int index, int new_value)
669 {
670 	int rc;
671 	int token = rtas_token("set-indicator");
672 
673 	if (token == RTAS_UNKNOWN_SERVICE)
674 		return -ENOENT;
675 
676 	rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
677 
678 	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
679 				    rc <= RTAS_EXTENDED_DELAY_MAX));
680 
681 	if (rc < 0)
682 		return rtas_error_rc(rc);
683 
684 	return rc;
685 }
686 
687 void __noreturn rtas_restart(char *cmd)
688 {
689 	if (rtas_flash_term_hook)
690 		rtas_flash_term_hook(SYS_RESTART);
691 	printk("RTAS system-reboot returned %d\n",
692 	       rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
693 	for (;;);
694 }
695 
696 void rtas_power_off(void)
697 {
698 	if (rtas_flash_term_hook)
699 		rtas_flash_term_hook(SYS_POWER_OFF);
700 	/* allow power on only with power button press */
701 	printk("RTAS power-off returned %d\n",
702 	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
703 	for (;;);
704 }
705 
706 void __noreturn rtas_halt(void)
707 {
708 	if (rtas_flash_term_hook)
709 		rtas_flash_term_hook(SYS_HALT);
710 	/* allow power on only with power button press */
711 	printk("RTAS power-off returned %d\n",
712 	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
713 	for (;;);
714 }
715 
716 /* Must be in the RMO region, so we place it here */
717 static char rtas_os_term_buf[2048];
718 
719 void rtas_os_term(char *str)
720 {
721 	int status;
722 
723 	/*
724 	 * Firmware with the ibm,extended-os-term property is guaranteed
725 	 * to always return from an ibm,os-term call. Earlier versions without
726 	 * this property may terminate the partition which we want to avoid
727 	 * since it interferes with panic_timeout.
728 	 */
729 	if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
730 	    RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
731 		return;
732 
733 	snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
734 
735 	do {
736 		status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
737 				   __pa(rtas_os_term_buf));
738 	} while (rtas_busy_delay(status));
739 
740 	if (status != 0)
741 		printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
742 }
743 
744 static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
745 #ifdef CONFIG_PPC_PSERIES
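/*
 * Run on the CPU that performs the actual ibm,suspend-me sequence: the SLB is
 * shrunk to SLB_MIN_SIZE for the duration of the call (and restored on
 * failure), ibm,suspend-me is retried while other threads are still active,
 * and, if requested, every online CPU is prodded back out of H_JOIN once the
 * suspend has completed or failed.
 */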
746 static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
747 {
748 	u16 slb_size = mmu_slb_size;
749 	int rc = H_MULTI_THREADS_ACTIVE;
750 	int cpu;
751 
752 	slb_set_size(SLB_MIN_SIZE);
753 	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
754 
755 	while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
756 	       !atomic_read(&data->error))
757 		rc = rtas_call(data->token, 0, 1, NULL);
758 
759 	if (rc || atomic_read(&data->error)) {
760 		printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
761 		slb_set_size(slb_size);
762 	}
763 
764 	if (atomic_read(&data->error))
765 		rc = atomic_read(&data->error);
766 
767 	atomic_set(&data->error, rc);
768 	pSeries_coalesce_init();
769 
770 	if (wake_when_done) {
771 		atomic_set(&data->done, 1);
772 
773 		for_each_online_cpu(cpu)
774 			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
775 	}
776 
777 	if (atomic_dec_return(&data->working) == 0)
778 		complete(data->complete);
779 
780 	return rc;
781 }
782 
783 int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data)
784 {
785 	atomic_inc(&data->working);
786 	return __rtas_suspend_last_cpu(data, 0);
787 }
788 
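/*
 * Park this CPU in H_JOIN with external interrupts disabled.  A CPU that
 * returns H_SUCCESS was prodded after the suspend completed; a CPU that gets
 * H_CONTINUE finds all other CPUs already joined and therefore performs the
 * suspend itself via __rtas_suspend_last_cpu().
 */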
789 static int __rtas_suspend_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
790 {
791 	long rc = H_SUCCESS;
792 	unsigned long msr_save;
793 	int cpu;
794 
795 	atomic_inc(&data->working);
796 
797 	/* really need to ensure MSR.EE is off for H_JOIN */
798 	msr_save = mfmsr();
799 	mtmsr(msr_save & ~(MSR_EE));
800 
801 	while (rc == H_SUCCESS && !atomic_read(&data->done) && !atomic_read(&data->error))
802 		rc = plpar_hcall_norets(H_JOIN);
803 
804 	mtmsr(msr_save);
805 
806 	if (rc == H_SUCCESS) {
807 		/* This cpu was prodded and the suspend is complete. */
808 		goto out;
809 	} else if (rc == H_CONTINUE) {
810 		/* All other cpus are in H_JOIN, this cpu does
811 		 * the suspend.
812 		 */
813 		return __rtas_suspend_last_cpu(data, wake_when_done);
814 	} else {
815 		printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
816 		       smp_processor_id(), rc);
817 		atomic_set(&data->error, rc);
818 	}
819 
820 	if (wake_when_done) {
821 		atomic_set(&data->done, 1);
822 
823 		/* This cpu did the suspend or got an error; in either case,
824 		 * we need to prod all other cpus out of join state.
825 		 * Extra prods are harmless.
826 		 */
827 		for_each_online_cpu(cpu)
828 			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
829 	}
830 out:
831 	if (atomic_dec_return(&data->working) == 0)
832 		complete(data->complete);
833 	return rc;
834 }
835 
836 int rtas_suspend_cpu(struct rtas_suspend_me_data *data)
837 {
838 	return __rtas_suspend_cpu(data, 0);
839 }
840 
841 static void rtas_percpu_suspend_me(void *info)
842 {
843 	__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
844 }
845 
846 enum rtas_cpu_state {
847 	DOWN,
848 	UP,
849 };
850 
851 #ifndef CONFIG_SMP
852 static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
853 				cpumask_var_t cpus)
854 {
855 	if (!cpumask_empty(cpus)) {
856 		cpumask_clear(cpus);
857 		return -EINVAL;
858 	} else
859 		return 0;
860 }
861 #else
862 /* On return the cpumask will be altered to indicate which CPUs changed state.
863  * CPUs whose state changed will be set in the mask,
864  * CPUs whose state was unchanged will be cleared in the mask. */
865 static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
866 				cpumask_var_t cpus)
867 {
868 	int cpu;
869 	int cpuret = 0;
870 	int ret = 0;
871 
872 	if (cpumask_empty(cpus))
873 		return 0;
874 
875 	for_each_cpu(cpu, cpus) {
876 		struct device *dev = get_cpu_device(cpu);
877 
878 		switch (state) {
879 		case DOWN:
880 			cpuret = device_offline(dev);
881 			break;
882 		case UP:
883 			cpuret = device_online(dev);
884 			break;
885 		}
886 		if (cpuret < 0) {
887 			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
888 					__func__,
889 					((state == UP) ? "up" : "down"),
890 					cpu, cpuret);
891 			if (!ret)
892 				ret = cpuret;
893 			if (state == UP) {
894 				/* clear bits for unchanged cpus, return */
895 				cpumask_shift_right(cpus, cpus, cpu);
896 				cpumask_shift_left(cpus, cpus, cpu);
897 				break;
898 			} else {
899 				/* clear bit for unchanged cpu, continue */
900 				cpumask_clear_cpu(cpu, cpus);
901 			}
902 		}
903 		cond_resched();
904 	}
905 
906 	return ret;
907 }
908 #endif
909 
910 int rtas_online_cpus_mask(cpumask_var_t cpus)
911 {
912 	int ret;
913 
914 	ret = rtas_cpu_state_change_mask(UP, cpus);
915 
916 	if (ret) {
917 		cpumask_var_t tmp_mask;
918 
919 		if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
920 			return ret;
921 
922 		/* Use tmp_mask to preserve cpus mask from first failure */
923 		cpumask_copy(tmp_mask, cpus);
924 		rtas_offline_cpus_mask(tmp_mask);
925 		free_cpumask_var(tmp_mask);
926 	}
927 
928 	return ret;
929 }
930 
931 int rtas_offline_cpus_mask(cpumask_var_t cpus)
932 {
933 	return rtas_cpu_state_change_mask(DOWN, cpus);
934 }
935 
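/*
 * Suspend the partition for the VASI stream identified by @handle: verify
 * via H_VASI_STATE that the stream is in the suspending state, bring all
 * present CPUs online, then run rtas_percpu_suspend_me() on every CPU so
 * that exactly one of them makes the ibm,suspend-me call while the rest sit
 * in H_JOIN.  CPUs that were offline beforehand are taken back down
 * afterwards.
 */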
936 int rtas_ibm_suspend_me(u64 handle)
937 {
938 	long state;
939 	long rc;
940 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
941 	struct rtas_suspend_me_data data;
942 	DECLARE_COMPLETION_ONSTACK(done);
943 	cpumask_var_t offline_mask;
944 	int cpuret;
945 
946 	if (!rtas_service_present("ibm,suspend-me"))
947 		return -ENOSYS;
948 
949 	/* Make sure the state is valid */
950 	rc = plpar_hcall(H_VASI_STATE, retbuf, handle);
951 
952 	state = retbuf[0];
953 
954 	if (rc) {
955 		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
956 		return rc;
957 	} else if (state == H_VASI_ENABLED) {
958 		return -EAGAIN;
959 	} else if (state != H_VASI_SUSPENDING) {
960 		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
961 		       state);
962 		return -EIO;
963 	}
964 
965 	if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
966 		return -ENOMEM;
967 
968 	atomic_set(&data.working, 0);
969 	atomic_set(&data.done, 0);
970 	atomic_set(&data.error, 0);
971 	data.token = rtas_token("ibm,suspend-me");
972 	data.complete = &done;
973 
974 	lock_device_hotplug();
975 
976 	/* All present CPUs must be online */
977 	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
978 	cpuret = rtas_online_cpus_mask(offline_mask);
979 	if (cpuret) {
980 		pr_err("%s: Could not bring present CPUs online.\n", __func__);
981 		atomic_set(&data.error, cpuret);
982 		goto out;
983 	}
984 
985 	cpu_hotplug_disable();
986 
987 	/* Check if we raced with a CPU-Offline Operation */
988 	if (!cpumask_equal(cpu_present_mask, cpu_online_mask)) {
989 		pr_info("%s: Raced against a concurrent CPU-Offline\n", __func__);
990 		atomic_set(&data.error, -EAGAIN);
991 		goto out_hotplug_enable;
992 	}
993 
994 	/* Call function on all CPUs.  One of us will make the
995 	 * rtas call
996 	 */
997 	on_each_cpu(rtas_percpu_suspend_me, &data, 0);
998 
999 	wait_for_completion(&done);
1000 
1001 	if (atomic_read(&data.error) != 0)
1002 		printk(KERN_ERR "Error doing global join\n");
1003 
1004 out_hotplug_enable:
1005 	cpu_hotplug_enable();
1006 
1007 	/* Take down CPUs not online prior to suspend */
1008 	cpuret = rtas_offline_cpus_mask(offline_mask);
1009 	if (cpuret)
1010 		pr_warn("%s: Could not restore CPUs to offline state.\n",
1011 				__func__);
1012 
1013 out:
1014 	unlock_device_hotplug();
1015 	free_cpumask_var(offline_mask);
1016 	return atomic_read(&data.error);
1017 }
1018 
1019 /**
1020  * rtas_call_reentrant() - Used for reentrant rtas calls
1021  * @token:	Token for desired reentrant RTAS call
1022  * @nargs:	Number of Input Parameters
1023  * @nret:	Number of Output Parameters
1024  * @outputs:	Array of outputs
1025  * @...:	Inputs for desired RTAS call
1026  *
1027  * According to LoPAR documentation, only "ibm,int-on", "ibm,int-off",
1028  * "ibm,get-xive" and "ibm,set-xive" are currently reentrant.
1029  * Reentrant calls need their own rtas_args buffer, so they use the per-CPU
1030  * PACA buffer instead of rtas.args.
1031  *
1032  * Return:	-1 on error,
1033  *		First output value of RTAS call if (nret > 0),
1034  *		0 otherwise.
1035  */
1036 int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...)
1037 {
1038 	va_list list;
1039 	struct rtas_args *args;
1040 	unsigned long flags;
1041 	int i, ret = 0;
1042 
1043 	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
1044 		return -1;
1045 
1046 	local_irq_save(flags);
1047 	preempt_disable();
1048 
1049 	/* We use the per-cpu (PACA) rtas args buffer */
1050 	args = local_paca->rtas_args_reentrant;
1051 
1052 	va_start(list, outputs);
1053 	va_rtas_call_unlocked(args, token, nargs, nret, list);
1054 	va_end(list);
1055 
1056 	if (nret > 1 && outputs)
1057 		for (i = 0; i < nret - 1; ++i)
1058 			outputs[i] = be32_to_cpu(args->rets[i + 1]);
1059 
1060 	if (nret > 0)
1061 		ret = be32_to_cpu(args->rets[0]);
1062 
1063 	local_irq_restore(flags);
1064 	preempt_enable();
1065 
1066 	return ret;
1067 }
1068 
1069 #else /* CONFIG_PPC_PSERIES */
1070 int rtas_ibm_suspend_me(u64 handle)
1071 {
1072 	return -ENOSYS;
1073 }
1074 #endif
1075 
1076 /**
1077  * Find a specific pseries error log in an RTAS extended event log.
1078  * @log: RTAS error/event log
1079  * @section_id: two character section identifier
1080  *
1081  * Returns a pointer to the specified errorlog or NULL if not found.
1082  */
1083 struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
1084 					      uint16_t section_id)
1085 {
1086 	struct rtas_ext_event_log_v6 *ext_log =
1087 		(struct rtas_ext_event_log_v6 *)log->buffer;
1088 	struct pseries_errorlog *sect;
1089 	unsigned char *p, *log_end;
1090 	uint32_t ext_log_length = rtas_error_extended_log_length(log);
1091 	uint8_t log_format = rtas_ext_event_log_format(ext_log);
1092 	uint32_t company_id = rtas_ext_event_company_id(ext_log);
1093 
1094 	/* Check that we understand the format */
1095 	if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
1096 	    log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
1097 	    company_id != RTAS_V6EXT_COMPANY_ID_IBM)
1098 		return NULL;
1099 
1100 	log_end = log->buffer + ext_log_length;
1101 	p = ext_log->vendor_log;
1102 
1103 	while (p < log_end) {
1104 		sect = (struct pseries_errorlog *)p;
1105 		if (pseries_errorlog_id(sect) == section_id)
1106 			return sect;
1107 		p += pseries_errorlog_length(sect);
1108 	}
1109 
1110 	return NULL;
1111 }
1112 
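/*
 * Entry point for the rtas syscall: copy the caller's rtas_args block from
 * user space, validate the argument/return counts, perform the call under
 * the RTAS lock (ibm,suspend-me is intercepted and routed through
 * rtas_ibm_suspend_me() instead), and copy the return words back out.
 * CAP_SYS_ADMIN is required.
 */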
1113 /* We assume to be passed big endian arguments */
1114 SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
1115 {
1116 	struct rtas_args args;
1117 	unsigned long flags;
1118 	char *buff_copy, *errbuf = NULL;
1119 	int nargs, nret, token;
1120 
1121 	if (!capable(CAP_SYS_ADMIN))
1122 		return -EPERM;
1123 
1124 	if (!rtas.entry)
1125 		return -EINVAL;
1126 
1127 	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
1128 		return -EFAULT;
1129 
1130 	nargs = be32_to_cpu(args.nargs);
1131 	nret  = be32_to_cpu(args.nret);
1132 	token = be32_to_cpu(args.token);
1133 
1134 	if (nargs >= ARRAY_SIZE(args.args)
1135 	    || nret > ARRAY_SIZE(args.args)
1136 	    || nargs + nret > ARRAY_SIZE(args.args))
1137 		return -EINVAL;
1138 
1139 	/* Copy in args. */
1140 	if (copy_from_user(args.args, uargs->args,
1141 			   nargs * sizeof(rtas_arg_t)) != 0)
1142 		return -EFAULT;
1143 
1144 	if (token == RTAS_UNKNOWN_SERVICE)
1145 		return -EINVAL;
1146 
1147 	args.rets = &args.args[nargs];
1148 	memset(args.rets, 0, nret * sizeof(rtas_arg_t));
1149 
1150 	/* Need to handle the ibm,suspend-me call specially */
1151 	if (token == ibm_suspend_me_token) {
1152 
1153 		/*
1154 		 * rtas_ibm_suspend_me assumes the streamid handle is in cpu
1155 		 * endian, or at least the hcall within it requires it.
1156 		 */
1157 		int rc = 0;
1158 		u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
1159 		              | be32_to_cpu(args.args[1]);
1160 		rc = rtas_ibm_suspend_me(handle);
1161 		if (rc == -EAGAIN)
1162 			args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
1163 		else if (rc == -EIO)
1164 			args.rets[0] = cpu_to_be32(-1);
1165 		else if (rc)
1166 			return rc;
1167 		goto copy_return;
1168 	}
1169 
1170 	buff_copy = get_errorlog_buffer();
1171 
1172 	flags = lock_rtas();
1173 
1174 	rtas.args = args;
1175 	enter_rtas(__pa(&rtas.args));
1176 	args = rtas.args;
1177 
1178 	/* A -1 return code indicates that the last command couldn't
1179 	   be completed due to a hardware error. */
1180 	if (be32_to_cpu(args.rets[0]) == -1)
1181 		errbuf = __fetch_rtas_last_error(buff_copy);
1182 
1183 	unlock_rtas(flags);
1184 
1185 	if (buff_copy) {
1186 		if (errbuf)
1187 			log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
1188 		kfree(buff_copy);
1189 	}
1190 
1191  copy_return:
1192 	/* Copy out args. */
1193 	if (copy_to_user(uargs->args + nargs,
1194 			 args.args + nargs,
1195 			 nret * sizeof(rtas_arg_t)) != 0)
1196 		return -EFAULT;
1197 
1198 	return 0;
1199 }
1200 
1201 /*
1202  * Call early during boot, before mem init, to retrieve the RTAS
1203  * information from the device-tree and allocate the RMO buffer for userland
1204  * accesses.
1205  */
1206 void __init rtas_initialize(void)
1207 {
1208 	unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
1209 	u32 base, size, entry;
1210 	int no_base, no_size, no_entry;
1211 
1212 	/* Get the RTAS device node and fill in our "rtas" structure with
1213 	 * information about it.
1214 	 */
1215 	rtas.dev = of_find_node_by_name(NULL, "rtas");
1216 	if (!rtas.dev)
1217 		return;
1218 
1219 	no_base = of_property_read_u32(rtas.dev, "linux,rtas-base", &base);
1220 	no_size = of_property_read_u32(rtas.dev, "rtas-size", &size);
1221 	if (no_base || no_size) {
1222 		of_node_put(rtas.dev);
1223 		rtas.dev = NULL;
1224 		return;
1225 	}
1226 
1227 	rtas.base = base;
1228 	rtas.size = size;
1229 	no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
1230 	rtas.entry = no_entry ? rtas.base : entry;
1231 
1232 	/* If RTAS was found, allocate the RMO buffer for it and look for
1233 	 * the stop-self token if any
1234 	 */
1235 #ifdef CONFIG_PPC64
1236 	if (firmware_has_feature(FW_FEATURE_LPAR)) {
1237 		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
1238 		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
1239 	}
1240 #endif
1241 	rtas_rmo_buf = memblock_phys_alloc_range(RTAS_RMOBUF_MAX, PAGE_SIZE,
1242 						 0, rtas_region);
1243 	if (!rtas_rmo_buf)
1244 		panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n",
1245 		      PAGE_SIZE, &rtas_region);
1246 
1247 #ifdef CONFIG_RTAS_ERROR_LOGGING
1248 	rtas_last_error_token = rtas_token("rtas-last-error");
1249 #endif
1250 }
1251 
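/*
 * Flattened device-tree scan callback: runs very early in boot, records the
 * RTAS base/entry/size from the /rtas node and, when configured, the
 * put-term-char/get-term-char tokens for the udbg RTAS console.  Returning 1
 * stops the scan.
 */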
1252 int __init early_init_dt_scan_rtas(unsigned long node,
1253 		const char *uname, int depth, void *data)
1254 {
1255 	const u32 *basep, *entryp, *sizep;
1256 
1257 	if (depth != 1 || strcmp(uname, "rtas") != 0)
1258 		return 0;
1259 
1260 	basep  = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
1261 	entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1262 	sizep  = of_get_flat_dt_prop(node, "rtas-size", NULL);
1263 
1264 	if (basep && entryp && sizep) {
1265 		rtas.base = *basep;
1266 		rtas.entry = *entryp;
1267 		rtas.size = *sizep;
1268 	}
1269 
1270 #ifdef CONFIG_UDBG_RTAS_CONSOLE
1271 	basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
1272 	if (basep)
1273 		rtas_putchar_token = *basep;
1274 
1275 	basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
1276 	if (basep)
1277 		rtas_getchar_token = *basep;
1278 
1279 	if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE &&
1280 	    rtas_getchar_token != RTAS_UNKNOWN_SERVICE)
1281 		udbg_init_rtas_console();
1282 
1283 #endif
1284 
1285 	/* break now */
1286 	return 1;
1287 }
1288 
1289 static arch_spinlock_t timebase_lock;
1290 static u64 timebase = 0;
1291 
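/*
 * Hand the timebase from this CPU to another: freeze-time-base is called,
 * the value is published through the shared 'timebase' variable, and we spin
 * until rtas_take_timebase() on the other CPU has consumed it before the
 * timebase is thawed again.
 */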
1292 void rtas_give_timebase(void)
1293 {
1294 	unsigned long flags;
1295 
1296 	local_irq_save(flags);
1297 	hard_irq_disable();
1298 	arch_spin_lock(&timebase_lock);
1299 	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
1300 	timebase = get_tb();
1301 	arch_spin_unlock(&timebase_lock);
1302 
1303 	while (timebase)
1304 		barrier();
1305 	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
1306 	local_irq_restore(flags);
1307 }
1308 
1309 void rtas_take_timebase(void)
1310 {
1311 	while (!timebase)
1312 		barrier();
1313 	arch_spin_lock(&timebase_lock);
1314 	set_tb(timebase >> 32, timebase & 0xffffffff);
1315 	timebase = 0;
1316 	arch_spin_unlock(&timebase_lock);
1317 }
1318