xref: /openbmc/linux/arch/powerpc/kernel/rtas.c (revision 1c2dd16a)
1 /*
2  *
3  * Procedures for interfacing to the RTAS on CHRP machines.
4  *
5  * Peter Bergner, IBM	March 2001.
6  * Copyright (C) 2001 IBM.
7  *
8  *      This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  */
13 
14 #include <stdarg.h>
15 #include <linux/kernel.h>
16 #include <linux/types.h>
17 #include <linux/spinlock.h>
18 #include <linux/export.h>
19 #include <linux/init.h>
20 #include <linux/capability.h>
21 #include <linux/delay.h>
22 #include <linux/cpu.h>
23 #include <linux/smp.h>
24 #include <linux/completion.h>
25 #include <linux/cpumask.h>
26 #include <linux/memblock.h>
27 #include <linux/slab.h>
28 #include <linux/reboot.h>
29 
30 #include <asm/prom.h>
31 #include <asm/rtas.h>
32 #include <asm/hvcall.h>
33 #include <asm/machdep.h>
34 #include <asm/firmware.h>
35 #include <asm/page.h>
36 #include <asm/param.h>
37 #include <asm/delay.h>
38 #include <linux/uaccess.h>
39 #include <asm/udbg.h>
40 #include <asm/syscalls.h>
41 #include <asm/smp.h>
42 #include <linux/atomic.h>
43 #include <asm/time.h>
44 #include <asm/mmu.h>
45 #include <asm/topology.h>
46 
47 /* This is here deliberately so it's only used in this file */
48 void enter_rtas(unsigned long);
49 
50 struct rtas_t rtas = {
51 	.lock = __ARCH_SPIN_LOCK_UNLOCKED
52 };
53 EXPORT_SYMBOL(rtas);
54 
55 DEFINE_SPINLOCK(rtas_data_buf_lock);
56 EXPORT_SYMBOL(rtas_data_buf_lock);
57 
58 char rtas_data_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
59 EXPORT_SYMBOL(rtas_data_buf);
60 
61 unsigned long rtas_rmo_buf;
62 
63 /*
64  * If non-NULL, this gets called when the kernel terminates.
65  * This is done like this so rtas_flash can be a module.
66  */
67 void (*rtas_flash_term_hook)(int);
68 EXPORT_SYMBOL(rtas_flash_term_hook);
69 
70 /* RTAS uses home-made raw locking instead of spin_lock_irqsave()
71  * because RTAS can be called from really nasty contexts, such as
72  * with the timebase stopped, which would lock up with normal locks
73  * and spinlock debugging enabled.
74  */
75 static unsigned long lock_rtas(void)
76 {
77 	unsigned long flags;
78 
79 	local_irq_save(flags);
80 	preempt_disable();
81 	arch_spin_lock_flags(&rtas.lock, flags);
82 	return flags;
83 }
84 
85 static void unlock_rtas(unsigned long flags)
86 {
87 	arch_spin_unlock(&rtas.lock);
88 	local_irq_restore(flags);
89 	preempt_enable();
90 }
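
/*
 * Illustrative sketch (not a new caller): lock_rtas()/unlock_rtas() bracket
 * uses of the shared rtas.args buffer for code that bypasses rtas_call(),
 * exactly as call_rtas_display_status() does below:
 *
 *	unsigned long s = lock_rtas();
 *	rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c);
 *	unlock_rtas(s);
 */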
91 
92 /*
93  * call_rtas_display_status and call_rtas_display_status_delay
94  * are designed only for very early low-level debugging, which
95  * is why the token is hard-coded to 10.
96  */
97 static void call_rtas_display_status(unsigned char c)
98 {
99 	unsigned long s;
100 
101 	if (!rtas.base)
102 		return;
103 
104 	s = lock_rtas();
105 	rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c);
106 	unlock_rtas(s);
107 }
108 
109 static void call_rtas_display_status_delay(char c)
110 {
111 	static int pending_newline = 0;  /* did last write end with unprinted newline? */
112 	static int width = 16;
113 
114 	if (c == '\n') {
115 		while (width-- > 0)
116 			call_rtas_display_status(' ');
117 		width = 16;
118 		mdelay(500);
119 		pending_newline = 1;
120 	} else {
121 		if (pending_newline) {
122 			call_rtas_display_status('\r');
123 			call_rtas_display_status('\n');
124 		}
125 		pending_newline = 0;
126 		if (width--) {
127 			call_rtas_display_status(c);
128 			udelay(10000);
129 		}
130 	}
131 }
132 
133 void __init udbg_init_rtas_panel(void)
134 {
135 	udbg_putc = call_rtas_display_status_delay;
136 }
137 
138 #ifdef CONFIG_UDBG_RTAS_CONSOLE
139 
140 /* If you think you're dying before early_init_dt_scan_rtas() does its
141  * work, you can hard code the token values for your firmware here and
142  * hardcode rtas.base/entry etc.
143  */
144 static unsigned int rtas_putchar_token = RTAS_UNKNOWN_SERVICE;
145 static unsigned int rtas_getchar_token = RTAS_UNKNOWN_SERVICE;
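
/*
 * Hypothetical example of the hard-coding described above (all values are
 * placeholders, not taken from any real firmware):
 *
 *	static unsigned int rtas_putchar_token = 38;
 *	static unsigned int rtas_getchar_token = 39;
 *	// plus rtas.base, rtas.entry, rtas.size set to firmware-specific values
 */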
146 
147 static void udbg_rtascon_putc(char c)
148 {
149 	int tries;
150 
151 	if (!rtas.base)
152 		return;
153 
154 	/* Add CRs before LFs */
155 	if (c == '\n')
156 		udbg_rtascon_putc('\r');
157 
158 	/* if the RTAS call fails (e.g. the console is busy), retry for a while */
159 	for (tries = 0; tries < 16; tries++) {
160 		if (rtas_call(rtas_putchar_token, 1, 1, NULL, c) == 0)
161 			break;
162 		udelay(1000);
163 	}
164 }
165 
166 static int udbg_rtascon_getc_poll(void)
167 {
168 	int c;
169 
170 	if (!rtas.base)
171 		return -1;
172 
173 	if (rtas_call(rtas_getchar_token, 0, 2, &c))
174 		return -1;
175 
176 	return c;
177 }
178 
179 static int udbg_rtascon_getc(void)
180 {
181 	int c;
182 
183 	while ((c = udbg_rtascon_getc_poll()) == -1)
184 		;
185 
186 	return c;
187 }
188 
189 
190 void __init udbg_init_rtas_console(void)
191 {
192 	udbg_putc = udbg_rtascon_putc;
193 	udbg_getc = udbg_rtascon_getc;
194 	udbg_getc_poll = udbg_rtascon_getc_poll;
195 }
196 #endif /* CONFIG_UDBG_RTAS_CONSOLE */
197 
198 void rtas_progress(char *s, unsigned short hex)
199 {
200 	struct device_node *root;
201 	int width;
202 	const __be32 *p;
203 	char *os;
204 	static int display_character, set_indicator;
205 	static int display_width, display_lines, form_feed;
206 	static const int *row_width;
207 	static DEFINE_SPINLOCK(progress_lock);
208 	static int current_line;
209 	static int pending_newline = 0;  /* did last write end with unprinted newline? */
210 
211 	if (!rtas.base)
212 		return;
213 
214 	if (display_width == 0) {
215 		display_width = 0x10;
216 		if ((root = of_find_node_by_path("/rtas"))) {
217 			if ((p = of_get_property(root,
218 					"ibm,display-line-length", NULL)))
219 				display_width = be32_to_cpu(*p);
220 			if ((p = of_get_property(root,
221 					"ibm,form-feed", NULL)))
222 				form_feed = be32_to_cpu(*p);
223 			if ((p = of_get_property(root,
224 					"ibm,display-number-of-lines", NULL)))
225 				display_lines = be32_to_cpu(*p);
226 			row_width = of_get_property(root,
227 					"ibm,display-truncation-length", NULL);
228 			of_node_put(root);
229 		}
230 		display_character = rtas_token("display-character");
231 		set_indicator = rtas_token("set-indicator");
232 	}
233 
234 	if (display_character == RTAS_UNKNOWN_SERVICE) {
235 		/* use hex display if available */
236 		if (set_indicator != RTAS_UNKNOWN_SERVICE)
237 			rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex);
238 		return;
239 	}
240 
241 	spin_lock(&progress_lock);
242 
243 	/*
244 	 * Last write ended with newline, but we didn't print it since
245 	 * it would just clear the bottom line of output. Print it now
246 	 * instead.
247 	 *
248 	 * If no newline is pending and form feed is supported, clear the
249 	 * display with a form feed; otherwise, print a CR to start output
250 	 * at the beginning of the line.
251 	 */
252 	if (pending_newline) {
253 		rtas_call(display_character, 1, 1, NULL, '\r');
254 		rtas_call(display_character, 1, 1, NULL, '\n');
255 		pending_newline = 0;
256 	} else {
257 		current_line = 0;
258 		if (form_feed)
259 			rtas_call(display_character, 1, 1, NULL,
260 				  (char)form_feed);
261 		else
262 			rtas_call(display_character, 1, 1, NULL, '\r');
263 	}
264 
265 	if (row_width)
266 		width = row_width[current_line];
267 	else
268 		width = display_width;
269 	os = s;
270 	while (*os) {
271 		if (*os == '\n' || *os == '\r') {
272 			/* If newline is the last character, save it
273 			 * until next call to avoid bumping up the
274 			 * display output.
275 			 */
276 			if (*os == '\n' && !os[1]) {
277 				pending_newline = 1;
278 				current_line++;
279 				if (current_line > display_lines-1)
280 					current_line = display_lines-1;
281 				spin_unlock(&progress_lock);
282 				return;
283 			}
284 
285 			/* RTAS wants CR-LF, not just LF */
286 
287 			if (*os == '\n') {
288 				rtas_call(display_character, 1, 1, NULL, '\r');
289 				rtas_call(display_character, 1, 1, NULL, '\n');
290 			} else {
291 				/* CR might be used to re-draw a line, so we'll
292 				 * leave it alone and not add LF.
293 				 */
294 				rtas_call(display_character, 1, 1, NULL, *os);
295 			}
296 
297 			if (row_width)
298 				width = row_width[current_line];
299 			else
300 				width = display_width;
301 		} else {
302 			width--;
303 			rtas_call(display_character, 1, 1, NULL, *os);
304 		}
305 
306 		os++;
307 
308 		/* if we overran the line width, skip ahead to the next newline */
309 		if (width <= 0)
310 			while ((*os != 0) && (*os != '\n') && (*os != '\r'))
311 				os++;
312 	}
313 
314 	spin_unlock(&progress_lock);
315 }
316 EXPORT_SYMBOL(rtas_progress);		/* needed by rtas_flash module */
317 
318 int rtas_token(const char *service)
319 {
320 	const __be32 *tokp;
321 	if (rtas.dev == NULL)
322 		return RTAS_UNKNOWN_SERVICE;
323 	tokp = of_get_property(rtas.dev, service, NULL);
324 	return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
325 }
326 EXPORT_SYMBOL(rtas_token);
327 
328 int rtas_service_present(const char *service)
329 {
330 	return rtas_token(service) != RTAS_UNKNOWN_SERVICE;
331 }
332 EXPORT_SYMBOL(rtas_service_present);
333 
334 #ifdef CONFIG_RTAS_ERROR_LOGGING
335 /*
336  * Return the firmware-specified size of the error log buffer
337  *  for all rtas calls that require an error buffer argument.
338  *  This includes 'check-exception' and 'rtas-last-error'.
339  */
340 int rtas_get_error_log_max(void)
341 {
342 	static int rtas_error_log_max;
343 	if (rtas_error_log_max)
344 		return rtas_error_log_max;
345 
346 	rtas_error_log_max = rtas_token ("rtas-error-log-max");
347 	if ((rtas_error_log_max == RTAS_UNKNOWN_SERVICE) ||
348 	    (rtas_error_log_max > RTAS_ERROR_LOG_MAX)) {
349 		printk (KERN_WARNING "RTAS: bad log buffer size %d\n",
350 			rtas_error_log_max);
351 		rtas_error_log_max = RTAS_ERROR_LOG_MAX;
352 	}
353 	return rtas_error_log_max;
354 }
355 EXPORT_SYMBOL(rtas_get_error_log_max);
356 
357 
358 static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
359 static int rtas_last_error_token;
360 
361 /* Return a copy of the detailed error text associated with the
362  * most recent failed call to rtas.  Because the error text
363  * might go stale if there are any other intervening rtas calls,
364  * this routine must be called atomically with whatever produced
365  * the error (i.e. with rtas.lock still held from the previous call).
366  */
367 static char *__fetch_rtas_last_error(char *altbuf)
368 {
369 	struct rtas_args err_args, save_args;
370 	u32 bufsz;
371 	char *buf = NULL;
372 
373 	if (rtas_last_error_token == -1)
374 		return NULL;
375 
376 	bufsz = rtas_get_error_log_max();
377 
378 	err_args.token = cpu_to_be32(rtas_last_error_token);
379 	err_args.nargs = cpu_to_be32(2);
380 	err_args.nret = cpu_to_be32(1);
381 	err_args.args[0] = cpu_to_be32(__pa(rtas_err_buf));
382 	err_args.args[1] = cpu_to_be32(bufsz);
383 	err_args.args[2] = 0;
384 
385 	save_args = rtas.args;
386 	rtas.args = err_args;
387 
388 	enter_rtas(__pa(&rtas.args));
389 
390 	err_args = rtas.args;
391 	rtas.args = save_args;
392 
393 	/* Log the error in the unlikely case that there was one. */
394 	if (unlikely(err_args.args[2] == 0)) {
395 		if (altbuf) {
396 			buf = altbuf;
397 		} else {
398 			buf = rtas_err_buf;
399 			if (slab_is_available())
400 				buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
401 		}
402 		if (buf)
403 			memcpy(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
404 	}
405 
406 	return buf;
407 }
408 
409 #define get_errorlog_buffer()	kmalloc(RTAS_ERROR_LOG_MAX, GFP_KERNEL)
410 
411 #else /* CONFIG_RTAS_ERROR_LOGGING */
412 #define __fetch_rtas_last_error(x)	NULL
413 #define get_errorlog_buffer()		NULL
414 #endif
415 
416 
417 static void
418 va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
419 		      va_list list)
420 {
421 	int i;
422 
423 	args->token = cpu_to_be32(token);
424 	args->nargs = cpu_to_be32(nargs);
425 	args->nret  = cpu_to_be32(nret);
426 	args->rets  = &(args->args[nargs]);
427 
428 	for (i = 0; i < nargs; ++i)
429 		args->args[i] = cpu_to_be32(va_arg(list, __u32));
430 
431 	for (i = 0; i < nret; ++i)
432 		args->rets[i] = 0;
433 
434 	enter_rtas(__pa(args));
435 }
436 
437 void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
438 {
439 	va_list list;
440 
441 	va_start(list, nret);
442 	va_rtas_call_unlocked(args, token, nargs, nret, list);
443 	va_end(list);
444 }
445 
446 int rtas_call(int token, int nargs, int nret, int *outputs, ...)
447 {
448 	va_list list;
449 	int i;
450 	unsigned long s;
451 	struct rtas_args *rtas_args;
452 	char *buff_copy = NULL;
453 	int ret;
454 
455 	if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
456 		return -1;
457 
458 	s = lock_rtas();
459 
460 	/* We use the global rtas args buffer */
461 	rtas_args = &rtas.args;
462 
463 	va_start(list, outputs);
464 	va_rtas_call_unlocked(rtas_args, token, nargs, nret, list);
465 	va_end(list);
466 
467 	/* A -1 return code indicates that the last command couldn't
468 	   be completed due to a hardware error. */
469 	if (be32_to_cpu(rtas_args->rets[0]) == -1)
470 		buff_copy = __fetch_rtas_last_error(NULL);
471 
472 	if (nret > 1 && outputs != NULL)
473 		for (i = 0; i < nret-1; ++i)
474 			outputs[i] = be32_to_cpu(rtas_args->rets[i+1]);
475 	ret = (nret > 0)? be32_to_cpu(rtas_args->rets[0]): 0;
476 
477 	unlock_rtas(s);
478 
479 	if (buff_copy) {
480 		log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
481 		if (slab_is_available())
482 			kfree(buff_copy);
483 	}
484 	return ret;
485 }
486 EXPORT_SYMBOL(rtas_call);
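
/*
 * Note on the calling convention (illustrative sketch, mirroring the
 * accessors later in this file): rtas_call() returns rets[0], the RTAS
 * status word, and when nret > 1 the remaining return words are copied
 * into the caller-supplied outputs[] array:
 *
 *	int state;	// receives rets[1]
 *	int rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &state,
 *			   sensor, index);
 */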
487 
488 /* For RTAS_BUSY (-2), return a delay of 1 millisecond.  For an extended busy
489  * status code of 990n, return the hinted delay of 10^n (last digit)
490  * milliseconds; for any other status, return 0.  */
491 unsigned int rtas_busy_delay_time(int status)
492 {
493 	int order;
494 	unsigned int ms = 0;
495 
496 	if (status == RTAS_BUSY) {
497 		ms = 1;
498 	} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
499 		   status <= RTAS_EXTENDED_DELAY_MAX) {
500 		order = status - RTAS_EXTENDED_DELAY_MIN;
501 		for (ms = 1; order > 0; order--)
502 			ms *= 10;
503 	}
504 
505 	return ms;
506 }
507 EXPORT_SYMBOL(rtas_busy_delay_time);
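
/*
 * Worked example of the 990n decoding above: a status of 9903 hints at a
 * delay of 10^3 = 1000 ms, while plain RTAS_BUSY (-2) maps to 1 ms and any
 * other status yields 0.
 */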
508 
509 /* For an RTAS busy status code, perform the hinted delay. */
510 unsigned int rtas_busy_delay(int status)
511 {
512 	unsigned int ms;
513 
514 	might_sleep();
515 	ms = rtas_busy_delay_time(status);
516 	if (ms && need_resched())
517 		msleep(ms);
518 
519 	return ms;
520 }
521 EXPORT_SYMBOL(rtas_busy_delay);
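
/*
 * Typical retry pattern built on rtas_busy_delay(), as used by several
 * helpers later in this file (sketch only):
 *
 *	do {
 *		rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
 *	} while (rtas_busy_delay(rc));
 *
 *	if (rc < 0)
 *		return rtas_error_rc(rc);
 */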
522 
523 static int rtas_error_rc(int rtas_rc)
524 {
525 	int rc;
526 
527 	switch (rtas_rc) {
528 		case -1: 		/* Hardware Error */
529 			rc = -EIO;
530 			break;
531 		case -3:		/* Bad indicator/domain/etc */
532 			rc = -EINVAL;
533 			break;
534 		case -9000:		/* Isolation error */
535 			rc = -EFAULT;
536 			break;
537 		case -9001:		/* Outstanding TCE/PTE */
538 			rc = -EEXIST;
539 			break;
540 		case -9002:		/* No usable slot */
541 			rc = -ENODEV;
542 			break;
543 		default:
544 			printk(KERN_ERR "%s: unexpected RTAS error %d\n",
545 					__func__, rtas_rc);
546 			rc = -ERANGE;
547 			break;
548 	}
549 	return rc;
550 }
551 
552 int rtas_get_power_level(int powerdomain, int *level)
553 {
554 	int token = rtas_token("get-power-level");
555 	int rc;
556 
557 	if (token == RTAS_UNKNOWN_SERVICE)
558 		return -ENOENT;
559 
560 	while ((rc = rtas_call(token, 1, 2, level, powerdomain)) == RTAS_BUSY)
561 		udelay(1);
562 
563 	if (rc < 0)
564 		return rtas_error_rc(rc);
565 	return rc;
566 }
567 EXPORT_SYMBOL(rtas_get_power_level);
568 
569 int rtas_set_power_level(int powerdomain, int level, int *setlevel)
570 {
571 	int token = rtas_token("set-power-level");
572 	int rc;
573 
574 	if (token == RTAS_UNKNOWN_SERVICE)
575 		return -ENOENT;
576 
577 	do {
578 		rc = rtas_call(token, 2, 2, setlevel, powerdomain, level);
579 	} while (rtas_busy_delay(rc));
580 
581 	if (rc < 0)
582 		return rtas_error_rc(rc);
583 	return rc;
584 }
585 EXPORT_SYMBOL(rtas_set_power_level);
586 
587 int rtas_get_sensor(int sensor, int index, int *state)
588 {
589 	int token = rtas_token("get-sensor-state");
590 	int rc;
591 
592 	if (token == RTAS_UNKNOWN_SERVICE)
593 		return -ENOENT;
594 
595 	do {
596 		rc = rtas_call(token, 2, 2, state, sensor, index);
597 	} while (rtas_busy_delay(rc));
598 
599 	if (rc < 0)
600 		return rtas_error_rc(rc);
601 	return rc;
602 }
603 EXPORT_SYMBOL(rtas_get_sensor);
604 
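/*
 * Like rtas_get_sensor(), but without the busy-retry loop: a busy or
 * extended-delay status only triggers a warning, so this variant never
 * sleeps and may be used where sleeping is not allowed.
 */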
605 int rtas_get_sensor_fast(int sensor, int index, int *state)
606 {
607 	int token = rtas_token("get-sensor-state");
608 	int rc;
609 
610 	if (token == RTAS_UNKNOWN_SERVICE)
611 		return -ENOENT;
612 
613 	rc = rtas_call(token, 2, 2, state, sensor, index);
614 	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
615 				    rc <= RTAS_EXTENDED_DELAY_MAX));
616 
617 	if (rc < 0)
618 		return rtas_error_rc(rc);
619 	return rc;
620 }
621 
622 bool rtas_indicator_present(int token, int *maxindex)
623 {
624 	int proplen, count, i;
625 	const struct indicator_elem {
626 		__be32 token;
627 		__be32 maxindex;
628 	} *indicators;
629 
630 	indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
631 	if (!indicators)
632 		return false;
633 
634 	count = proplen / sizeof(struct indicator_elem);
635 
636 	for (i = 0; i < count; i++) {
637 		if (__be32_to_cpu(indicators[i].token) != token)
638 			continue;
639 		if (maxindex)
640 			*maxindex = __be32_to_cpu(indicators[i].maxindex);
641 		return true;
642 	}
643 
644 	return false;
645 }
646 EXPORT_SYMBOL(rtas_indicator_present);
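
/*
 * Illustrative sketch (hypothetical caller): probe for an indicator before
 * driving it with rtas_set_indicator() below.
 *
 *	int maxindex;
 *
 *	if (rtas_indicator_present(token, &maxindex))
 *		rc = rtas_set_indicator(token, 0, new_value);
 */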
647 
648 int rtas_set_indicator(int indicator, int index, int new_value)
649 {
650 	int token = rtas_token("set-indicator");
651 	int rc;
652 
653 	if (token == RTAS_UNKNOWN_SERVICE)
654 		return -ENOENT;
655 
656 	do {
657 		rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
658 	} while (rtas_busy_delay(rc));
659 
660 	if (rc < 0)
661 		return rtas_error_rc(rc);
662 	return rc;
663 }
664 EXPORT_SYMBOL(rtas_set_indicator);
665 
666 /*
667  * Like rtas_set_indicator(), but busy/extended-delay statuses are not retried.
668  */
669 int rtas_set_indicator_fast(int indicator, int index, int new_value)
670 {
671 	int rc;
672 	int token = rtas_token("set-indicator");
673 
674 	if (token == RTAS_UNKNOWN_SERVICE)
675 		return -ENOENT;
676 
677 	rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
678 
679 	WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN &&
680 				    rc <= RTAS_EXTENDED_DELAY_MAX));
681 
682 	if (rc < 0)
683 		return rtas_error_rc(rc);
684 
685 	return rc;
686 }
687 
688 void __noreturn rtas_restart(char *cmd)
689 {
690 	if (rtas_flash_term_hook)
691 		rtas_flash_term_hook(SYS_RESTART);
692 	printk("RTAS system-reboot returned %d\n",
693 	       rtas_call(rtas_token("system-reboot"), 0, 1, NULL));
694 	for (;;);
695 }
696 
697 void rtas_power_off(void)
698 {
699 	if (rtas_flash_term_hook)
700 		rtas_flash_term_hook(SYS_POWER_OFF);
701 	/* allow power on only with power button press */
702 	printk("RTAS power-off returned %d\n",
703 	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
704 	for (;;);
705 }
706 
707 void __noreturn rtas_halt(void)
708 {
709 	if (rtas_flash_term_hook)
710 		rtas_flash_term_hook(SYS_HALT);
711 	/* allow power on only with power button press */
712 	printk("RTAS power-off returned %d\n",
713 	       rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1));
714 	for (;;);
715 }
716 
717 /* Must be in the RMO region, so we place it here */
718 static char rtas_os_term_buf[2048];
719 
720 void rtas_os_term(char *str)
721 {
722 	int status;
723 
724 	/*
725 	 * Firmware with the ibm,extended-os-term property is guaranteed
726 	 * to always return from an ibm,os-term call. Earlier versions without
727 	 * this property may terminate the partition, which we want to avoid
728 	 * since it interferes with panic_timeout.
729 	 */
730 	if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
731 	    RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
732 		return;
733 
734 	snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
735 
736 	do {
737 		status = rtas_call(rtas_token("ibm,os-term"), 1, 1, NULL,
738 				   __pa(rtas_os_term_buf));
739 	} while (rtas_busy_delay(status));
740 
741 	if (status != 0)
742 		printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
743 }
744 
745 static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
746 #ifdef CONFIG_PPC_PSERIES
747 static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
748 {
749 	u16 slb_size = mmu_slb_size;
750 	int rc = H_MULTI_THREADS_ACTIVE;
751 	int cpu;
752 
753 	slb_set_size(SLB_MIN_SIZE);
754 	printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
755 
756 	while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
757 	       !atomic_read(&data->error))
758 		rc = rtas_call(data->token, 0, 1, NULL);
759 
760 	if (rc || atomic_read(&data->error)) {
761 		printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
762 		slb_set_size(slb_size);
763 	}
764 
765 	if (atomic_read(&data->error))
766 		rc = atomic_read(&data->error);
767 
768 	atomic_set(&data->error, rc);
769 	pSeries_coalesce_init();
770 
771 	if (wake_when_done) {
772 		atomic_set(&data->done, 1);
773 
774 		for_each_online_cpu(cpu)
775 			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
776 	}
777 
778 	if (atomic_dec_return(&data->working) == 0)
779 		complete(data->complete);
780 
781 	return rc;
782 }
783 
784 int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data)
785 {
786 	atomic_inc(&data->working);
787 	return __rtas_suspend_last_cpu(data, 0);
788 }
789 
790 static int __rtas_suspend_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
791 {
792 	long rc = H_SUCCESS;
793 	unsigned long msr_save;
794 	int cpu;
795 
796 	atomic_inc(&data->working);
797 
798 	/* really need to ensure MSR.EE is off for H_JOIN */
799 	msr_save = mfmsr();
800 	mtmsr(msr_save & ~(MSR_EE));
801 
802 	while (rc == H_SUCCESS && !atomic_read(&data->done) && !atomic_read(&data->error))
803 		rc = plpar_hcall_norets(H_JOIN);
804 
805 	mtmsr(msr_save);
806 
807 	if (rc == H_SUCCESS) {
808 		/* This cpu was prodded and the suspend is complete. */
809 		goto out;
810 	} else if (rc == H_CONTINUE) {
811 		/* All other cpus are in H_JOIN, this cpu does
812 		 * the suspend.
813 		 */
814 		return __rtas_suspend_last_cpu(data, wake_when_done);
815 	} else {
816 		printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
817 		       smp_processor_id(), rc);
818 		atomic_set(&data->error, rc);
819 	}
820 
821 	if (wake_when_done) {
822 		atomic_set(&data->done, 1);
823 
824 		/* This cpu did the suspend or got an error; in either case,
825 	 * we need to prod all other cpus out of join state.
826 		 * Extra prods are harmless.
827 		 */
828 		for_each_online_cpu(cpu)
829 			plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
830 	}
831 out:
832 	if (atomic_dec_return(&data->working) == 0)
833 		complete(data->complete);
834 	return rc;
835 }
836 
837 int rtas_suspend_cpu(struct rtas_suspend_me_data *data)
838 {
839 	return __rtas_suspend_cpu(data, 0);
840 }
841 
842 static void rtas_percpu_suspend_me(void *info)
843 {
844 	__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
845 }
846 
847 enum rtas_cpu_state {
848 	DOWN,
849 	UP,
850 };
851 
852 #ifndef CONFIG_SMP
853 static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
854 				cpumask_var_t cpus)
855 {
856 	if (!cpumask_empty(cpus)) {
857 		cpumask_clear(cpus);
858 		return -EINVAL;
859 	} else
860 		return 0;
861 }
862 #else
863 /* On return the cpumask will be altered to indicate which CPUs changed state:
864  * CPUs whose state was changed remain set in the mask,
865  * CPUs whose state was unchanged are cleared from the mask. */
866 static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
867 				cpumask_var_t cpus)
868 {
869 	int cpu;
870 	int cpuret = 0;
871 	int ret = 0;
872 
873 	if (cpumask_empty(cpus))
874 		return 0;
875 
876 	for_each_cpu(cpu, cpus) {
877 		switch (state) {
878 		case DOWN:
879 			cpuret = cpu_down(cpu);
880 			break;
881 		case UP:
882 			cpuret = cpu_up(cpu);
883 			break;
884 		}
885 		if (cpuret) {
886 			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
887 					__func__,
888 					((state == UP) ? "up" : "down"),
889 					cpu, cpuret);
890 			if (!ret)
891 				ret = cpuret;
892 			if (state == UP) {
893 				/* clear bits for unchanged cpus, return */
894 				cpumask_shift_right(cpus, cpus, cpu);
895 				cpumask_shift_left(cpus, cpus, cpu);
896 				break;
897 			} else {
898 				/* clear bit for unchanged cpu, continue */
899 				cpumask_clear_cpu(cpu, cpus);
900 			}
901 		}
902 	}
903 
904 	return ret;
905 }
906 #endif
907 
908 int rtas_online_cpus_mask(cpumask_var_t cpus)
909 {
910 	int ret;
911 
912 	ret = rtas_cpu_state_change_mask(UP, cpus);
913 
914 	if (ret) {
915 		cpumask_var_t tmp_mask;
916 
917 		if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
918 			return ret;
919 
920 		/* Use tmp_mask to preserve cpus mask from first failure */
921 		cpumask_copy(tmp_mask, cpus);
922 		rtas_offline_cpus_mask(tmp_mask);
923 		free_cpumask_var(tmp_mask);
924 	}
925 
926 	return ret;
927 }
928 EXPORT_SYMBOL(rtas_online_cpus_mask);
929 
930 int rtas_offline_cpus_mask(cpumask_var_t cpus)
931 {
932 	return rtas_cpu_state_change_mask(DOWN, cpus);
933 }
934 EXPORT_SYMBOL(rtas_offline_cpus_mask);
935 
936 int rtas_ibm_suspend_me(u64 handle)
937 {
938 	long state;
939 	long rc;
940 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
941 	struct rtas_suspend_me_data data;
942 	DECLARE_COMPLETION_ONSTACK(done);
943 	cpumask_var_t offline_mask;
944 	int cpuret;
945 
946 	if (!rtas_service_present("ibm,suspend-me"))
947 		return -ENOSYS;
948 
949 	/* Make sure the state is valid */
950 	rc = plpar_hcall(H_VASI_STATE, retbuf, handle);
951 
952 	state = retbuf[0];
953 
954 	if (rc) {
955 		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
956 		return rc;
957 	} else if (state == H_VASI_ENABLED) {
958 		return -EAGAIN;
959 	} else if (state != H_VASI_SUSPENDING) {
960 		printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
961 		       state);
962 		return -EIO;
963 	}
964 
965 	if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
966 		return -ENOMEM;
967 
968 	atomic_set(&data.working, 0);
969 	atomic_set(&data.done, 0);
970 	atomic_set(&data.error, 0);
971 	data.token = rtas_token("ibm,suspend-me");
972 	data.complete = &done;
973 
974 	/* All present CPUs must be online */
975 	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
976 	cpuret = rtas_online_cpus_mask(offline_mask);
977 	if (cpuret) {
978 		pr_err("%s: Could not bring present CPUs online.\n", __func__);
979 		atomic_set(&data.error, cpuret);
980 		goto out;
981 	}
982 
983 	stop_topology_update();
984 
985 	/* Call the suspend function on all CPUs.  One of them will make
986 	 * the actual ibm,suspend-me RTAS call.
987 	 */
988 	if (on_each_cpu(rtas_percpu_suspend_me, &data, 0))
989 		atomic_set(&data.error, -EINVAL);
990 
991 	wait_for_completion(&done);
992 
993 	if (atomic_read(&data.error) != 0)
994 		printk(KERN_ERR "Error doing global join\n");
995 
996 	start_topology_update();
997 
998 	/* Take down CPUs not online prior to suspend */
999 	cpuret = rtas_offline_cpus_mask(offline_mask);
1000 	if (cpuret)
1001 		pr_warn("%s: Could not restore CPUs to offline state.\n",
1002 				__func__);
1003 
1004 out:
1005 	free_cpumask_var(offline_mask);
1006 	return atomic_read(&data.error);
1007 }
1008 #else /* CONFIG_PPC_PSERIES */
1009 int rtas_ibm_suspend_me(u64 handle)
1010 {
1011 	return -ENOSYS;
1012 }
1013 #endif
1014 
1015 /**
1016  * get_pseries_errorlog() - Find a specific pseries error log in an RTAS extended event log
1017  * @log: RTAS error/event log
1018  * @section_id: two character section identifier
1019  *
1020  * Return: A pointer to the specified errorlog or NULL if not found.
1021  */
1022 struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
1023 					      uint16_t section_id)
1024 {
1025 	struct rtas_ext_event_log_v6 *ext_log =
1026 		(struct rtas_ext_event_log_v6 *)log->buffer;
1027 	struct pseries_errorlog *sect;
1028 	unsigned char *p, *log_end;
1029 	uint32_t ext_log_length = rtas_error_extended_log_length(log);
1030 	uint8_t log_format = rtas_ext_event_log_format(ext_log);
1031 	uint32_t company_id = rtas_ext_event_company_id(ext_log);
1032 
1033 	/* Check that we understand the format */
1034 	if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
1035 	    log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
1036 	    company_id != RTAS_V6EXT_COMPANY_ID_IBM)
1037 		return NULL;
1038 
1039 	log_end = log->buffer + ext_log_length;
1040 	p = ext_log->vendor_log;
1041 
1042 	while (p < log_end) {
1043 		sect = (struct pseries_errorlog *)p;
1044 		if (pseries_errorlog_id(sect) == section_id)
1045 			return sect;
1046 		p += pseries_errorlog_length(sect);
1047 	}
1048 
1049 	return NULL;
1050 }
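
/*
 * Illustrative sketch (hypothetical caller): look up one section of an
 * extended error log by its two-character ASCII id.  The PSERIES_ELOG_SECT_ID_*
 * encodings are defined in asm/rtas.h; the id shown here is just an example.
 *
 *	struct pseries_errorlog *sect;
 *
 *	sect = get_pseries_errorlog(log, ('P' << 8) | 'S');
 *	if (sect)
 *		handle_section(sect);	// hypothetical helper
 */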
1051 
1052 /* We assume we are passed big-endian arguments */
1053 asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
1054 {
1055 	struct rtas_args args;
1056 	unsigned long flags;
1057 	char *buff_copy, *errbuf = NULL;
1058 	int nargs, nret, token;
1059 
1060 	if (!capable(CAP_SYS_ADMIN))
1061 		return -EPERM;
1062 
1063 	if (!rtas.entry)
1064 		return -EINVAL;
1065 
1066 	if (copy_from_user(&args, uargs, 3 * sizeof(u32)) != 0)
1067 		return -EFAULT;
1068 
1069 	nargs = be32_to_cpu(args.nargs);
1070 	nret  = be32_to_cpu(args.nret);
1071 	token = be32_to_cpu(args.token);
1072 
1073 	if (nargs >= ARRAY_SIZE(args.args)
1074 	    || nret > ARRAY_SIZE(args.args)
1075 	    || nargs + nret > ARRAY_SIZE(args.args))
1076 		return -EINVAL;
1077 
1078 	/* Copy in args. */
1079 	if (copy_from_user(args.args, uargs->args,
1080 			   nargs * sizeof(rtas_arg_t)) != 0)
1081 		return -EFAULT;
1082 
1083 	if (token == RTAS_UNKNOWN_SERVICE)
1084 		return -EINVAL;
1085 
1086 	args.rets = &args.args[nargs];
1087 	memset(args.rets, 0, nret * sizeof(rtas_arg_t));
1088 
1089 	/* The ibm,suspend-me call needs special handling */
1090 	if (token == ibm_suspend_me_token) {
1091 
1092 		/*
1093 		 * rtas_ibm_suspend_me assumes the streamid handle is in cpu
1094 		 * endian, or at least the hcall within it requires it.
1095 		 */
1096 		int rc = 0;
1097 		u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
1098 		              | be32_to_cpu(args.args[1]);
1099 		rc = rtas_ibm_suspend_me(handle);
1100 		if (rc == -EAGAIN)
1101 			args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
1102 		else if (rc == -EIO)
1103 			args.rets[0] = cpu_to_be32(-1);
1104 		else if (rc)
1105 			return rc;
1106 		goto copy_return;
1107 	}
1108 
1109 	buff_copy = get_errorlog_buffer();
1110 
1111 	flags = lock_rtas();
1112 
1113 	rtas.args = args;
1114 	enter_rtas(__pa(&rtas.args));
1115 	args = rtas.args;
1116 
1117 	/* A -1 return code indicates that the last command couldn't
1118 	   be completed due to a hardware error. */
1119 	if (be32_to_cpu(args.rets[0]) == -1)
1120 		errbuf = __fetch_rtas_last_error(buff_copy);
1121 
1122 	unlock_rtas(flags);
1123 
1124 	if (buff_copy) {
1125 		if (errbuf)
1126 			log_error(errbuf, ERR_TYPE_RTAS_LOG, 0);
1127 		kfree(buff_copy);
1128 	}
1129 
1130  copy_return:
1131 	/* Copy out args. */
1132 	if (copy_to_user(uargs->args + nargs,
1133 			 args.args + nargs,
1134 			 nret * sizeof(rtas_arg_t)) != 0)
1135 		return -EFAULT;
1136 
1137 	return 0;
1138 }
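
/*
 * Illustrative sketch of the user-space side of this syscall (roughly what
 * librtas does; names here are hypothetical, not from this file).  Every
 * field is big-endian, and the nret return words are written back starting
 * at args[nargs]:
 *
 *	struct rtas_args a = { 0 };
 *	a.token = htobe32(token);
 *	a.nargs = htobe32(1);
 *	a.nret  = htobe32(1);
 *	a.args[0] = htobe32(input);
 *	syscall(__NR_rtas, &a);
 *	status = (int)be32toh(a.args[1]);	// rets[0] follows the inputs
 */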
1139 
1140 /*
1141  * Called early during boot, before mem init, to retrieve the RTAS
1142  * information from the device tree and to allocate the RMO buffer for
1143  * userland accesses.
1144  */
1145 void __init rtas_initialize(void)
1146 {
1147 	unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
1148 	u32 base, size, entry;
1149 	int no_base, no_size, no_entry;
1150 
1151 	/* Get the RTAS device node and fill our "rtas" structure with
1152 	 * information about it.
1153 	 */
1154 	rtas.dev = of_find_node_by_name(NULL, "rtas");
1155 	if (!rtas.dev)
1156 		return;
1157 
1158 	no_base = of_property_read_u32(rtas.dev, "linux,rtas-base", &base);
1159 	no_size = of_property_read_u32(rtas.dev, "rtas-size", &size);
1160 	if (no_base || no_size) {
1161 		of_node_put(rtas.dev);
1162 		rtas.dev = NULL;
1163 		return;
1164 	}
1165 
1166 	rtas.base = base;
1167 	rtas.size = size;
1168 	no_entry = of_property_read_u32(rtas.dev, "linux,rtas-entry", &entry);
1169 	rtas.entry = no_entry ? rtas.base : entry;
1170 
1171 	/* If RTAS was found, allocate the RMO buffer for it and look for
1172 	 * the ibm,suspend-me token, if any.
1173 	 */
1174 #ifdef CONFIG_PPC64
1175 	if (firmware_has_feature(FW_FEATURE_LPAR)) {
1176 		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
1177 		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
1178 	}
1179 #endif
1180 	rtas_rmo_buf = memblock_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
1181 
1182 #ifdef CONFIG_RTAS_ERROR_LOGGING
1183 	rtas_last_error_token = rtas_token("rtas-last-error");
1184 #endif
1185 }
1186 
1187 int __init early_init_dt_scan_rtas(unsigned long node,
1188 		const char *uname, int depth, void *data)
1189 {
1190 	const u32 *basep, *entryp, *sizep;
1191 
1192 	if (depth != 1 || strcmp(uname, "rtas") != 0)
1193 		return 0;
1194 
1195 	basep  = of_get_flat_dt_prop(node, "linux,rtas-base", NULL);
1196 	entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
1197 	sizep  = of_get_flat_dt_prop(node, "rtas-size", NULL);
1198 
1199 	if (basep && entryp && sizep) {
1200 		rtas.base = *basep;
1201 		rtas.entry = *entryp;
1202 		rtas.size = *sizep;
1203 	}
1204 
1205 #ifdef CONFIG_UDBG_RTAS_CONSOLE
1206 	basep = of_get_flat_dt_prop(node, "put-term-char", NULL);
1207 	if (basep)
1208 		rtas_putchar_token = *basep;
1209 
1210 	basep = of_get_flat_dt_prop(node, "get-term-char", NULL);
1211 	if (basep)
1212 		rtas_getchar_token = *basep;
1213 
1214 	if (rtas_putchar_token != RTAS_UNKNOWN_SERVICE &&
1215 	    rtas_getchar_token != RTAS_UNKNOWN_SERVICE)
1216 		udbg_init_rtas_console();
1217 
1218 #endif
1219 
1220 	/* break now */
1221 	return 1;
1222 }
1223 
1224 static arch_spinlock_t timebase_lock;
1225 static u64 timebase = 0;
1226 
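/*
 * Timebase hand-off between CPUs: the CPU that already has a valid timebase
 * calls rtas_give_timebase(), which freezes the timebase, publishes its value
 * through the 'timebase' variable and spins until the value is consumed; the
 * arriving CPU calls rtas_take_timebase(), copies the value into its own
 * timebase registers and clears the variable, after which the giver thaws the
 * timebase again.
 */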
1227 void rtas_give_timebase(void)
1228 {
1229 	unsigned long flags;
1230 
1231 	local_irq_save(flags);
1232 	hard_irq_disable();
1233 	arch_spin_lock(&timebase_lock);
1234 	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
1235 	timebase = get_tb();
1236 	arch_spin_unlock(&timebase_lock);
1237 
1238 	while (timebase)
1239 		barrier();
1240 	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
1241 	local_irq_restore(flags);
1242 }
1243 
1244 void rtas_take_timebase(void)
1245 {
1246 	while (!timebase)
1247 		barrier();
1248 	arch_spin_lock(&timebase_lock);
1249 	set_tb(timebase >> 32, timebase & 0xffffffff);
1250 	timebase = 0;
1251 	arch_spin_unlock(&timebase_lock);
1252 }
1253