xref: /openbmc/linux/kernel/printk/printk.c (revision f71a261a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/printk.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * Modified to make sys_syslog() more flexible: added commands to
 * return the last 4k of kernel messages, regardless of whether
 * they've been read or not.  Added option to suppress kernel printk's
 * to the console.  Added hook for sending the console messages
 * elsewhere, in preparation for a serial line console (someday).
 * Ted Ts'o, 2/11/93.
 * Modified for sysctl support, 1/8/97, Chris Horn.
 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
 *     manfred@colorfullife.com
 * Rewrote bits to get rid of console_lock
 *	01Mar01 Andrew Morton
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/crash_core.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
#include <linux/syslog.h>
#include <linux/cpu.h>
#include <linux/rculist.h>
#include <linux/poll.h>
#include <linux/irq_work.h>
#include <linux/ctype.h>
#include <linux/uio.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <linux/uaccess.h>
#include <asm/sections.h>

#include <trace/events/initcall.h>
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>

#include "printk_ringbuffer.h"
#include "console_cmdline.h"
#include "braille.h"
#include "internal.h"

int console_printk[4] = {
	CONSOLE_LOGLEVEL_DEFAULT,	/* console_loglevel */
	MESSAGE_LOGLEVEL_DEFAULT,	/* default_message_loglevel */
	CONSOLE_LOGLEVEL_MIN,		/* minimum_console_loglevel */
	CONSOLE_LOGLEVEL_DEFAULT,	/* default_console_loglevel */
};
EXPORT_SYMBOL_GPL(console_printk);

atomic_t ignore_console_lock_warning __read_mostly = ATOMIC_INIT(0);
EXPORT_SYMBOL(ignore_console_lock_warning);

/*
 * Low-level drivers may need this to know whether they can schedule in
 * their unblank() callback or not. So let's export it.
 */
int oops_in_progress;
EXPORT_SYMBOL(oops_in_progress);

/*
 * console_sem protects the console_drivers list, and also
 * provides serialisation for access to the entire console
 * driver system.
 */
static DEFINE_SEMAPHORE(console_sem);
struct console *console_drivers;
EXPORT_SYMBOL_GPL(console_drivers);

/*
 * The system may need to suppress printk messages under certain
 * circumstances, such as after a kernel panic happens.
 */
int __read_mostly suppress_printk;

/*
 * During panic, heavy printk by other CPUs can delay the
 * panic and risk deadlock on console resources.
 */
static int __read_mostly suppress_panic_printk;

#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
	.name = "console_lock"
};
#endif

enum devkmsg_log_bits {
	__DEVKMSG_LOG_BIT_ON = 0,
	__DEVKMSG_LOG_BIT_OFF,
	__DEVKMSG_LOG_BIT_LOCK,
};

enum devkmsg_log_masks {
	DEVKMSG_LOG_MASK_ON             = BIT(__DEVKMSG_LOG_BIT_ON),
	DEVKMSG_LOG_MASK_OFF            = BIT(__DEVKMSG_LOG_BIT_OFF),
	DEVKMSG_LOG_MASK_LOCK           = BIT(__DEVKMSG_LOG_BIT_LOCK),
};

/* Keep both the 'on' and 'off' bits clear, i.e. ratelimit by default: */
#define DEVKMSG_LOG_MASK_DEFAULT	0

static unsigned int __read_mostly devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;

static int __control_devkmsg(char *str)
{
	size_t len;

	if (!str)
		return -EINVAL;

	len = str_has_prefix(str, "on");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_ON;
		return len;
	}

	len = str_has_prefix(str, "off");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_OFF;
		return len;
	}

	len = str_has_prefix(str, "ratelimit");
	if (len) {
		devkmsg_log = DEVKMSG_LOG_MASK_DEFAULT;
		return len;
	}

	return -EINVAL;
}

static int __init control_devkmsg(char *str)
{
	if (__control_devkmsg(str) < 0) {
		pr_warn("printk.devkmsg: bad option string '%s'\n", str);
		return 1;
	}

	/*
	 * Set sysctl string accordingly:
	 */
	if (devkmsg_log == DEVKMSG_LOG_MASK_ON)
		strcpy(devkmsg_log_str, "on");
	else if (devkmsg_log == DEVKMSG_LOG_MASK_OFF)
		strcpy(devkmsg_log_str, "off");
	/* else "ratelimit" which is set by default. */

	/*
	 * Sysctl cannot change it anymore. The kernel command line setting of
	 * this parameter is to force the setting to be permanent throughout the
	 * runtime of the system. This is a precautionary measure against
	 * userspace trying to be a smarta** and attempting to change it up
	 * on us.
	 */
	devkmsg_log |= DEVKMSG_LOG_MASK_LOCK;

	return 1;
}
__setup("printk.devkmsg=", control_devkmsg);
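
/*
 * A minimal usage sketch: the strings accepted by __control_devkmsg()
 * above can be passed on the kernel command line, e.g.:
 *
 *   printk.devkmsg=on         (no ratelimiting of /dev/kmsg writes)
 *   printk.devkmsg=off        (ignore all userspace /dev/kmsg writes)
 *   printk.devkmsg=ratelimit  (the default behavior)
 *
 * Any value set this way is locked for the lifetime of the system and
 * cannot be changed later through the sysctl below.
 */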

char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
			      void *buffer, size_t *lenp, loff_t *ppos)
{
	char old_str[DEVKMSG_STR_MAX_SIZE];
	unsigned int old;
	int err;

	if (write) {
		if (devkmsg_log & DEVKMSG_LOG_MASK_LOCK)
			return -EINVAL;

		old = devkmsg_log;
		strncpy(old_str, devkmsg_log_str, DEVKMSG_STR_MAX_SIZE);
	}

	err = proc_dostring(table, write, buffer, lenp, ppos);
	if (err)
		return err;

	if (write) {
		err = __control_devkmsg(devkmsg_log_str);

		/*
		 * Do not accept an unknown string OR a known string with
		 * trailing crap...
		 */
		if (err < 0 || (err + 1 != *lenp)) {

			/* ... and restore old setting. */
			devkmsg_log = old;
			strncpy(devkmsg_log_str, old_str, DEVKMSG_STR_MAX_SIZE);

			return -EINVAL;
		}
	}

	return 0;
}
#endif /* CONFIG_PRINTK && CONFIG_SYSCTL */

/* Number of registered extended console drivers. */
static int nr_ext_console_drivers;

/*
 * Used to synchronize printing kthreads against direct printing via
 * console_trylock/console_unlock.
 *
 * Values:
 * -1 = console kthreads atomically blocked (via global trylock)
 *  0 = no kthread printing, console not locked (via trylock)
 * >0 = kthread(s) actively printing
 *
 * Note: For synchronizing against direct printing via
 *       console_lock/console_unlock, see the @lock variable in
 *       struct console.
 */
static atomic_t console_kthreads_active = ATOMIC_INIT(0);

#define console_kthreads_atomic_tryblock() \
	(atomic_cmpxchg(&console_kthreads_active, 0, -1) == 0)
#define console_kthreads_atomic_unblock() \
	atomic_cmpxchg(&console_kthreads_active, -1, 0)
#define console_kthreads_atomically_blocked() \
	(atomic_read(&console_kthreads_active) == -1)

#define console_kthread_printing_tryenter() \
	atomic_inc_unless_negative(&console_kthreads_active)
#define console_kthread_printing_exit() \
	atomic_dec(&console_kthreads_active)
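
/*
 * A hedged usage sketch for the counter above (simplified; the real
 * callers appear later in this file): a printing kthread brackets the
 * emission of one record, while a context needing exclusive direct
 * access blocks all kthreads atomically:
 *
 *	if (console_kthread_printing_tryenter()) {
 *		(print one record to this console)
 *		console_kthread_printing_exit();
 *	}
 *
 *	if (console_kthreads_atomic_tryblock()) {
 *		(all kthread printers are now excluded)
 *		console_kthreads_atomic_unblock();
 *	}
 */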

/*
 * Helper macros to handle lockdep when locking/unlocking console_sem. We use
 * macros instead of functions so that _RET_IP_ contains useful information.
 */
#define down_console_sem() do { \
	down(&console_sem);\
	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
} while (0)

static int __down_trylock_console_sem(unsigned long ip)
{
	int lock_failed;
	unsigned long flags;

	/*
	 * Here and in __up_console_sem() we need to be in safe mode,
	 * because spindump/WARN/etc from under console ->lock will
	 * deadlock in printk()->down_trylock_console_sem() otherwise.
	 */
	printk_safe_enter_irqsave(flags);
	lock_failed = down_trylock(&console_sem);
	printk_safe_exit_irqrestore(flags);

	if (lock_failed)
		return 1;
	mutex_acquire(&console_lock_dep_map, 0, 1, ip);
	return 0;
}
#define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)

static void __up_console_sem(unsigned long ip)
{
	unsigned long flags;

	mutex_release(&console_lock_dep_map, ip);

	printk_safe_enter_irqsave(flags);
	up(&console_sem);
	printk_safe_exit_irqrestore(flags);
}
#define up_console_sem() __up_console_sem(_RET_IP_)

static bool panic_in_progress(void)
{
	return unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID);
}

/*
 * Tracks whether kthread printers are all blocked. A value of true implies
 * that the console is locked via console_lock() or the console is suspended.
 * Writing to this variable requires holding @console_sem.
 */
static bool console_kthreads_blocked;

/*
 * Block all kthread printers from a schedulable context.
 *
 * Requires holding @console_sem.
 */
static void console_kthreads_block(void)
{
	struct console *con;

	for_each_console(con) {
		mutex_lock(&con->lock);
		con->blocked = true;
		mutex_unlock(&con->lock);
	}

	console_kthreads_blocked = true;
}

/*
 * Unblock all kthread printers from a schedulable context.
 *
 * Requires holding @console_sem.
 */
static void console_kthreads_unblock(void)
{
	struct console *con;

	for_each_console(con) {
		mutex_lock(&con->lock);
		con->blocked = false;
		mutex_unlock(&con->lock);
	}

	console_kthreads_blocked = false;
}

static int console_suspended;

/*
 *	Array of consoles built from command line options (console=)
 */

#define MAX_CMDLINECONSOLES 8

static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];

static int preferred_console = -1;
int console_set_on_cmdline;
EXPORT_SYMBOL(console_set_on_cmdline);

/* Flag: console code may call schedule() */
static int console_may_schedule;

enum con_msg_format_flags {
	MSG_FORMAT_DEFAULT	= 0,
	MSG_FORMAT_SYSLOG	= (1 << 0),
};

static int console_msg_format = MSG_FORMAT_DEFAULT;

/*
 * The printk log buffer consists of a sequenced collection of records, each
 * containing variable length message text. Every record also contains its
 * own meta-data (@info).
 *
 * Every record meta-data carries the timestamp in microseconds, as well as
 * the standard userspace syslog level and syslog facility. The usual kernel
 * messages use LOG_KERN; userspace-injected messages always carry a matching
 * syslog facility, by default LOG_USER. The origin of every message can be
 * reliably determined that way.
 *
 * The human readable log message of a record is available in @text, the
 * length of the message text in @text_len. The stored message is not
 * terminated.
 *
 * Optionally, a record can carry a dictionary of properties (key/value
 * pairs), to provide userspace with a machine-readable message context.
 *
 * Examples for well-defined, commonly used property names are:
 *   DEVICE=b12:8               device identifier
 *                                b12:8         block dev_t
 *                                c127:3        char dev_t
 *                                n8            netdev ifindex
 *                                +sound:card0  subsystem:devname
 *   SUBSYSTEM=pci              driver-core subsystem name
 *
 * Valid characters in property names are [a-zA-Z0-9.-_]. Property names
 * and values are terminated by a '\0' character.
 *
 * Example of record values:
 *   record.text_buf                = "it's a line" (unterminated)
 *   record.info.seq                = 56
 *   record.info.ts_nsec            = 36863
 *   record.info.text_len           = 11
 *   record.info.facility           = 0 (LOG_KERN)
 *   record.info.flags              = 0
 *   record.info.level              = 3 (LOG_ERR)
 *   record.info.caller_id          = 299 (task 299)
 *   record.info.dev_info.subsystem = "pci" (terminated)
 *   record.info.dev_info.device    = "+pci:0000:00:01.0" (terminated)
 *
 * The 'struct printk_info' buffer must never be directly exported to
 * userspace, it is a kernel-private implementation detail that might
 * need to be changed in the future, when the requirements change.
 *
 * /dev/kmsg exports the structured data in the following line format:
 *   "<level>,<seqnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
 *
 * Users of the export format should ignore possible additional values
 * separated by ',', and find the message after the ';' character.
 *
 * The optional key/value pairs are attached as continuation lines starting
 * with a space character and terminated by a newline. All possible
 * non-printable characters are escaped in the "\xff" notation.
 */
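
/*
 * Example of an exported /dev/kmsg line in the format described above
 * (values are illustrative only):
 *
 *   6,339,5140900,-;NET: Registered protocol family 10
 *    SUBSYSTEM=net
 *
 * i.e. the combined facility/level value 6 (facility 0 == LOG_KERN,
 * level 6, encoded as (0 << 3) | 6), sequence number 339, timestamp
 * 5140900 microseconds, no continuation flag, followed by one
 * key/value continuation line.
 */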

/* syslog_lock protects syslog_* variables and write access to clear_seq. */
static DEFINE_MUTEX(syslog_lock);

/*
 * A flag to signify if printk_activate_kthreads() has already started the
 * kthread printers. If true, any later registered consoles must start their
 * own kthread directly. The flag is write protected by the console_lock.
 */
static bool printk_kthreads_available;

#ifdef CONFIG_PRINTK
static atomic_t printk_prefer_direct = ATOMIC_INIT(0);

/**
 * printk_prefer_direct_enter - cause printk() calls to attempt direct
 *                              printing to all enabled consoles
 *
 * Since it is not possible to call into the console printing code from any
 * context, there is no guarantee that direct printing will occur.
 *
 * This globally affects all printk() callers.
 *
 * Context: Any context.
 */
void printk_prefer_direct_enter(void)
{
	atomic_inc(&printk_prefer_direct);
}

/**
 * printk_prefer_direct_exit - restore printk() behavior
 *
 * Context: Any context.
 */
void printk_prefer_direct_exit(void)
{
	WARN_ON(atomic_dec_if_positive(&printk_prefer_direct) < 0);
}

/*
 * Calling printk() always wakes kthread printers so that they can
 * flush the new message to their respective consoles. Also, if direct
 * printing is allowed, printk() tries to flush the messages directly.
 *
 * Direct printing is allowed in situations when the kthreads
 * are not available or the system is in a problematic state.
 *
 * See the implementation about possible races.
 */
static inline bool allow_direct_printing(void)
{
	/*
	 * Checking kthread availability is a possible race because the
	 * kthread printers can become permanently disabled during runtime.
	 * However, doing that requires holding the console_lock, so any
	 * pending messages will be direct printed by console_unlock().
	 */
	if (!printk_kthreads_available)
		return true;

	/*
	 * Prefer direct printing when the system is in a problematic state.
	 * The context that sets this state will always see the updated value.
	 * The other contexts do not care. Anyway, direct printing is just a
	 * best effort. The direct output is only possible when console_lock
	 * is not already taken and no kthread printers are actively printing.
	 */
	return (system_state > SYSTEM_RUNNING ||
		oops_in_progress ||
		atomic_read(&printk_prefer_direct));
}

DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static size_t syslog_partial;
static bool syslog_time;

struct latched_seq {
	seqcount_latch_t	latch;
	u64			val[2];
};

/*
 * The next printk record to read after the last 'clear' command. There are
 * two copies (updated with seqcount_latch) so that reads can locklessly
 * access a valid value. Writers are synchronized by @syslog_lock.
 */
static struct latched_seq clear_seq = {
	.latch		= SEQCNT_LATCH_ZERO(clear_seq.latch),
	.val[0]		= 0,
	.val[1]		= 0,
};

#ifdef CONFIG_PRINTK_CALLER
#define PREFIX_MAX		48
#else
#define PREFIX_MAX		32
#endif

/* the maximum size of a formatted record (i.e. with prefix added per line) */
#define CONSOLE_LOG_MAX		1024

/* the maximum size for a dropped text message */
#define DROPPED_TEXT_MAX	64

/* the maximum size allowed to be reserved for a record */
#define LOG_LINE_MAX		(CONSOLE_LOG_MAX - PREFIX_MAX)

#define LOG_LEVEL(v)		((v) & 0x07)
#define LOG_FACILITY(v)		((v) >> 3 & 0xff)
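
/*
 * E.g. the userspace syslog prefix value 14 (facility LOG_USER,
 * severity LOG_INFO) decodes as LOG_LEVEL(14) == 6 and
 * LOG_FACILITY(14) == 1.
 */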

/* record buffer */
#define LOG_ALIGN __alignof__(unsigned long)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
#define LOG_BUF_LEN_MAX (u32)(1 << 31)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;

/*
 * Define the average message size. This only affects the number of
 * descriptors that will be available. Underestimating is better than
 * overestimating (too many available descriptors is better than not enough).
 */
#define PRB_AVGBITS 5	/* 32 character average length */

#if CONFIG_LOG_BUF_SHIFT <= PRB_AVGBITS
#error CONFIG_LOG_BUF_SHIFT value too small.
#endif
_DEFINE_PRINTKRB(printk_rb_static, CONFIG_LOG_BUF_SHIFT - PRB_AVGBITS,
		 PRB_AVGBITS, &__log_buf[0]);

static struct printk_ringbuffer printk_rb_dynamic;

static struct printk_ringbuffer *prb = &printk_rb_static;

/*
 * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
 * per_cpu_areas are initialised. This variable is set to true when
 * it's safe to access per-CPU data.
 */
static bool __printk_percpu_data_ready __read_mostly;

bool printk_percpu_data_ready(void)
{
	return __printk_percpu_data_ready;
}

/* Must be called under syslog_lock. */
static void latched_seq_write(struct latched_seq *ls, u64 val)
{
	raw_write_seqcount_latch(&ls->latch);
	ls->val[0] = val;
	raw_write_seqcount_latch(&ls->latch);
	ls->val[1] = val;
}

/* Can be called from any context. */
static u64 latched_seq_read_nolock(struct latched_seq *ls)
{
	unsigned int seq;
	unsigned int idx;
	u64 val;

	do {
		seq = raw_read_seqcount_latch(&ls->latch);
		idx = seq & 0x1;
		val = ls->val[idx];
	} while (read_seqcount_latch_retry(&ls->latch, seq));

	return val;
}
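
/*
 * A minimal usage sketch for the latched sequence pair above, mirroring
 * how @clear_seq is used later in this file: the writer updates under
 * @syslog_lock, while any reader can run locklessly from any context:
 *
 *	mutex_lock(&syslog_lock);
 *	latched_seq_write(&clear_seq, seq);
 *	mutex_unlock(&syslog_lock);
 *
 *	u64 seq = latched_seq_read_nolock(&clear_seq);
 */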

/* Return log buffer address */
char *log_buf_addr_get(void)
{
	return log_buf;
}

/* Return log buffer size */
u32 log_buf_len_get(void)
{
	return log_buf_len;
}

/*
 * Define how much of the log buffer we could take at maximum. The value
 * must be greater than two. Note that only half of the buffer is available
 * when the index points to the middle.
 */
#define MAX_LOG_TAKE_PART 4
static const char trunc_msg[] = "<truncated>";

static void truncate_msg(u16 *text_len, u16 *trunc_msg_len)
{
	/*
	 * The message should not take the whole buffer. Otherwise, it might
	 * get removed too soon.
	 */
	u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;

	if (*text_len > max_text_len)
		*text_len = max_text_len;

	/* enable the warning message (if there is room) */
	*trunc_msg_len = strlen(trunc_msg);
	if (*text_len >= *trunc_msg_len)
		*text_len -= *trunc_msg_len;
	else
		*trunc_msg_len = 0;
}

int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);

static int syslog_action_restricted(int type)
{
	if (dmesg_restrict)
		return 1;
	/*
	 * Unless restricted, we allow "read all" and "get buffer size"
	 * for everybody.
	 */
	return type != SYSLOG_ACTION_READ_ALL &&
	       type != SYSLOG_ACTION_SIZE_BUFFER;
}

static int check_syslog_permissions(int type, int source)
{
	/*
	 * If this is from /proc/kmsg and we've already opened it, then we've
	 * already done the capabilities checks at open time.
	 */
	if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
		goto ok;

	if (syslog_action_restricted(type)) {
		if (capable(CAP_SYSLOG))
			goto ok;
		/*
		 * For historical reasons, accept CAP_SYS_ADMIN too, with
		 * a warning.
		 */
		if (capable(CAP_SYS_ADMIN)) {
			pr_warn_once("%s (%d): Attempt to access syslog with "
				     "CAP_SYS_ADMIN but no CAP_SYSLOG "
				     "(deprecated).\n",
				 current->comm, task_pid_nr(current));
			goto ok;
		}
		return -EPERM;
	}
ok:
	return security_syslog(type);
}

static void append_char(char **pp, char *e, char c)
{
	if (*pp < e)
		*(*pp)++ = c;
}

static ssize_t info_print_ext_header(char *buf, size_t size,
				     struct printk_info *info)
{
	u64 ts_usec = info->ts_nsec;
	char caller[20];
#ifdef CONFIG_PRINTK_CALLER
	u32 id = info->caller_id;

	snprintf(caller, sizeof(caller), ",caller=%c%u",
		 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
#else
	caller[0] = '\0';
#endif

	do_div(ts_usec, 1000);

	return scnprintf(buf, size, "%u,%llu,%llu,%c%s;",
			 (info->facility << 3) | info->level, info->seq,
			 ts_usec, info->flags & LOG_CONT ? 'c' : '-', caller);
}

static ssize_t msg_add_ext_text(char *buf, size_t size,
				const char *text, size_t text_len,
				unsigned char endc)
{
	char *p = buf, *e = buf + size;
	size_t i;

	/* escape non-printable characters */
	for (i = 0; i < text_len; i++) {
		unsigned char c = text[i];

		if (c < ' ' || c >= 127 || c == '\\')
			p += scnprintf(p, e - p, "\\x%02x", c);
		else
			append_char(&p, e, c);
	}
	append_char(&p, e, endc);

	return p - buf;
}

static ssize_t msg_add_dict_text(char *buf, size_t size,
				 const char *key, const char *val)
{
	size_t val_len = strlen(val);
	ssize_t len;

	if (!val_len)
		return 0;

	len = msg_add_ext_text(buf, size, "", 0, ' ');	/* dict prefix */
	len += msg_add_ext_text(buf + len, size - len, key, strlen(key), '=');
	len += msg_add_ext_text(buf + len, size - len, val, val_len, '\n');

	return len;
}

static ssize_t msg_print_ext_body(char *buf, size_t size,
				  char *text, size_t text_len,
				  struct dev_printk_info *dev_info)
{
	ssize_t len;

	len = msg_add_ext_text(buf, size, text, text_len, '\n');

	if (!dev_info)
		goto out;

	len += msg_add_dict_text(buf + len, size - len, "SUBSYSTEM",
				 dev_info->subsystem);
	len += msg_add_dict_text(buf + len, size - len, "DEVICE",
				 dev_info->device);
out:
	return len;
}

/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
	atomic64_t seq;
	struct ratelimit_state rs;
	struct mutex lock;
	char buf[CONSOLE_EXT_LOG_MAX];

	struct printk_info info;
	char text_buf[CONSOLE_EXT_LOG_MAX];
	struct printk_record record;
};

static __printf(3, 4) __cold
int devkmsg_emit(int facility, int level, const char *fmt, ...)
{
	va_list args;
	int r;

	va_start(args, fmt);
	r = vprintk_emit(facility, level, NULL, fmt, args);
	va_end(args);

	return r;
}

static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
{
	char *buf, *line;
	int level = default_message_loglevel;
	int facility = 1;	/* LOG_USER */
	struct file *file = iocb->ki_filp;
	struct devkmsg_user *user = file->private_data;
	size_t len = iov_iter_count(from);
	ssize_t ret = len;

	if (!user || len > LOG_LINE_MAX)
		return -EINVAL;

	/* Ignore when user logging is disabled. */
	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
		return len;

	/* Ratelimit when not explicitly enabled. */
	if (!(devkmsg_log & DEVKMSG_LOG_MASK_ON)) {
		if (!___ratelimit(&user->rs, current->comm))
			return ret;
	}

	buf = kmalloc(len+1, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	buf[len] = '\0';
	if (!copy_from_iter_full(buf, len, from)) {
		kfree(buf);
		return -EFAULT;
	}

	/*
	 * Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
	 * the decimal value represents a 32bit quantity: the lower 3 bits are
	 * the log level, the rest is the log facility.
	 *
	 * If no prefix or no userspace facility is specified, we
	 * enforce LOG_USER, to be able to reliably distinguish
	 * kernel-generated messages from userspace-injected ones.
	 */
	line = buf;
	if (line[0] == '<') {
		char *endp = NULL;
		unsigned int u;

		u = simple_strtoul(line + 1, &endp, 10);
		if (endp && endp[0] == '>') {
			level = LOG_LEVEL(u);
			if (LOG_FACILITY(u) != 0)
				facility = LOG_FACILITY(u);
			endp++;
			line = endp;
		}
	}

	devkmsg_emit(facility, level, "%s", line);
	kfree(buf);
	return ret;
}
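
/*
 * Illustrative examples of the prefix handling above: writing
 * "<14>hello\n" to /dev/kmsg stores a record with level 6 and
 * facility 1 (LOG_USER); writing "hello\n" without a prefix falls
 * back to default_message_loglevel and facility LOG_USER.
 */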

static ssize_t devkmsg_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct devkmsg_user *user = file->private_data;
	struct printk_record *r = &user->record;
	size_t len;
	ssize_t ret;

	if (!user)
		return -EBADF;

	ret = mutex_lock_interruptible(&user->lock);
	if (ret)
		return ret;

	if (!prb_read_valid(prb, atomic64_read(&user->seq), r)) {
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}

		/*
		 * Guarantee this task is visible on the waitqueue before
		 * checking the wake condition.
		 *
		 * The full memory barrier within set_current_state() of
		 * prepare_to_wait_event() pairs with the full memory barrier
		 * within wq_has_sleeper().
		 *
		 * This pairs with __wake_up_klogd:A.
		 */
		ret = wait_event_interruptible(log_wait,
				prb_read_valid(prb,
					atomic64_read(&user->seq), r)); /* LMM(devkmsg_read:A) */
		if (ret)
			goto out;
	}

	if (r->info->seq != atomic64_read(&user->seq)) {
		/* our last seen message is gone, return error and reset */
		atomic64_set(&user->seq, r->info->seq);
		ret = -EPIPE;
		goto out;
	}

	len = info_print_ext_header(user->buf, sizeof(user->buf), r->info);
	len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len,
				  &r->text_buf[0], r->info->text_len,
				  &r->info->dev_info);

	atomic64_set(&user->seq, r->info->seq + 1);

	if (len > count) {
		ret = -EINVAL;
		goto out;
	}

	if (copy_to_user(buf, user->buf, len)) {
		ret = -EFAULT;
		goto out;
	}
	ret = len;
out:
	mutex_unlock(&user->lock);
	return ret;
}

/*
 * Be careful when modifying this function!!!
 *
 * Only a few operations are supported because the device works only with
 * entire variable-length messages (records). Non-standard values are
 * returned in the other cases and it has been this way for quite some time.
 * User space applications might depend on this behavior.
 */
static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
{
	struct devkmsg_user *user = file->private_data;
	loff_t ret = 0;

	if (!user)
		return -EBADF;
	if (offset)
		return -ESPIPE;

	switch (whence) {
	case SEEK_SET:
		/* the first record */
		atomic64_set(&user->seq, prb_first_valid_seq(prb));
		break;
	case SEEK_DATA:
		/*
		 * The first record after the last SYSLOG_ACTION_CLEAR,
		 * like issued by 'dmesg -c'. Reading /dev/kmsg itself
		 * changes no global state, and does not clear anything.
		 */
		atomic64_set(&user->seq, latched_seq_read_nolock(&clear_seq));
		break;
	case SEEK_END:
		/* after the last record */
		atomic64_set(&user->seq, prb_next_seq(prb));
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

static __poll_t devkmsg_poll(struct file *file, poll_table *wait)
{
	struct devkmsg_user *user = file->private_data;
	struct printk_info info;
	__poll_t ret = 0;

	if (!user)
		return EPOLLERR|EPOLLNVAL;

	poll_wait(file, &log_wait, wait);

	if (prb_read_valid_info(prb, atomic64_read(&user->seq), &info, NULL)) {
		/* return error when data has vanished underneath us */
		if (info.seq != atomic64_read(&user->seq))
			ret = EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
		else
			ret = EPOLLIN|EPOLLRDNORM;
	}

	return ret;
}

static int devkmsg_open(struct inode *inode, struct file *file)
{
	struct devkmsg_user *user;
	int err;

	if (devkmsg_log & DEVKMSG_LOG_MASK_OFF)
		return -EPERM;

	/* write-only does not need any file context */
	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
					       SYSLOG_FROM_READER);
		if (err)
			return err;
	}

	user = kvmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
	if (!user)
		return -ENOMEM;

	ratelimit_default_init(&user->rs);
	ratelimit_set_flags(&user->rs, RATELIMIT_MSG_ON_RELEASE);

	mutex_init(&user->lock);

	prb_rec_init_rd(&user->record, &user->info,
			&user->text_buf[0], sizeof(user->text_buf));

	atomic64_set(&user->seq, prb_first_valid_seq(prb));

	file->private_data = user;
	return 0;
}

static int devkmsg_release(struct inode *inode, struct file *file)
{
	struct devkmsg_user *user = file->private_data;

	if (!user)
		return 0;

	ratelimit_state_exit(&user->rs);

	mutex_destroy(&user->lock);
	kvfree(user);
	return 0;
}

const struct file_operations kmsg_fops = {
	.open = devkmsg_open,
	.read = devkmsg_read,
	.write_iter = devkmsg_write,
	.llseek = devkmsg_llseek,
	.poll = devkmsg_poll,
	.release = devkmsg_release,
};

#ifdef CONFIG_CRASH_CORE
/*
 * This appends the listed symbols to /proc/vmcore
 *
 * /proc/vmcore is used by various utilities, like crash and makedumpfile to
 * obtain access to symbols that are otherwise very difficult to locate.  These
 * symbols are specifically used so that utilities can access and extract the
 * dmesg log from a vmcore file after a crash.
 */
void log_buf_vmcoreinfo_setup(void)
{
	struct dev_printk_info *dev_info = NULL;

	VMCOREINFO_SYMBOL(prb);
	VMCOREINFO_SYMBOL(printk_rb_static);
	VMCOREINFO_SYMBOL(clear_seq);

	/*
	 * Export struct size and field offsets. User space tools can
	 * parse it and detect any changes to structure down the line.
	 */

	VMCOREINFO_STRUCT_SIZE(printk_ringbuffer);
	VMCOREINFO_OFFSET(printk_ringbuffer, desc_ring);
	VMCOREINFO_OFFSET(printk_ringbuffer, text_data_ring);
	VMCOREINFO_OFFSET(printk_ringbuffer, fail);

	VMCOREINFO_STRUCT_SIZE(prb_desc_ring);
	VMCOREINFO_OFFSET(prb_desc_ring, count_bits);
	VMCOREINFO_OFFSET(prb_desc_ring, descs);
	VMCOREINFO_OFFSET(prb_desc_ring, infos);
	VMCOREINFO_OFFSET(prb_desc_ring, head_id);
	VMCOREINFO_OFFSET(prb_desc_ring, tail_id);

	VMCOREINFO_STRUCT_SIZE(prb_desc);
	VMCOREINFO_OFFSET(prb_desc, state_var);
	VMCOREINFO_OFFSET(prb_desc, text_blk_lpos);

	VMCOREINFO_STRUCT_SIZE(prb_data_blk_lpos);
	VMCOREINFO_OFFSET(prb_data_blk_lpos, begin);
	VMCOREINFO_OFFSET(prb_data_blk_lpos, next);

	VMCOREINFO_STRUCT_SIZE(printk_info);
	VMCOREINFO_OFFSET(printk_info, seq);
	VMCOREINFO_OFFSET(printk_info, ts_nsec);
	VMCOREINFO_OFFSET(printk_info, text_len);
	VMCOREINFO_OFFSET(printk_info, caller_id);
	VMCOREINFO_OFFSET(printk_info, dev_info);

	VMCOREINFO_STRUCT_SIZE(dev_printk_info);
	VMCOREINFO_OFFSET(dev_printk_info, subsystem);
	VMCOREINFO_LENGTH(printk_info_subsystem, sizeof(dev_info->subsystem));
	VMCOREINFO_OFFSET(dev_printk_info, device);
	VMCOREINFO_LENGTH(printk_info_device, sizeof(dev_info->device));

	VMCOREINFO_STRUCT_SIZE(prb_data_ring);
	VMCOREINFO_OFFSET(prb_data_ring, size_bits);
	VMCOREINFO_OFFSET(prb_data_ring, data);
	VMCOREINFO_OFFSET(prb_data_ring, head_lpos);
	VMCOREINFO_OFFSET(prb_data_ring, tail_lpos);

	VMCOREINFO_SIZE(atomic_long_t);
	VMCOREINFO_TYPE_OFFSET(atomic_long_t, counter);

	VMCOREINFO_STRUCT_SIZE(latched_seq);
	VMCOREINFO_OFFSET(latched_seq, val);
}
#endif

/* requested log_buf_len from kernel cmdline */
static unsigned long __initdata new_log_buf_len;

/* we practice scaling the ring buffer by powers of 2 */
static void __init log_buf_len_update(u64 size)
{
	if (size > (u64)LOG_BUF_LEN_MAX) {
		size = (u64)LOG_BUF_LEN_MAX;
		pr_err("log_buf over 2G is not supported.\n");
	}

	if (size)
		size = roundup_pow_of_two(size);
	if (size > log_buf_len)
		new_log_buf_len = (unsigned long)size;
}

/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
	u64 size;

	if (!str)
		return -EINVAL;

	size = memparse(str, &str);

	log_buf_len_update(size);

	return 0;
}
early_param("log_buf_len", log_buf_len_setup);
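
/*
 * Example: booting with "log_buf_len=4M" requests a 4 MiB buffer.
 * memparse() accepts the usual K/M/G suffixes, and the requested size
 * is rounded up to a power of two by log_buf_len_update() before the
 * dynamic buffer is allocated in setup_log_buf().
 */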

#ifdef CONFIG_SMP
#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)

static void __init log_buf_add_cpu(void)
{
	unsigned int cpu_extra;

	/*
	 * archs should set up cpu_possible_bits properly with
	 * set_cpu_possible() after setup_arch() but just in
	 * case let's ensure this is valid.
	 */
	if (num_possible_cpus() == 1)
		return;

	cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;

	/* by default this will only continue through for large systems (> 64 CPUs) */
	if (cpu_extra <= __LOG_BUF_LEN / 2)
		return;

	pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
		__LOG_CPU_MAX_BUF_LEN);
	pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
		cpu_extra);
	pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);

	log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
}
#else /* !CONFIG_SMP */
static inline void log_buf_add_cpu(void) {}
#endif /* CONFIG_SMP */

static void __init set_percpu_data_ready(void)
{
	__printk_percpu_data_ready = true;
}

static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
				     struct printk_record *r)
{
	struct prb_reserved_entry e;
	struct printk_record dest_r;

	prb_rec_init_wr(&dest_r, r->info->text_len);

	if (!prb_reserve(&e, rb, &dest_r))
		return 0;

	memcpy(&dest_r.text_buf[0], &r->text_buf[0], r->info->text_len);
	dest_r.info->text_len = r->info->text_len;
	dest_r.info->facility = r->info->facility;
	dest_r.info->level = r->info->level;
	dest_r.info->flags = r->info->flags;
	dest_r.info->ts_nsec = r->info->ts_nsec;
	dest_r.info->caller_id = r->info->caller_id;
	memcpy(&dest_r.info->dev_info, &r->info->dev_info, sizeof(dest_r.info->dev_info));

	prb_final_commit(&e);

	return prb_record_text_space(&e);
}

static char setup_text_buf[LOG_LINE_MAX] __initdata;

void __init setup_log_buf(int early)
{
	struct printk_info *new_infos;
	unsigned int new_descs_count;
	struct prb_desc *new_descs;
	struct printk_info info;
	struct printk_record r;
	unsigned int text_size;
	size_t new_descs_size;
	size_t new_infos_size;
	unsigned long flags;
	char *new_log_buf;
	unsigned int free;
	u64 seq;

	/*
	 * Some archs call setup_log_buf() multiple times - first is very
	 * early, e.g. from setup_arch(), and second - when percpu_areas
	 * are initialised.
	 */
	if (!early)
		set_percpu_data_ready();

	if (log_buf != __log_buf)
		return;

	if (!early && !new_log_buf_len)
		log_buf_add_cpu();

	if (!new_log_buf_len)
		return;

	new_descs_count = new_log_buf_len >> PRB_AVGBITS;
	if (new_descs_count == 0) {
		pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
		return;
	}

	new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
	if (unlikely(!new_log_buf)) {
		pr_err("log_buf_len: %lu text bytes not available\n",
		       new_log_buf_len);
		return;
	}

	new_descs_size = new_descs_count * sizeof(struct prb_desc);
	new_descs = memblock_alloc(new_descs_size, LOG_ALIGN);
	if (unlikely(!new_descs)) {
		pr_err("log_buf_len: %zu desc bytes not available\n",
		       new_descs_size);
		goto err_free_log_buf;
	}

	new_infos_size = new_descs_count * sizeof(struct printk_info);
	new_infos = memblock_alloc(new_infos_size, LOG_ALIGN);
	if (unlikely(!new_infos)) {
		pr_err("log_buf_len: %zu info bytes not available\n",
		       new_infos_size);
		goto err_free_descs;
	}

	prb_rec_init_rd(&r, &info, &setup_text_buf[0], sizeof(setup_text_buf));

	prb_init(&printk_rb_dynamic,
		 new_log_buf, ilog2(new_log_buf_len),
		 new_descs, ilog2(new_descs_count),
		 new_infos);

	local_irq_save(flags);

	log_buf_len = new_log_buf_len;
	log_buf = new_log_buf;
	new_log_buf_len = 0;

	free = __LOG_BUF_LEN;
	prb_for_each_record(0, &printk_rb_static, seq, &r) {
		text_size = add_to_rb(&printk_rb_dynamic, &r);
		if (text_size > free)
			free = 0;
		else
			free -= text_size;
	}

	prb = &printk_rb_dynamic;

	local_irq_restore(flags);

	/*
	 * Copy any remaining messages that might have appeared from
	 * NMI context after copying but before switching to the
	 * dynamic buffer.
	 */
	prb_for_each_record(seq, &printk_rb_static, seq, &r) {
		text_size = add_to_rb(&printk_rb_dynamic, &r);
		if (text_size > free)
			free = 0;
		else
			free -= text_size;
	}

	if (seq != prb_next_seq(&printk_rb_static)) {
		pr_err("dropped %llu messages\n",
		       prb_next_seq(&printk_rb_static) - seq);
	}

	pr_info("log_buf_len: %u bytes\n", log_buf_len);
	pr_info("early log buf free: %u(%u%%)\n",
		free, (free * 100) / __LOG_BUF_LEN);
	return;

err_free_descs:
	memblock_free(new_descs, new_descs_size);
err_free_log_buf:
	memblock_free(new_log_buf, new_log_buf_len);
}

static bool __read_mostly ignore_loglevel;

static int __init ignore_loglevel_setup(char *str)
{
	ignore_loglevel = true;
	pr_info("debug: ignoring loglevel setting.\n");

	return 0;
}

early_param("ignore_loglevel", ignore_loglevel_setup);
module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ignore_loglevel,
		 "ignore loglevel setting (prints all kernel messages to the console)");

static bool suppress_message_printing(int level)
{
	return (level >= console_loglevel && !ignore_loglevel);
}
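
/*
 * E.g. with the common default console_loglevel of 7, a KERN_INFO
 * (level 6) message is printed to the console while a KERN_DEBUG
 * (level 7) message is suppressed, unless "ignore_loglevel" was given
 * on the command line.
 */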

#ifdef CONFIG_BOOT_PRINTK_DELAY

static int boot_delay; /* msecs delay after each printk during bootup */
static unsigned long long loops_per_msec;	/* based on boot_delay */

static int __init boot_delay_setup(char *str)
{
	unsigned long lpj;

	lpj = preset_lpj ? preset_lpj : 1000000;	/* some guess */
	loops_per_msec = (unsigned long long)lpj / 1000 * HZ;

	get_option(&str, &boot_delay);
	if (boot_delay > 10 * 1000)
		boot_delay = 0;

	pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
		"HZ: %d, loops_per_msec: %llu\n",
		boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
	return 0;
}
early_param("boot_delay", boot_delay_setup);
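
/*
 * Example: booting with "boot_delay=100" busy-waits roughly 100 ms
 * after each printk() during boot. Values above 10000 are treated as
 * bogus by boot_delay_setup() above and reset to 0.
 */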

static void boot_delay_msec(int level)
{
	unsigned long long k;
	unsigned long timeout;

	if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING)
		|| suppress_message_printing(level)) {
		return;
	}

	k = (unsigned long long)loops_per_msec * boot_delay;

	timeout = jiffies + msecs_to_jiffies(boot_delay);
	while (k) {
		k--;
		cpu_relax();
		/*
		 * use (volatile) jiffies to prevent the compiler from
		 * optimizing the loop away; loop termination via jiffies
		 * is secondary and may or may not happen.
		 */
		if (time_after(jiffies, timeout))
			break;
		touch_nmi_watchdog();
	}
}
#else
static inline void boot_delay_msec(int level)
{
}
#endif

static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);

static size_t print_syslog(unsigned int level, char *buf)
{
	return sprintf(buf, "<%u>", level);
}

static size_t print_time(u64 ts, char *buf)
{
	unsigned long rem_nsec = do_div(ts, 1000000000);

	return sprintf(buf, "[%5lu.%06lu]",
		       (unsigned long)ts, rem_nsec / 1000);
}
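
/* E.g. a timestamp of 5123456789 ns formats as "[    5.123456]". */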

#ifdef CONFIG_PRINTK_CALLER
static size_t print_caller(u32 id, char *buf)
{
	char caller[12];

	snprintf(caller, sizeof(caller), "%c%u",
		 id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
	return sprintf(buf, "[%6s]", caller);
}
#else
#define print_caller(id, buf) 0
#endif

static size_t info_print_prefix(const struct printk_info *info, bool syslog,
				bool time, char *buf)
{
	size_t len = 0;

	if (syslog)
		len = print_syslog((info->facility << 3) | info->level, buf);

	if (time)
		len += print_time(info->ts_nsec, buf + len);

	len += print_caller(info->caller_id, buf + len);

	if (IS_ENABLED(CONFIG_PRINTK_CALLER) || time) {
		buf[len++] = ' ';
		buf[len] = '\0';
	}

	return len;
}
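
/*
 * E.g. with @syslog and @time enabled (and CONFIG_PRINTK_CALLER off),
 * a LOG_KERN level 6 record with the timestamp above gets the prefix
 * "<6>[    5.123456] ".
 */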

/*
 * Prepare the record for printing. The text is shifted within the given
 * buffer to avoid a need for another one. The following operations are
 * done:
 *
 *   - Add prefix for each line.
 *   - Drop truncated lines that no longer fit into the buffer.
 *   - Add the trailing newline that has been removed in vprintk_store().
 *   - Add a string terminator.
 *
 * Since the produced string is always terminated, the maximum possible
 * return value is @r->text_buf_size - 1.
 *
 * Return: The length of the updated/prepared text, including the added
 * prefixes and the newline. The terminator is not counted. The dropped
 * line(s) are not counted.
 */
static size_t record_print_text(struct printk_record *r, bool syslog,
				bool time)
{
	size_t text_len = r->info->text_len;
	size_t buf_size = r->text_buf_size;
	char *text = r->text_buf;
	char prefix[PREFIX_MAX];
	bool truncated = false;
	size_t prefix_len;
	size_t line_len;
	size_t len = 0;
	char *next;

	/*
	 * If the message was truncated because the buffer was not large
	 * enough, treat the available text as if it were the full text.
	 */
	if (text_len > buf_size)
		text_len = buf_size;

	prefix_len = info_print_prefix(r->info, syslog, time, prefix);

	/*
	 * @text_len: bytes of unprocessed text
	 * @line_len: bytes of current line _without_ newline
	 * @text:     pointer to beginning of current line
	 * @len:      number of bytes prepared in r->text_buf
	 */
	for (;;) {
		next = memchr(text, '\n', text_len);
		if (next) {
			line_len = next - text;
		} else {
			/* Drop truncated line(s). */
			if (truncated)
				break;
			line_len = text_len;
		}

		/*
		 * Truncate the text if there is not enough space to add the
		 * prefix and a trailing newline and a terminator.
		 */
		if (len + prefix_len + text_len + 1 + 1 > buf_size) {
			/* Drop even the current line if no space. */
			if (len + prefix_len + line_len + 1 + 1 > buf_size)
				break;

			text_len = buf_size - len - prefix_len - 1 - 1;
			truncated = true;
		}

		memmove(text + prefix_len, text, text_len);
		memcpy(text, prefix, prefix_len);

		/*
		 * Increment the prepared length to include the text and
		 * prefix that were just moved+copied. Also increment for the
		 * newline at the end of this line. If this is the last line,
		 * there is no newline, but it will be added immediately below.
		 */
		len += prefix_len + line_len + 1;
		if (text_len == line_len) {
			/*
			 * This is the last line. Add the trailing newline
			 * removed in vprintk_store().
			 */
			text[prefix_len + line_len] = '\n';
			break;
		}

		/*
		 * Advance beyond the added prefix and the related line with
		 * its newline.
		 */
		text += prefix_len + line_len + 1;

		/*
		 * The remaining text has only decreased by the line with its
		 * newline.
		 *
		 * Note that @text_len can become zero. It happens when @text
		 * ended with a newline (either due to truncation or the
		 * original string ending with "\n\n"). The loop is correctly
		 * repeated and (if not truncated) an empty line with a prefix
		 * will be prepared.
		 */
		text_len -= line_len + 1;
	}

	/*
	 * If a buffer was provided, it will be terminated. Space for the
	 * string terminator is guaranteed to be available. The terminator is
	 * not counted in the return value.
	 */
	if (buf_size > 0)
		r->text_buf[len] = 0;

	return len;
}

static size_t get_record_print_text_size(struct printk_info *info,
					 unsigned int line_count,
					 bool syslog, bool time)
{
	char prefix[PREFIX_MAX];
	size_t prefix_len;

	prefix_len = info_print_prefix(info, syslog, time, prefix);

	/*
	 * Each line will be preceded with a prefix. The intermediate
	 * newlines are already within the text, but a final trailing
	 * newline will be added.
	 */
	return ((prefix_len * line_count) + info->text_len + 1);
}

/*
 * Beginning with @start_seq, find the first record where it and all following
 * records up to (but not including) @max_seq fit into @size.
 *
 * @max_seq is simply an upper bound and does not need to exist. If the caller
 * does not require an upper bound, -1 can be used for @max_seq.
 */
static u64 find_first_fitting_seq(u64 start_seq, u64 max_seq, size_t size,
				  bool syslog, bool time)
{
	struct printk_info info;
	unsigned int line_count;
	size_t len = 0;
	u64 seq;

	/* Determine the size of the records up to @max_seq. */
	prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
		if (info.seq >= max_seq)
			break;
		len += get_record_print_text_size(&info, line_count, syslog, time);
	}

	/*
	 * Adjust the upper bound for the next loop to avoid subtracting
	 * lengths that were never added.
	 */
	if (seq < max_seq)
		max_seq = seq;

	/*
	 * Move first record forward until length fits into the buffer. Ignore
	 * newest messages that were not counted in the above cycle. Messages
	 * might appear and get lost in the meantime. This is a best effort
	 * that prevents an infinite loop that could occur with a retry.
	 */
	prb_for_each_info(start_seq, prb, seq, &info, &line_count) {
		if (len <= size || info.seq >= max_seq)
			break;
		len -= get_record_print_text_size(&info, line_count, syslog, time);
	}

	return seq;
}

/* The caller is responsible for making sure @size is greater than 0. */
static int syslog_print(char __user *buf, int size)
{
	struct printk_info info;
	struct printk_record r;
	char *text;
	int len = 0;
	u64 seq;

	text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
	if (!text)
		return -ENOMEM;

	prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);

	mutex_lock(&syslog_lock);

	/*
	 * Wait for the @syslog_seq record to be available. @syslog_seq may
	 * change while waiting.
	 */
	do {
		seq = syslog_seq;

		mutex_unlock(&syslog_lock);
		/*
		 * Guarantee this task is visible on the waitqueue before
		 * checking the wake condition.
		 *
		 * The full memory barrier within set_current_state() of
		 * prepare_to_wait_event() pairs with the full memory barrier
		 * within wq_has_sleeper().
		 *
		 * This pairs with __wake_up_klogd:A.
		 */
		len = wait_event_interruptible(log_wait,
				prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */
		mutex_lock(&syslog_lock);

		if (len)
			goto out;
	} while (syslog_seq != seq);

	/*
	 * Copy records that fit into the buffer. The above cycle makes sure
	 * that the first record is always available.
	 */
	do {
		size_t n;
		size_t skip;
		int err;

		if (!prb_read_valid(prb, syslog_seq, &r))
			break;

		if (r.info->seq != syslog_seq) {
			/* message is gone, move to next valid one */
			syslog_seq = r.info->seq;
			syslog_partial = 0;
		}

		/*
		 * To keep the reading/counting of partial lines consistent,
		 * use the printk_time value as of the beginning of a line.
		 */
		if (!syslog_partial)
			syslog_time = printk_time;

		skip = syslog_partial;
		n = record_print_text(&r, true, syslog_time);
		if (n - syslog_partial <= size) {
			/* message fits into buffer, move forward */
			syslog_seq = r.info->seq + 1;
			n -= syslog_partial;
			syslog_partial = 0;
		} else if (!len) {
			/* partial read(), remember position */
			n = size;
			syslog_partial += n;
		} else
			n = 0;

		if (!n)
			break;

		mutex_unlock(&syslog_lock);
		err = copy_to_user(buf, text + skip, n);
		mutex_lock(&syslog_lock);

		if (err) {
			if (!len)
				len = -EFAULT;
			break;
		}

		len += n;
		size -= n;
		buf += n;
	} while (size);
out:
	mutex_unlock(&syslog_lock);
	kfree(text);
	return len;
}
1728 
1729 static int syslog_print_all(char __user *buf, int size, bool clear)
1730 {
1731 	struct printk_info info;
1732 	struct printk_record r;
1733 	char *text;
1734 	int len = 0;
1735 	u64 seq;
1736 	bool time;
1737 
1738 	text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
1739 	if (!text)
1740 		return -ENOMEM;
1741 
1742 	time = printk_time;
1743 	/*
1744 	 * Find first record that fits, including all following records,
1745 	 * into the user-provided buffer for this dump.
1746 	 */
1747 	seq = find_first_fitting_seq(latched_seq_read_nolock(&clear_seq), -1,
1748 				     size, true, time);
1749 
1750 	prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
1751 
1752 	len = 0;
1753 	prb_for_each_record(seq, prb, seq, &r) {
1754 		int textlen;
1755 
1756 		textlen = record_print_text(&r, true, time);
1757 
1758 		if (len + textlen > size) {
1759 			seq--;
1760 			break;
1761 		}
1762 
1763 		if (copy_to_user(buf + len, text, textlen))
1764 			len = -EFAULT;
1765 		else
1766 			len += textlen;
1767 
1768 		if (len < 0)
1769 			break;
1770 	}
1771 
1772 	if (clear) {
1773 		mutex_lock(&syslog_lock);
1774 		latched_seq_write(&clear_seq, seq);
1775 		mutex_unlock(&syslog_lock);
1776 	}
1777 
1778 	kfree(text);
1779 	return len;
1780 }
1781 
1782 static void syslog_clear(void)
1783 {
1784 	mutex_lock(&syslog_lock);
1785 	latched_seq_write(&clear_seq, prb_next_seq(prb));
1786 	mutex_unlock(&syslog_lock);
1787 }
1788 
1789 int do_syslog(int type, char __user *buf, int len, int source)
1790 {
1791 	struct printk_info info;
1792 	bool clear = false;
1793 	static int saved_console_loglevel = LOGLEVEL_DEFAULT;
1794 	int error;
1795 
1796 	error = check_syslog_permissions(type, source);
1797 	if (error)
1798 		return error;
1799 
1800 	switch (type) {
1801 	case SYSLOG_ACTION_CLOSE:	/* Close log */
1802 		break;
1803 	case SYSLOG_ACTION_OPEN:	/* Open log */
1804 		break;
1805 	case SYSLOG_ACTION_READ:	/* Read from log */
1806 		if (!buf || len < 0)
1807 			return -EINVAL;
1808 		if (!len)
1809 			return 0;
1810 		if (!access_ok(buf, len))
1811 			return -EFAULT;
1812 		error = syslog_print(buf, len);
1813 		break;
1814 	/* Read/clear last kernel messages */
1815 	case SYSLOG_ACTION_READ_CLEAR:
1816 		clear = true;
1817 		fallthrough;
1818 	/* Read last kernel messages */
1819 	case SYSLOG_ACTION_READ_ALL:
1820 		if (!buf || len < 0)
1821 			return -EINVAL;
1822 		if (!len)
1823 			return 0;
1824 		if (!access_ok(buf, len))
1825 			return -EFAULT;
1826 		error = syslog_print_all(buf, len, clear);
1827 		break;
1828 	/* Clear ring buffer */
1829 	case SYSLOG_ACTION_CLEAR:
1830 		syslog_clear();
1831 		break;
1832 	/* Disable logging to console */
1833 	case SYSLOG_ACTION_CONSOLE_OFF:
1834 		if (saved_console_loglevel == LOGLEVEL_DEFAULT)
1835 			saved_console_loglevel = console_loglevel;
1836 		console_loglevel = minimum_console_loglevel;
1837 		break;
1838 	/* Enable logging to console */
1839 	case SYSLOG_ACTION_CONSOLE_ON:
1840 		if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
1841 			console_loglevel = saved_console_loglevel;
1842 			saved_console_loglevel = LOGLEVEL_DEFAULT;
1843 		}
1844 		break;
1845 	/* Set level of messages printed to console */
1846 	case SYSLOG_ACTION_CONSOLE_LEVEL:
1847 		if (len < 1 || len > 8)
1848 			return -EINVAL;
1849 		if (len < minimum_console_loglevel)
1850 			len = minimum_console_loglevel;
1851 		console_loglevel = len;
1852 		/* Implicitly re-enable logging to console */
1853 		saved_console_loglevel = LOGLEVEL_DEFAULT;
1854 		break;
1855 	/* Number of chars in the log buffer */
1856 	case SYSLOG_ACTION_SIZE_UNREAD:
1857 		mutex_lock(&syslog_lock);
1858 		if (!prb_read_valid_info(prb, syslog_seq, &info, NULL)) {
1859 			/* No unread messages. */
1860 			mutex_unlock(&syslog_lock);
1861 			return 0;
1862 		}
1863 		if (info.seq != syslog_seq) {
1864 			/* messages are gone, move to first one */
1865 			syslog_seq = info.seq;
1866 			syslog_partial = 0;
1867 		}
1868 		if (source == SYSLOG_FROM_PROC) {
1869 			/*
1870 			 * Short-cut for poll("/proc/kmsg") which simply checks
1871 			 * for pending data, not the size; return the count of
1872 			 * records, not the length.
1873 			 */
1874 			error = prb_next_seq(prb) - syslog_seq;
1875 		} else {
1876 			bool time = syslog_partial ? syslog_time : printk_time;
1877 			unsigned int line_count;
1878 			u64 seq;
1879 
1880 			prb_for_each_info(syslog_seq, prb, seq, &info,
1881 					  &line_count) {
1882 				error += get_record_print_text_size(&info, line_count,
1883 								    true, time);
1884 				time = printk_time;
1885 			}
1886 			error -= syslog_partial;
1887 		}
1888 		mutex_unlock(&syslog_lock);
1889 		break;
1890 	/* Size of the log buffer */
1891 	case SYSLOG_ACTION_SIZE_BUFFER:
1892 		error = log_buf_len;
1893 		break;
1894 	default:
1895 		error = -EINVAL;
1896 		break;
1897 	}
1898 
1899 	return error;
1900 }
1901 
1902 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
1903 {
1904 	return do_syslog(type, buf, len, SYSLOG_FROM_READER);
1905 }
1906 
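/*
 * Example (illustrative sketch, not part of this file): userspace
 * reaches do_syslog() through the syslog(2) syscall, e.g. via glibc's
 * klogctl(). Assuming sufficient privileges (CAP_SYSLOG), the whole
 * log buffer can be dumped with SYSLOG_ACTION_SIZE_BUFFER (10) and
 * SYSLOG_ACTION_READ_ALL (3):
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/klog.h>
 *
 *	int main(void)
 *	{
 *		int size = klogctl(10, NULL, 0);	// size of log_buf
 *		char *buf = size > 0 ? malloc(size) : NULL;
 *
 *		if (buf) {
 *			int len = klogctl(3, buf, size);	// read all
 *
 *			if (len > 0)
 *				fwrite(buf, 1, len, stdout);
 *			free(buf);
 *		}
 *		return 0;
 *	}
 */
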
1907 /*
1908  * Special console_lock variants that help to reduce the risk of soft-lockups.
1909  * They allow passing console_lock to another printk() call using a busy wait.
1910  */
1911 
1912 #ifdef CONFIG_LOCKDEP
1913 static struct lockdep_map console_owner_dep_map = {
1914 	.name = "console_owner"
1915 };
1916 #endif
1917 
1918 static DEFINE_RAW_SPINLOCK(console_owner_lock);
1919 static struct task_struct *console_owner;
1920 static bool console_waiter;
1921 
1922 /**
1923  * console_lock_spinning_enable - mark beginning of code where another
1924  *	thread might safely busy wait
1925  *
1926  * This basically converts console_lock into a spinlock. This marks
1927  * the section where the console_lock owner can not sleep, because
1928  * the section where the console_lock owner cannot sleep, because
1929  * ready to hand over the lock at the end of the section.
1930  */
1931 static void console_lock_spinning_enable(void)
1932 {
1933 	raw_spin_lock(&console_owner_lock);
1934 	console_owner = current;
1935 	raw_spin_unlock(&console_owner_lock);
1936 
1937 	/* The waiter may spin on us after setting console_owner */
1938 	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
1939 }
1940 
1941 /**
1942  * console_lock_spinning_disable_and_check - mark end of code where another
1943  *	thread was able to busy wait and check if there is a waiter
1944  *
1945  * This is called at the end of the section where spinning is allowed.
1946  * It has two functions. First, it is a signal that it is no longer
1947  * safe to start busy waiting for the lock. Second, it checks if
1948  * there is a busy waiter and, if so, passes the lock rights to it.
1949  *
1950  * Important: Callers lose the lock if there was a busy waiter.
1951  *	They must not touch items synchronized by console_lock
1952  *	in this case.
1953  *
1954  * Return: 1 if the lock rights were passed, 0 otherwise.
1955  */
1956 static int console_lock_spinning_disable_and_check(void)
1957 {
1958 	int waiter;
1959 
1960 	raw_spin_lock(&console_owner_lock);
1961 	waiter = READ_ONCE(console_waiter);
1962 	console_owner = NULL;
1963 	raw_spin_unlock(&console_owner_lock);
1964 
1965 	if (!waiter) {
1966 		spin_release(&console_owner_dep_map, _THIS_IP_);
1967 		return 0;
1968 	}
1969 
1970 	/* The waiter is now free to continue */
1971 	WRITE_ONCE(console_waiter, false);
1972 
1973 	spin_release(&console_owner_dep_map, _THIS_IP_);
1974 
1975 	/*
1976 	 * Hand off console_lock to waiter. The waiter will perform
1977 	 * the up(). After this, the waiter is the console_lock owner.
1978 	 */
1979 	mutex_release(&console_lock_dep_map, _THIS_IP_);
1980 	return 1;
1981 }
1982 
1983 /**
1984  * console_trylock_spinning - try to get console_lock by busy waiting
1985  *
1986  * This allows busy waiting for the console_lock when the current
1987  * owner is running in specially marked sections. It means that
1988  * the current owner is running and cannot reschedule until it
1989  * is ready to lose the lock.
1990  *
1991  * Return: 1 if we got the lock, 0 otherwise
1992  */
1993 static int console_trylock_spinning(void)
1994 {
1995 	struct task_struct *owner = NULL;
1996 	bool waiter;
1997 	bool spin = false;
1998 	unsigned long flags;
1999 
2000 	if (console_trylock())
2001 		return 1;
2002 
2003 	/*
2004 	 * It's unsafe to spin once a panic has begun. If we are the
2005 	 * panic CPU, we may have already halted the owner of the
2006 	 * console_sem. If we are not the panic CPU, then we should
2007 	 * avoid taking console_sem, so the panic CPU has a better
2008 	 * chance of cleanly acquiring it later.
2009 	 */
2010 	if (panic_in_progress())
2011 		return 0;
2012 
2013 	printk_safe_enter_irqsave(flags);
2014 
2015 	raw_spin_lock(&console_owner_lock);
2016 	owner = READ_ONCE(console_owner);
2017 	waiter = READ_ONCE(console_waiter);
2018 	if (!waiter && owner && owner != current) {
2019 		WRITE_ONCE(console_waiter, true);
2020 		spin = true;
2021 	}
2022 	raw_spin_unlock(&console_owner_lock);
2023 
2024 	/*
2025 	 * If there is an active printk() writing to the
2026 	 * consoles, instead of having it write our data too,
2027 	 * see if we can take over some of that work from the active
2028 	 * printer and do some printing ourselves.
2029 	 * Go into a spin only if there isn't already a waiter
2030 	 * spinning, and there is an active printer, and
2031 	 * that active printer isn't us (recursive printk?).
2032 	 */
2033 	if (!spin) {
2034 		printk_safe_exit_irqrestore(flags);
2035 		return 0;
2036 	}
2037 
2038 	/* We spin waiting for the owner to release us */
2039 	spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
2040 	/* Owner will clear console_waiter on hand off */
2041 	while (READ_ONCE(console_waiter))
2042 		cpu_relax();
2043 	spin_release(&console_owner_dep_map, _THIS_IP_);
2044 
2045 	printk_safe_exit_irqrestore(flags);
2046 	/*
2047 	 * The owner passed the console lock to us.
2048 	 * Since we did not spin on console lock, annotate
2049 	 * this as a trylock. Otherwise lockdep will
2050 	 * complain.
2051 	 */
2052 	mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
2053 
2054 	return 1;
2055 }
2056 
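/*
 * Illustrative owner-side pairing of the two helpers above (a
 * simplified sketch only; the real sequence lives in
 * __console_emit_next_record() further below):
 *
 *	printk_safe_enter_irqsave(flags);
 *	console_lock_spinning_enable();
 *	con->write(con, text, len);		// must not sleep in here
 *	if (console_lock_spinning_disable_and_check()) {
 *		// A waiter took over console_lock; the lock and any
 *		// state it protects must not be touched after this.
 *		printk_safe_exit_irqrestore(flags);
 *		return;
 *	}
 *	printk_safe_exit_irqrestore(flags);
 */
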
2057 /*
2058  * Call the specified console driver, asking it to write out the specified
2059  * text and length. If @dropped_text is non-NULL and any records have been
2060  * dropped, a dropped message will be written out first.
2061  */
2062 static void call_console_driver(struct console *con, const char *text, size_t len,
2063 				char *dropped_text)
2064 {
2065 	size_t dropped_len;
2066 
2067 	if (con->dropped && dropped_text) {
2068 		dropped_len = snprintf(dropped_text, DROPPED_TEXT_MAX,
2069 				       "** %lu printk messages dropped **\n",
2070 				       con->dropped);
2071 		con->dropped = 0;
2072 		con->write(con, dropped_text, dropped_len);
2073 	}
2074 
2075 	con->write(con, text, len);
2076 }
2077 
2078 /*
2079  * Recursion is tracked separately on each CPU. If NMIs are supported, an
2080  * additional NMI context per CPU is also separately tracked. Until per-CPU
2081  * data is available, a separate "early tracking" is performed.
2082  */
2083 static DEFINE_PER_CPU(u8, printk_count);
2084 static u8 printk_count_early;
2085 #ifdef CONFIG_HAVE_NMI
2086 static DEFINE_PER_CPU(u8, printk_count_nmi);
2087 static u8 printk_count_nmi_early;
2088 #endif
2089 
2090 /*
2091  * Recursion is limited to keep the output sane. printk() should not require
2092  * more than 1 level of recursion (allowing, for example, printk() to trigger
2093  * a WARN), but a higher value is used in case some printk-internal errors
2094  * exist, such as the ringbuffer validation checks failing.
2095  */
2096 #define PRINTK_MAX_RECURSION 3
2097 
2098 /*
2099  * Return a pointer to the dedicated counter for the CPU+context of the
2100  * caller.
2101  */
2102 static u8 *__printk_recursion_counter(void)
2103 {
2104 #ifdef CONFIG_HAVE_NMI
2105 	if (in_nmi()) {
2106 		if (printk_percpu_data_ready())
2107 			return this_cpu_ptr(&printk_count_nmi);
2108 		return &printk_count_nmi_early;
2109 	}
2110 #endif
2111 	if (printk_percpu_data_ready())
2112 		return this_cpu_ptr(&printk_count);
2113 	return &printk_count_early;
2114 }
2115 
2116 /*
2117  * Enter recursion tracking. Interrupts are disabled to simplify tracking.
2118  * The caller must check the boolean return value to see if the recursion is
2119  * allowed. On failure, interrupts are not disabled.
2120  *
2121  * @recursion_ptr must be a variable of type (u8 *) and is the same variable
2122  * that is passed to printk_exit_irqrestore().
2123  */
2124 #define printk_enter_irqsave(recursion_ptr, flags)	\
2125 ({							\
2126 	bool success = true;				\
2127 							\
2128 	typecheck(u8 *, recursion_ptr);			\
2129 	local_irq_save(flags);				\
2130 	(recursion_ptr) = __printk_recursion_counter();	\
2131 	if (*(recursion_ptr) > PRINTK_MAX_RECURSION) {	\
2132 		local_irq_restore(flags);		\
2133 		success = false;			\
2134 	} else {					\
2135 		(*(recursion_ptr))++;			\
2136 	}						\
2137 	success;					\
2138 })
2139 
2140 /* Exit recursion tracking, restoring interrupts. */
2141 #define printk_exit_irqrestore(recursion_ptr, flags)	\
2142 	do {						\
2143 		typecheck(u8 *, recursion_ptr);		\
2144 		(*(recursion_ptr))--;			\
2145 		local_irq_restore(flags);		\
2146 	} while (0)
2147 
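/*
 * Typical usage of the recursion-tracking pair (sketch only;
 * vprintk_store() below is the real call site):
 *
 *	u8 *recursion_ptr;
 *	unsigned long irqflags;
 *
 *	if (!printk_enter_irqsave(recursion_ptr, irqflags))
 *		return 0;	// recursion limit hit: drop the message
 *	// ... do the work with interrupts disabled ...
 *	printk_exit_irqrestore(recursion_ptr, irqflags);
 */
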
2148 int printk_delay_msec __read_mostly;
2149 
2150 static inline void printk_delay(int level)
2151 {
2152 	boot_delay_msec(level);
2153 
2154 	if (unlikely(printk_delay_msec)) {
2155 		int m = printk_delay_msec;
2156 
2157 		while (m--) {
2158 			mdelay(1);
2159 			touch_nmi_watchdog();
2160 		}
2161 	}
2162 }
2163 
2164 static inline u32 printk_caller_id(void)
2165 {
2166 	return in_task() ? task_pid_nr(current) :
2167 		0x80000000 + smp_processor_id();
2168 }
2169 
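/*
 * For example, printk_caller_id() returns 1234 for a printk() issued
 * by a task with PID 1234, and 0x80000002 for one issued from
 * interrupt context on CPU 2. The high bit distinguishes CPU numbers
 * from PIDs (the id is shown when CONFIG_PRINTK_CALLER is enabled).
 */
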
2170 /**
2171  * printk_parse_prefix - Parse level and control flags.
2172  *
2173  * @text:     The NUL-terminated text message.
2174  * @level:    A pointer to the current level value, will be updated.
2175  * @flags:    A pointer to the current printk_info flags, will be updated.
2176  *
2177  * @level may be NULL if the caller is not interested in the parsed value.
2178  * Otherwise the variable pointed to by @level must be set to
2179  * LOGLEVEL_DEFAULT in order to be updated with the parsed value.
2180  *
2181  * @flags may be NULL if the caller is not interested in the parsed value.
2182  * Otherwise the variable pointed to by @flags will be OR'd with the parsed
2183  * value.
2184  *
2185  * Return: The length of the parsed level and control flags.
2186  */
2187 u16 printk_parse_prefix(const char *text, int *level,
2188 			enum printk_info_flags *flags)
2189 {
2190 	u16 prefix_len = 0;
2191 	int kern_level;
2192 
2193 	while (*text) {
2194 		kern_level = printk_get_level(text);
2195 		if (!kern_level)
2196 			break;
2197 
2198 		switch (kern_level) {
2199 		case '0' ... '7':
2200 			if (level && *level == LOGLEVEL_DEFAULT)
2201 				*level = kern_level - '0';
2202 			break;
2203 		case 'c':	/* KERN_CONT */
2204 			if (flags)
2205 				*flags |= LOG_CONT;
2206 		}
2207 
2208 		prefix_len += 2;
2209 		text += 2;
2210 	}
2211 
2212 	return prefix_len;
2213 }
2214 
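/*
 * Worked example (illustrative): for text = "\0014Hello" (i.e.
 * KERN_WARNING "Hello"), printk_parse_prefix() returns 2 and sets
 * *level to 4. For text = "\0014\001cHello" (KERN_WARNING KERN_CONT
 * "Hello") it returns 4, sets *level to 4, and ORs LOG_CONT into
 * *flags; the caller then skips the prefix and stores only "Hello".
 */
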
2215 __printf(5, 0)
2216 static u16 printk_sprint(char *text, u16 size, int facility,
2217 			 enum printk_info_flags *flags, const char *fmt,
2218 			 va_list args)
2219 {
2220 	u16 text_len;
2221 
2222 	text_len = vscnprintf(text, size, fmt, args);
2223 
2224 	/* Mark and strip a trailing newline. */
2225 	if (text_len && text[text_len - 1] == '\n') {
2226 		text_len--;
2227 		*flags |= LOG_NEWLINE;
2228 	}
2229 
2230 	/* Strip log level and control flags. */
2231 	if (facility == 0) {
2232 		u16 prefix_len;
2233 
2234 		prefix_len = printk_parse_prefix(text, NULL, NULL);
2235 		if (prefix_len) {
2236 			text_len -= prefix_len;
2237 			memmove(text, text + prefix_len, text_len);
2238 		}
2239 	}
2240 
2241 	trace_console_rcuidle(text, text_len);
2242 
2243 	return text_len;
2244 }
2245 
2246 __printf(4, 0)
2247 int vprintk_store(int facility, int level,
2248 		  const struct dev_printk_info *dev_info,
2249 		  const char *fmt, va_list args)
2250 {
2251 	struct prb_reserved_entry e;
2252 	enum printk_info_flags flags = 0;
2253 	struct printk_record r;
2254 	unsigned long irqflags;
2255 	u16 trunc_msg_len = 0;
2256 	char prefix_buf[8];
2257 	u8 *recursion_ptr;
2258 	u16 reserve_size;
2259 	va_list args2;
2260 	u32 caller_id;
2261 	u16 text_len;
2262 	int ret = 0;
2263 	u64 ts_nsec;
2264 
2265 	if (!printk_enter_irqsave(recursion_ptr, irqflags))
2266 		return 0;
2267 
2268 	/*
2269 	 * Since the duration of printk() can vary depending on the message
2270 	 * and state of the ringbuffer, grab the timestamp now so that it is
2271 	 * close to the call of printk(). This provides a more deterministic
2272 	 * timestamp with respect to the caller.
2273 	 */
2274 	ts_nsec = local_clock();
2275 
2276 	caller_id = printk_caller_id();
2277 
2278 	/*
2279 	 * The sprintf needs to come first since the syslog prefix might be
2280 	 * passed in as a parameter. An extra byte must be reserved so that
2281 	 * later the vscnprintf() into the reserved buffer has room for the
2282 	 * terminating '\0', which is not counted by vsnprintf().
2283 	 */
2284 	va_copy(args2, args);
2285 	reserve_size = vsnprintf(&prefix_buf[0], sizeof(prefix_buf), fmt, args2) + 1;
2286 	va_end(args2);
2287 
2288 	if (reserve_size > LOG_LINE_MAX)
2289 		reserve_size = LOG_LINE_MAX;
2290 
2291 	/* Extract log level or control flags. */
2292 	if (facility == 0)
2293 		printk_parse_prefix(&prefix_buf[0], &level, &flags);
2294 
2295 	if (level == LOGLEVEL_DEFAULT)
2296 		level = default_message_loglevel;
2297 
2298 	if (dev_info)
2299 		flags |= LOG_NEWLINE;
2300 
2301 	if (flags & LOG_CONT) {
2302 		prb_rec_init_wr(&r, reserve_size);
2303 		if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) {
2304 			text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
2305 						 facility, &flags, fmt, args);
2306 			r.info->text_len += text_len;
2307 
2308 			if (flags & LOG_NEWLINE) {
2309 				r.info->flags |= LOG_NEWLINE;
2310 				prb_final_commit(&e);
2311 			} else {
2312 				prb_commit(&e);
2313 			}
2314 
2315 			ret = text_len;
2316 			goto out;
2317 		}
2318 	}
2319 
2320 	/*
2321 	 * Explicitly initialize the record before every prb_reserve() call.
2322 	 * prb_reserve_in_last() and prb_reserve() purposely invalidate the
2323 	 * structure when they fail.
2324 	 */
2325 	prb_rec_init_wr(&r, reserve_size);
2326 	if (!prb_reserve(&e, prb, &r)) {
2327 		/* truncate the message if it is too long for empty buffer */
2328 		truncate_msg(&reserve_size, &trunc_msg_len);
2329 
2330 		prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
2331 		if (!prb_reserve(&e, prb, &r))
2332 			goto out;
2333 	}
2334 
2335 	/* fill message */
2336 	text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
2337 	if (trunc_msg_len)
2338 		memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
2339 	r.info->text_len = text_len + trunc_msg_len;
2340 	r.info->facility = facility;
2341 	r.info->level = level & 7;
2342 	r.info->flags = flags & 0x1f;
2343 	r.info->ts_nsec = ts_nsec;
2344 	r.info->caller_id = caller_id;
2345 	if (dev_info)
2346 		memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
2347 
2348 	/* A message without a trailing newline can be continued. */
2349 	if (!(flags & LOG_NEWLINE))
2350 		prb_commit(&e);
2351 	else
2352 		prb_final_commit(&e);
2353 
2354 	ret = text_len + trunc_msg_len;
2355 out:
2356 	printk_exit_irqrestore(recursion_ptr, irqflags);
2357 	return ret;
2358 }
2359 
2360 asmlinkage int vprintk_emit(int facility, int level,
2361 			    const struct dev_printk_info *dev_info,
2362 			    const char *fmt, va_list args)
2363 {
2364 	int printed_len;
2365 	bool in_sched = false;
2366 
2367 	/* Suppress unimportant messages after panic happens */
2368 	if (unlikely(suppress_printk))
2369 		return 0;
2370 
2371 	if (unlikely(suppress_panic_printk) &&
2372 	    atomic_read(&panic_cpu) != raw_smp_processor_id())
2373 		return 0;
2374 
2375 	if (level == LOGLEVEL_SCHED) {
2376 		level = LOGLEVEL_DEFAULT;
2377 		in_sched = true;
2378 	}
2379 
2380 	printk_delay(level);
2381 
2382 	printed_len = vprintk_store(facility, level, dev_info, fmt, args);
2383 
2384 	/* If called from the scheduler, we cannot call up(). */
2385 	if (!in_sched && allow_direct_printing()) {
2386 		/*
2387 		 * The caller may be holding system-critical or
2388 		 * timing-sensitive locks. Disable preemption during direct
2389 		 * printing of all remaining records to all consoles so that
2390 		 * this context can return as soon as possible. Hopefully
2391 		 * another printk() caller will take over the printing.
2392 		 */
2393 		preempt_disable();
2394 		/*
2395 		 * Try to acquire and then immediately release the console
2396 		 * semaphore. The release will print out buffers. With the
2397 		 * spinning variant, this context tries to take over the
2398 		 * printing from another printing context.
2399 		 */
2400 		if (console_trylock_spinning())
2401 			console_unlock();
2402 		preempt_enable();
2403 	}
2404 
2405 	wake_up_klogd();
2406 	return printed_len;
2407 }
2408 EXPORT_SYMBOL(vprintk_emit);
2409 
2410 int vprintk_default(const char *fmt, va_list args)
2411 {
2412 	return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
2413 }
2414 EXPORT_SYMBOL_GPL(vprintk_default);
2415 
2416 asmlinkage __visible int _printk(const char *fmt, ...)
2417 {
2418 	va_list args;
2419 	int r;
2420 
2421 	va_start(args, fmt);
2422 	r = vprintk(fmt, args);
2423 	va_end(args);
2424 
2425 	return r;
2426 }
2427 EXPORT_SYMBOL(_printk);
2428 
2429 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
2430 
2431 static void printk_start_kthread(struct console *con);
2432 
2433 #else /* CONFIG_PRINTK */
2434 
2435 #define CONSOLE_LOG_MAX		0
2436 #define DROPPED_TEXT_MAX	0
2437 #define printk_time		false
2438 
2439 #define prb_read_valid(rb, seq, r)	false
2440 #define prb_first_valid_seq(rb)		0
2441 #define prb_next_seq(rb)		0
2442 
2443 static u64 syslog_seq;
2444 
2445 static size_t record_print_text(const struct printk_record *r,
2446 				bool syslog, bool time)
2447 {
2448 	return 0;
2449 }
2450 static ssize_t info_print_ext_header(char *buf, size_t size,
2451 				     struct printk_info *info)
2452 {
2453 	return 0;
2454 }
2455 static ssize_t msg_print_ext_body(char *buf, size_t size,
2456 				  char *text, size_t text_len,
2457 				  struct dev_printk_info *dev_info) { return 0; }
2458 static void console_lock_spinning_enable(void) { }
2459 static int console_lock_spinning_disable_and_check(void) { return 0; }
2460 static void call_console_driver(struct console *con, const char *text, size_t len,
2461 				char *dropped_text)
2462 {
2463 }
2464 static bool suppress_message_printing(int level) { return false; }
2465 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
2466 static void printk_start_kthread(struct console *con) { }
2467 static bool allow_direct_printing(void) { return true; }
2468 
2469 #endif /* CONFIG_PRINTK */
2470 
2471 #ifdef CONFIG_EARLY_PRINTK
2472 struct console *early_console;
2473 
2474 asmlinkage __visible void early_printk(const char *fmt, ...)
2475 {
2476 	va_list ap;
2477 	char buf[512];
2478 	int n;
2479 
2480 	if (!early_console)
2481 		return;
2482 
2483 	va_start(ap, fmt);
2484 	n = vscnprintf(buf, sizeof(buf), fmt, ap);
2485 	va_end(ap);
2486 
2487 	early_console->write(early_console, buf, n);
2488 }
2489 #endif
2490 
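/*
 * Example (sketch): early_printk() only produces output once an early
 * console has been registered, e.g. on x86 via a boot parameter such
 * as "earlyprintk=serial,ttyS0,115200". It can then be called before
 * the regular console machinery is up:
 *
 *	early_printk("early boot: CPU%d is up\n", smp_processor_id());
 */
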
2491 static void set_user_specified(struct console_cmdline *c, bool user_specified)
2492 {
2493 	if (!user_specified)
2494 		return;
2495 
2496 	/*
2497 	 * @c console was defined by the user on the command line.
2498 	 * Do not clear it when the same console is added again by SPCR or the device tree.
2499 	 */
2500 	c->user_specified = true;
2501 	/* At least one console defined by the user on the command line. */
2502 	console_set_on_cmdline = 1;
2503 }
2504 
2505 static int __add_preferred_console(char *name, int idx, char *options,
2506 				   char *brl_options, bool user_specified)
2507 {
2508 	struct console_cmdline *c;
2509 	int i;
2510 
2511 	/*
2512 	 *	See if this tty is not yet registered, and
2513 	 *	if we have a slot free.
2514 	 */
2515 	for (i = 0, c = console_cmdline;
2516 	     i < MAX_CMDLINECONSOLES && c->name[0];
2517 	     i++, c++) {
2518 		if (strcmp(c->name, name) == 0 && c->index == idx) {
2519 			if (!brl_options)
2520 				preferred_console = i;
2521 			set_user_specified(c, user_specified);
2522 			return 0;
2523 		}
2524 	}
2525 	if (i == MAX_CMDLINECONSOLES)
2526 		return -E2BIG;
2527 	if (!brl_options)
2528 		preferred_console = i;
2529 	strlcpy(c->name, name, sizeof(c->name));
2530 	c->options = options;
2531 	set_user_specified(c, user_specified);
2532 	braille_set_options(c, brl_options);
2533 
2534 	c->index = idx;
2535 	return 0;
2536 }
2537 
2538 static int __init console_msg_format_setup(char *str)
2539 {
2540 	if (!strcmp(str, "syslog"))
2541 		console_msg_format = MSG_FORMAT_SYSLOG;
2542 	if (!strcmp(str, "default"))
2543 		console_msg_format = MSG_FORMAT_DEFAULT;
2544 	return 1;
2545 }
2546 __setup("console_msg_format=", console_msg_format_setup);
2547 
2548 /*
2549  * Set up a console.  Called via do_early_param() in init/main.c
2550  * for each "console=" parameter in the boot command line.
2551  */
2552 static int __init console_setup(char *str)
2553 {
2554 	char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for "ttyS" */
2555 	char *s, *options, *brl_options = NULL;
2556 	int idx;
2557 
2558 	/*
2559 	 * console="" or console=null have been suggested as a way to
2560 	 * disable console output. Use ttynull that has been created
2561 	 * for exactly this purpose.
2562 	 */
2563 	if (str[0] == 0 || strcmp(str, "null") == 0) {
2564 		__add_preferred_console("ttynull", 0, NULL, NULL, true);
2565 		return 1;
2566 	}
2567 
2568 	if (_braille_console_setup(&str, &brl_options))
2569 		return 1;
2570 
2571 	/*
2572 	 * Decode str into name, index, options.
2573 	 */
2574 	if (str[0] >= '0' && str[0] <= '9') {
2575 		strcpy(buf, "ttyS");
2576 		strncpy(buf + 4, str, sizeof(buf) - 5);
2577 	} else {
2578 		strncpy(buf, str, sizeof(buf) - 1);
2579 	}
2580 	buf[sizeof(buf) - 1] = 0;
2581 	options = strchr(str, ',');
2582 	if (options)
2583 		*(options++) = 0;
2584 #ifdef __sparc__
2585 	if (!strcmp(str, "ttya"))
2586 		strcpy(buf, "ttyS0");
2587 	if (!strcmp(str, "ttyb"))
2588 		strcpy(buf, "ttyS1");
2589 #endif
2590 	for (s = buf; *s; s++)
2591 		if (isdigit(*s) || *s == ',')
2592 			break;
2593 	idx = simple_strtoul(s, NULL, 10);
2594 	*s = 0;
2595 
2596 	__add_preferred_console(buf, idx, options, brl_options, true);
2597 	return 1;
2598 }
2599 __setup("console=", console_setup);
2600 
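/*
 * Worked examples (illustrative) of the console_setup() decoding above:
 *
 *	console=ttyS0,115200n8	-> name "ttyS", idx 0, options "115200n8"
 *	console=1		-> treated as "ttyS1" (bare digits imply ttyS)
 *	console=null		-> ttynull driver, output is discarded
 */
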
2601 /**
2602  * add_preferred_console - add a device to the list of preferred consoles.
2603  * @name: device name
2604  * @idx: device index
2605  * @options: options for this console
2606  *
2607  * The last preferred console added will be used for kernel messages
2608  * and stdin/out/err for init.  Normally this is used by console_setup
2609  * above to handle user-supplied console arguments; however it can also
2610  * be used by arch-specific code either to override the user or more
2611  * commonly to provide a default console (i.e. from PROM variables) when
2612  * the user has not supplied one.
2613  */
2614 int add_preferred_console(char *name, int idx, char *options)
2615 {
2616 	return __add_preferred_console(name, idx, options, NULL, false);
2617 }
2618 
2619 bool console_suspend_enabled = true;
2620 EXPORT_SYMBOL(console_suspend_enabled);
2621 
2622 static int __init console_suspend_disable(char *str)
2623 {
2624 	console_suspend_enabled = false;
2625 	return 1;
2626 }
2627 __setup("no_console_suspend", console_suspend_disable);
2628 module_param_named(console_suspend, console_suspend_enabled,
2629 		bool, S_IRUGO | S_IWUSR);
2630 MODULE_PARM_DESC(console_suspend, "suspend console during suspend"
2631 	" and hibernate operations");
2632 
2633 static bool printk_console_no_auto_verbose;
2634 
2635 void console_verbose(void)
2636 {
2637 	if (console_loglevel && !printk_console_no_auto_verbose)
2638 		console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH;
2639 }
2640 EXPORT_SYMBOL_GPL(console_verbose);
2641 
2642 module_param_named(console_no_auto_verbose, printk_console_no_auto_verbose, bool, 0644);
2643 MODULE_PARM_DESC(console_no_auto_verbose, "Disable console loglevel raise to highest on oops/panic/etc");
2644 
2645 /**
2646  * suspend_console - suspend the console subsystem
2647  *
2648  * This disables printk() while we go into suspend states
2649  */
2650 void suspend_console(void)
2651 {
2652 	if (!console_suspend_enabled)
2653 		return;
2654 	pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
2655 	pr_flush(1000, true);
2656 	console_lock();
2657 	console_suspended = 1;
2658 	up_console_sem();
2659 }
2660 
2661 void resume_console(void)
2662 {
2663 	if (!console_suspend_enabled)
2664 		return;
2665 	down_console_sem();
2666 	console_suspended = 0;
2667 	console_unlock();
2668 	pr_flush(1000, true);
2669 }
2670 
2671 /**
2672  * console_cpu_notify - print deferred console messages after CPU hotplug
2673  * @cpu: unused
2674  *
2675  * If printk() is called from a CPU that is not online yet, the messages
2676  * will be printed on the console only if there are CON_ANYTIME consoles.
2677  * This function is called when a new CPU comes online (or fails to come
2678  * up) or goes offline.
2679  */
2680 static int console_cpu_notify(unsigned int cpu)
2681 {
2682 	if (!cpuhp_tasks_frozen) {
2683 		/* If trylock fails, someone else is doing the printing */
2684 		if (console_trylock())
2685 			console_unlock();
2686 		else {
2687 			/*
2688 			 * If a new CPU comes online, the conditions for
2689 			 * printer_should_wake() may have changed for some
2690 			 * kthread printer with !CON_ANYTIME.
2691 			 */
2692 			wake_up_klogd();
2693 		}
2694 	}
2695 	return 0;
2696 }
2697 
2698 /**
2699  * console_lock - lock the console system for exclusive use.
2700  *
2701  * Acquires a lock which guarantees that the caller has
2702  * exclusive access to the console system and the console_drivers list.
2703  *
2704  * Can sleep, returns nothing.
2705  */
2706 void console_lock(void)
2707 {
2708 	might_sleep();
2709 
2710 	down_console_sem();
2711 	if (console_suspended)
2712 		return;
2713 	console_kthreads_block();
2714 	console_may_schedule = 1;
2715 }
2716 EXPORT_SYMBOL(console_lock);
2717 
2718 /**
2719  * console_trylock - try to lock the console system for exclusive use.
2720  *
2721  * Try to acquire a lock which guarantees that the caller has exclusive
2722  * access to the console system and the console_drivers list.
2723  *
2724  * returns 1 on success, and 0 on failure to acquire the lock.
2725  */
2726 int console_trylock(void)
2727 {
2728 	if (down_trylock_console_sem())
2729 		return 0;
2730 	if (console_suspended) {
2731 		up_console_sem();
2732 		return 0;
2733 	}
2734 	if (!console_kthreads_atomic_tryblock()) {
2735 		up_console_sem();
2736 		return 0;
2737 	}
2738 	console_may_schedule = 0;
2739 	return 1;
2740 }
2741 EXPORT_SYMBOL(console_trylock);
2742 
2743 /*
2744  * This is used to help to make sure that certain paths within the VT code are
2745  * running with the console lock held. It is definitely not the perfect debug
2746  * tool (it is not known if the VT code is the task holding the console lock),
2747  * but it helps tracking those weird code paths in the console code such as
2748  * but it helps track those weird code paths in the console code, such as
2749  * console printing may occur.
2750  *
2751  * Note: This returns true when the console is suspended but is not locked.
2752  *       This is intentional because the VT code must consider that situation
2753  *       the same as if the console was locked.
2754  */
2755 int is_console_locked(void)
2756 {
2757 	return (console_kthreads_blocked || atomic_read(&console_kthreads_active));
2758 }
2759 EXPORT_SYMBOL(is_console_locked);
2760 
2761 /*
2762  * Return true when this CPU should unlock console_sem without pushing all
2763  * messages to the console. This reduces the chance that the console is
2764  * locked when the panic CPU tries to use it.
2765  */
2766 static bool abandon_console_lock_in_panic(void)
2767 {
2768 	if (!panic_in_progress())
2769 		return false;
2770 
2771 	/*
2772 	 * We can use raw_smp_processor_id() here because it is impossible for
2773 	 * the task to be migrated to the panic_cpu, or away from it. If
2774 	 * panic_cpu has already been set, and we're not currently executing on
2775 	 * that CPU, then we never will be.
2776 	 */
2777 	return atomic_read(&panic_cpu) != raw_smp_processor_id();
2778 }
2779 
2780 static inline bool __console_is_usable(short flags)
2781 {
2782 	if (!(flags & CON_ENABLED))
2783 		return false;
2784 
2785 	/*
2786 	 * Console drivers may assume that per-cpu resources have been
2787 	 * allocated. So unless they're explicitly marked as being able to
2788 	 * cope (CON_ANYTIME) don't call them until this CPU is officially up.
2789 	 */
2790 	if (!cpu_online(raw_smp_processor_id()) &&
2791 	    !(flags & CON_ANYTIME))
2792 		return false;
2793 
2794 	return true;
2795 }
2796 
2797 /*
2798  * Check if the given console is currently capable and allowed to print
2799  * records.
2800  *
2801  * Requires holding the console_lock.
2802  */
2803 static inline bool console_is_usable(struct console *con)
2804 {
2805 	if (!con->write)
2806 		return false;
2807 
2808 	return __console_is_usable(con->flags);
2809 }
2810 
2811 static void __console_unlock(void)
2812 {
2813 	/*
2814 	 * Depending on whether console_lock() or console_trylock() was used,
2815 	 * appropriately allow the kthread printers to continue.
2816 	 */
2817 	if (console_kthreads_blocked)
2818 		console_kthreads_unblock();
2819 	else
2820 		console_kthreads_atomic_unblock();
2821 
2822 	/*
2823 	 * New records may have arrived while the console was locked.
2824 	 * Wake the kthread printers to print them.
2825 	 */
2826 	wake_up_klogd();
2827 
2828 	up_console_sem();
2829 }
2830 
2831 /*
2832  * Print one record for the given console. The record printed is whatever
2833  * record is the next available record for the given console.
2834  *
2835  * @text is a buffer of size CONSOLE_LOG_MAX.
2836  *
2837  * If extended messages should be printed, @ext_text is a buffer of size
2838  * CONSOLE_EXT_LOG_MAX. Otherwise @ext_text must be NULL.
2839  *
2840  * If dropped messages should be printed, @dropped_text is a buffer of size
2841  * DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL.
2842  *
2843  * @handover will be set to true if a printk waiter has taken over the
2844  * console_lock, in which case the caller is no longer holding the
2845  * console_lock. Otherwise it is set to false. A NULL pointer may be provided
2846  * to disable allowing the console_lock to be taken over by a printk waiter.
2847  *
2848  * Returns false if the given console has no next record to print, otherwise
2849  * true.
2850  *
2851  * Requires the console_lock if @handover is non-NULL.
2852  * Requires con->lock otherwise.
2853  */
2854 static bool __console_emit_next_record(struct console *con, char *text, char *ext_text,
2855 				       char *dropped_text, bool *handover)
2856 {
2857 	static atomic_t panic_console_dropped = ATOMIC_INIT(0);
2858 	struct printk_info info;
2859 	struct printk_record r;
2860 	unsigned long flags;
2861 	char *write_text;
2862 	size_t len;
2863 
2864 	prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
2865 
2866 	if (handover)
2867 		*handover = false;
2868 
2869 	if (!prb_read_valid(prb, con->seq, &r))
2870 		return false;
2871 
2872 	if (con->seq != r.info->seq) {
2873 		con->dropped += r.info->seq - con->seq;
2874 		con->seq = r.info->seq;
2875 		if (panic_in_progress() &&
2876 		    atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) {
2877 			suppress_panic_printk = 1;
2878 			pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
2879 		}
2880 	}
2881 
2882 	/* Skip record that has level above the console loglevel. */
2883 	/* Skip records that have a level above the console loglevel. */
2884 		con->seq++;
2885 		goto skip;
2886 	}
2887 
2888 	if (ext_text) {
2889 		write_text = ext_text;
2890 		len = info_print_ext_header(ext_text, CONSOLE_EXT_LOG_MAX, r.info);
2891 		len += msg_print_ext_body(ext_text + len, CONSOLE_EXT_LOG_MAX - len,
2892 					  &r.text_buf[0], r.info->text_len, &r.info->dev_info);
2893 	} else {
2894 		write_text = text;
2895 		len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
2896 	}
2897 
2898 	if (handover) {
2899 		/*
2900 		 * While actively printing out messages, if another printk()
2901 		 * were to occur on another CPU, it may wait for this one to
2902 		 * finish. This task can not be preempted if there is a
2903 		 * waiter waiting to take over.
2904 		 *
2905 		 * Interrupts are disabled because the hand over to a waiter
2906 		 * must not be interrupted until the hand over is completed
2907 		 * (@console_waiter is cleared).
2908 		 */
2909 		printk_safe_enter_irqsave(flags);
2910 		console_lock_spinning_enable();
2911 
2912 		/* don't trace irqsoff print latency */
2913 		stop_critical_timings();
2914 	}
2915 
2916 	call_console_driver(con, write_text, len, dropped_text);
2917 
2918 	con->seq++;
2919 
2920 	if (handover) {
2921 		start_critical_timings();
2922 		*handover = console_lock_spinning_disable_and_check();
2923 		printk_safe_exit_irqrestore(flags);
2924 	}
2925 skip:
2926 	return true;
2927 }
2928 
2929 /*
2930  * Print a record for a given console, but allow another printk() caller to
2931  * take over the console_lock and continue printing.
2932  *
2933  * Requires the console_lock, but depending on @handover after the call, the
2934  * caller may no longer have the console_lock.
2935  *
2936  * See __console_emit_next_record() for argument and return details.
2937  */
2938 static bool console_emit_next_record_transferable(struct console *con, char *text, char *ext_text,
2939 						  char *dropped_text, bool *handover)
2940 {
2941 	/*
2942 	 * Handovers are only supported if threaded printers are atomically
2943 	 * blocked. The context taking over the console_lock may be atomic.
2944 	 */
2945 	if (!console_kthreads_atomically_blocked()) {
2946 		*handover = false;
2947 		handover = NULL;
2948 	}
2949 
2950 	return __console_emit_next_record(con, text, ext_text, dropped_text, handover);
2951 }
2952 
2953 /*
2954  * Print out all remaining records to all consoles.
2955  *
2956  * @do_cond_resched is set by the caller. It can be true only in schedulable
2957  * context.
2958  *
2959  * @next_seq is set to the sequence number after the last available record.
2960  * The value is valid only when this function returns true. It means that all
2961  * usable consoles are completely flushed.
2962  *
2963  * @handover will be set to true if a printk waiter has taken over the
2964  * console_lock, in which case the caller is no longer holding the
2965  * console_lock. Otherwise it is set to false.
2966  *
2967  * Returns true when there was at least one usable console and all messages
2968  * were flushed to all usable consoles. A returned false informs the caller
2969  * that not everything was flushed (either there were no usable consoles, or
2970  * another context has taken over printing, or it is a panic situation and this
2971  * is not the panic CPU, or direct printing is not preferred). Regardless of the
2972  * reason, the caller should assume it is not useful to immediately try again.
2973  *
2974  * Requires the console_lock.
2975  */
2976 static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
2977 {
2978 	static char dropped_text[DROPPED_TEXT_MAX];
2979 	static char ext_text[CONSOLE_EXT_LOG_MAX];
2980 	static char text[CONSOLE_LOG_MAX];
2981 	bool any_usable = false;
2982 	struct console *con;
2983 	bool any_progress;
2984 
2985 	*next_seq = 0;
2986 	*handover = false;
2987 
2988 	do {
2989 		/* Let the kthread printers do the work if they can. */
2990 		if (!allow_direct_printing())
2991 			return false;
2992 
2993 		any_progress = false;
2994 
2995 		for_each_console(con) {
2996 			bool progress;
2997 
2998 			if (!console_is_usable(con))
2999 				continue;
3000 			any_usable = true;
3001 
3002 			if (con->flags & CON_EXTENDED) {
3003 				/* Extended consoles do not print "dropped messages". */
3004 				progress = console_emit_next_record_transferable(con, &text[0],
3005 								&ext_text[0], NULL, handover);
3006 			} else {
3007 				progress = console_emit_next_record_transferable(con, &text[0],
3008 								NULL, &dropped_text[0], handover);
3009 			}
3010 			if (*handover)
3011 				return false;
3012 
3013 			/* Track the next of the highest seq flushed. */
3014 			if (con->seq > *next_seq)
3015 				*next_seq = con->seq;
3016 
3017 			if (!progress)
3018 				continue;
3019 			any_progress = true;
3020 
3021 			/* Allow panic_cpu to take over the consoles safely. */
3022 			if (abandon_console_lock_in_panic())
3023 				return false;
3024 
3025 			if (do_cond_resched)
3026 				cond_resched();
3027 		}
3028 	} while (any_progress);
3029 
3030 	return any_usable;
3031 }
3032 
3033 /**
3034  * console_unlock - unlock the console system
3035  *
3036  * Releases the console_lock which the caller holds on the console system
3037  * and the console driver list.
3038  *
3039  * While the console_lock was held, console output may have been buffered
3040  * by printk().  If this is the case, console_unlock() emits
3041  * the output prior to releasing the lock.
3042  *
3043  * console_unlock() may be called from any context.
3044  */
3045 void console_unlock(void)
3046 {
3047 	bool do_cond_resched;
3048 	bool handover;
3049 	bool flushed;
3050 	u64 next_seq;
3051 
3052 	if (console_suspended) {
3053 		up_console_sem();
3054 		return;
3055 	}
3056 
3057 	/*
3058 	 * Console drivers are called with interrupts disabled, so
3059 	 * @console_may_schedule should be cleared before; however, we may
3060 	 * end up dumping a lot of lines, for example, if called from
3061 	 * console registration path, and should invoke cond_resched()
3062 	 * between lines if allowable.  Not doing so can cause a very long
3063 	 * scheduling stall on a slow console leading to RCU stall and
3064 	 * softlockup warnings which exacerbate the issue with more
3065 	 * messages practically incapacitating the system. Therefore, create
3066 	 * a local to use for the printing loop.
3067 	 */
3068 	do_cond_resched = console_may_schedule;
3069 
3070 	do {
3071 		console_may_schedule = 0;
3072 
3073 		flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
3074 		if (!handover)
3075 			__console_unlock();
3076 
3077 		/*
3078 		 * Abort if there was a failure to flush all messages to all
3079 		 * usable consoles. Either it is not possible to flush (in
3080 		 * which case it would be an infinite loop of retrying) or
3081 		 * another context has taken over printing.
3082 		 */
3083 		if (!flushed)
3084 			break;
3085 
3086 		/*
3087 		 * Some context may have added new records after
3088 		 * console_flush_all() but before unlocking the console.
3089 		 * Re-check if there is a new record to flush. If the trylock
3090 		 * fails, another context is already handling the printing.
3091 		 */
3092 	} while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
3093 }
3094 EXPORT_SYMBOL(console_unlock);
3095 
3096 /**
3097  * console_conditional_schedule - yield the CPU if required
3098  *
3099  * If the console code is currently allowed to sleep, and
3100  * if this CPU should yield the CPU to another task, do
3101  * so here.
3102  *
3103  * Must be called while holding the console_lock.
3104  */
3105 void __sched console_conditional_schedule(void)
3106 {
3107 	if (console_may_schedule)
3108 		cond_resched();
3109 }
3110 EXPORT_SYMBOL(console_conditional_schedule);
3111 
3112 void console_unblank(void)
3113 {
3114 	struct console *c;
3115 
3116 	/*
3117 	 * console_unblank can no longer be called in interrupt context unless
3118 	 * oops_in_progress is set to 1.
3119 	 */
3120 	if (oops_in_progress) {
3121 		if (down_trylock_console_sem() != 0)
3122 			return;
3123 		if (!console_kthreads_atomic_tryblock()) {
3124 			up_console_sem();
3125 			return;
3126 		}
3127 	} else
3128 		console_lock();
3129 
3130 	console_may_schedule = 0;
3131 	for_each_console(c)
3132 		if ((c->flags & CON_ENABLED) && c->unblank)
3133 			c->unblank();
3134 	console_unlock();
3135 
3136 	if (!oops_in_progress)
3137 		pr_flush(1000, true);
3138 }
3139 
3140 /**
3141  * console_flush_on_panic - flush console content on panic
3142  * @mode: flush all messages in buffer or just the pending ones
3143  *
3144  * Immediately output all pending messages no matter what.
3145  */
3146 void console_flush_on_panic(enum con_flush_mode mode)
3147 {
3148 	/*
3149 	 * If someone else is holding the console lock, trylock will fail
3150 	 * and may_schedule may be set.  Ignore and proceed to unlock so
3151 	 * that messages are flushed out.  As this can be called from any
3152 	 * context and we don't want to get preempted while flushing,
3153 	 * ensure may_schedule is cleared.
3154 	 */
3155 	console_trylock();
3156 	console_may_schedule = 0;
3157 
3158 	if (mode == CONSOLE_REPLAY_ALL) {
3159 		struct console *c;
3160 		u64 seq;
3161 
3162 		seq = prb_first_valid_seq(prb);
3163 		for_each_console(c)
3164 			c->seq = seq;
3165 	}
3166 	console_unlock();
3167 }
3168 
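/*
 * Example (sketch): panic() invokes this as a last-ditch effort, e.g.
 *
 *	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
 *
 * and the panic_print handling can additionally replay the entire
 * buffer by passing CONSOLE_REPLAY_ALL.
 */
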
3169 /*
3170  * Return the console tty driver structure and its associated index
3171  */
3172 struct tty_driver *console_device(int *index)
3173 {
3174 	struct console *c;
3175 	struct tty_driver *driver = NULL;
3176 
3177 	console_lock();
3178 	for_each_console(c) {
3179 		if (!c->device)
3180 			continue;
3181 		driver = c->device(c, index);
3182 		if (driver)
3183 			break;
3184 	}
3185 	console_unlock();
3186 	return driver;
3187 }
3188 
3189 /*
3190  * Prevent further output on the passed console device so that (for example)
3191  * serial drivers can disable console output before suspending a port, and can
3192  * re-enable output afterwards.
3193  */
3194 void console_stop(struct console *console)
3195 {
3196 	__pr_flush(console, 1000, true);
3197 	console_lock();
3198 	console->flags &= ~CON_ENABLED;
3199 	console_unlock();
3200 }
3201 EXPORT_SYMBOL(console_stop);
3202 
3203 void console_start(struct console *console)
3204 {
3205 	console_lock();
3206 	console->flags |= CON_ENABLED;
3207 	console_unlock();
3208 	__pr_flush(console, 1000, true);
3209 }
3210 EXPORT_SYMBOL(console_start);
3211 
3212 static int __read_mostly keep_bootcon;
3213 
3214 static int __init keep_bootcon_setup(char *str)
3215 {
3216 	keep_bootcon = 1;
3217 	pr_info("debug: skip boot console de-registration.\n");
3218 
3219 	return 0;
3220 }
3221 
3222 early_param("keep_bootcon", keep_bootcon_setup);
3223 
3224 /*
3225  * This is called by register_console() to try to match
3226  * the newly registered console with any of the ones selected
3227  * by either the command line or add_preferred_console() and
3228  * setup/enable it.
3229  *
3230  * Care needs to be taken with consoles that are statically
3231  * enabled, such as netconsole.
3232  */
3233 static int try_enable_preferred_console(struct console *newcon,
3234 					bool user_specified)
3235 {
3236 	struct console_cmdline *c;
3237 	int i, err;
3238 
3239 	for (i = 0, c = console_cmdline;
3240 	     i < MAX_CMDLINECONSOLES && c->name[0];
3241 	     i++, c++) {
3242 		if (c->user_specified != user_specified)
3243 			continue;
3244 		if (!newcon->match ||
3245 		    newcon->match(newcon, c->name, c->index, c->options) != 0) {
3246 			/* default matching */
3247 			BUILD_BUG_ON(sizeof(c->name) != sizeof(newcon->name));
3248 			if (strcmp(c->name, newcon->name) != 0)
3249 				continue;
3250 			if (newcon->index >= 0 &&
3251 			    newcon->index != c->index)
3252 				continue;
3253 			if (newcon->index < 0)
3254 				newcon->index = c->index;
3255 
3256 			if (_braille_register_console(newcon, c))
3257 				return 0;
3258 
3259 			if (newcon->setup &&
3260 			    (err = newcon->setup(newcon, c->options)) != 0)
3261 				return err;
3262 		}
3263 		newcon->flags |= CON_ENABLED;
3264 		if (i == preferred_console)
3265 			newcon->flags |= CON_CONSDEV;
3266 		return 0;
3267 	}
3268 
3269 	/*
3270 	 * Some consoles, such as pstore and netconsole, can be enabled even
3271 	 * without matching. Accept the pre-enabled consoles only when match()
3272 	 * and setup() had a chance to be called.
3273 	 */
3274 	if (newcon->flags & CON_ENABLED && c->user_specified == user_specified)
3275 		return 0;
3276 
3277 	return -ENOENT;
3278 }
3279 
3280 /* Try to enable the console unconditionally */
3281 static void try_enable_default_console(struct console *newcon)
3282 {
3283 	if (newcon->index < 0)
3284 		newcon->index = 0;
3285 
3286 	if (newcon->setup && newcon->setup(newcon, NULL) != 0)
3287 		return;
3288 
3289 	newcon->flags |= CON_ENABLED;
3290 
3291 	if (newcon->device)
3292 		newcon->flags |= CON_CONSDEV;
3293 }
3294 
3295 #define con_printk(lvl, con, fmt, ...)			\
3296 	printk(lvl pr_fmt("%sconsole [%s%d] " fmt),	\
3297 	       (con->flags & CON_BOOT) ? "boot" : "",	\
3298 	       con->name, con->index, ##__VA_ARGS__)
3299 
3300 /*
3301  * The console driver calls this routine during kernel initialization
3302  * to register the console printing procedure with printk() and to
3303  * print any messages that were printed by the kernel before the
3304  * console driver was initialized.
3305  *
3306  * This can happen pretty early during the boot process (because of
3307  * early_printk) - sometimes before setup_arch() completes - be careful
3308  * of what kernel features are used - they may not be initialised yet.
3309  *
3310  * There are two types of consoles - bootconsoles (early_printk) and
3311  * "real" consoles (everything which is not a bootconsole) which are
3312  * handled differently.
3313  *  - Any number of bootconsoles can be registered at any time.
3314  *  - As soon as a "real" console is registered, all bootconsoles
3315  *    will be unregistered automatically.
3316  *  - Once a "real" console is registered, any attempt to register a
3317  *    bootconsole will be rejected.
3318  */
3319 void register_console(struct console *newcon)
3320 {
3321 	struct console *con;
3322 	bool bootcon_enabled = false;
3323 	bool realcon_enabled = false;
3324 	int err;
3325 
3326 	for_each_console(con) {
3327 		if (WARN(con == newcon, "console '%s%d' already registered\n",
3328 					 con->name, con->index))
3329 			return;
3330 	}
3331 
3332 	for_each_console(con) {
3333 		if (con->flags & CON_BOOT)
3334 			bootcon_enabled = true;
3335 		else
3336 			realcon_enabled = true;
3337 	}
3338 
3339 	/* Do not register boot consoles when there already is a real one. */
3340 	if (newcon->flags & CON_BOOT && realcon_enabled) {
3341 		pr_info("Too late to register bootconsole %s%d\n",
3342 			newcon->name, newcon->index);
3343 		return;
3344 	}
3345 
3346 	/*
3347 	 * See if we want to enable this console driver by default.
3348 	 *
3349 	 * Nope when a console is preferred by the command line, device
3350 	 * tree, or SPCR.
3351 	 *
3352 	 * The first real console with tty binding (driver) wins. More
3353 	 * consoles might get enabled before the right one is found.
3354 	 *
3355 	 * Note that a console with tty binding will have CON_CONSDEV
3356 	 * flag set and will be first in the list.
3357 	 */
3358 	if (preferred_console < 0) {
3359 		if (!console_drivers || !console_drivers->device ||
3360 		    console_drivers->flags & CON_BOOT) {
3361 			try_enable_default_console(newcon);
3362 		}
3363 	}
3364 
3365 	/* See if this console matches one we selected on the command line */
3366 	err = try_enable_preferred_console(newcon, true);
3367 
3368 	/* If not, try to match against the platform default(s) */
3369 	if (err == -ENOENT)
3370 		err = try_enable_preferred_console(newcon, false);
3371 
3372 	/* printk() messages are not printed to the Braille console. */
3373 	if (err || newcon->flags & CON_BRL)
3374 		return;
3375 
3376 	/*
3377 	 * If we have a bootconsole, and are switching to a real console,
3378 	 * don't print everything out again, since when the boot console and
3379 	 * the real console are the same physical device, it's annoying to
3380 	 * see the beginning boot messages twice.
3381 	 */
3382 	if (bootcon_enabled &&
3383 	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
3384 		newcon->flags &= ~CON_PRINTBUFFER;
3385 	}
3386 
3387 	/*
3388 	 *	Put this console in the list - keep the
3389 	 *	preferred driver at the head of the list.
3390 	 */
3391 	console_lock();
3392 	if ((newcon->flags & CON_CONSDEV) || console_drivers == NULL) {
3393 		newcon->next = console_drivers;
3394 		console_drivers = newcon;
3395 		if (newcon->next)
3396 			newcon->next->flags &= ~CON_CONSDEV;
3397 		/* Ensure this flag is always set for the head of the list */
3398 		newcon->flags |= CON_CONSDEV;
3399 	} else {
3400 		newcon->next = console_drivers->next;
3401 		console_drivers->next = newcon;
3402 	}
3403 
3404 	if (newcon->flags & CON_EXTENDED)
3405 		nr_ext_console_drivers++;
3406 
3407 	newcon->dropped = 0;
3408 	newcon->thread = NULL;
3409 	newcon->blocked = true;
3410 	mutex_init(&newcon->lock);
3411 
3412 	if (newcon->flags & CON_PRINTBUFFER) {
3413 		/* Get a consistent copy of @syslog_seq. */
3414 		mutex_lock(&syslog_lock);
3415 		newcon->seq = syslog_seq;
3416 		mutex_unlock(&syslog_lock);
3417 	} else {
3418 		/* Begin with next message. */
3419 		newcon->seq = prb_next_seq(prb);
3420 	}
3421 
3422 	if (printk_kthreads_available)
3423 		printk_start_kthread(newcon);
3424 
3425 	console_unlock();
3426 	console_sysfs_notify();
3427 
3428 	/*
3429 	 * we get the "console xxx enabled" message on all the consoles -
3430 	 * boot consoles, real consoles, etc. This ensures that end
3431 	 * users know there might be something in the kernel's log buffer that
3432 	 * went to the bootconsole (that they do not see on the real console).
3433 	 * went to the bootconsole (that they do not see on the real console)
3434 	 */
3435 	con_printk(KERN_INFO, newcon, "enabled\n");
3436 	if (bootcon_enabled &&
3437 	    ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) &&
3438 	    !keep_bootcon) {
3439 		/* We need to iterate through all boot consoles, to make
3440 		 * sure we print everything out, before we unregister them.
3441 		 */
3442 		for_each_console(con)
3443 			if (con->flags & CON_BOOT)
3444 				unregister_console(con);
3445 	}
3446 }
3447 EXPORT_SYMBOL(register_console);
3448 
3449 int unregister_console(struct console *console)
3450 {
3451 	struct task_struct *thd;
3452 	struct console *con;
3453 	int res;
3454 
3455 	con_printk(KERN_INFO, console, "disabled\n");
3456 
3457 	res = _braille_unregister_console(console);
3458 	if (res < 0)
3459 		return res;
3460 	if (res > 0)
3461 		return 0;
3462 
3463 	res = -ENODEV;
3464 	console_lock();
3465 	if (console_drivers == console) {
3466 		console_drivers = console->next;
3467 		res = 0;
3468 	} else {
3469 		for_each_console(con) {
3470 			if (con->next == console) {
3471 				con->next = console->next;
3472 				res = 0;
3473 				break;
3474 			}
3475 		}
3476 	}
3477 
3478 	if (res)
3479 		goto out_disable_unlock;
3480 
3481 	if (console->flags & CON_EXTENDED)
3482 		nr_ext_console_drivers--;
3483 
3484 	/*
3485 	 * If this isn't the last console and it has CON_CONSDEV set, we
3486 	 * need to set it on the next preferred console.
3487 	 */
3488 	if (console_drivers != NULL && console->flags & CON_CONSDEV)
3489 		console_drivers->flags |= CON_CONSDEV;
3490 
3491 	console->flags &= ~CON_ENABLED;
3492 
3493 	/*
3494 	 * console->thread can only be cleared under the console lock. But
3495 	 * stopping the thread must be done without the console lock. The
3496 	 * task that clears @thread is the task that stops the kthread.
3497 	 */
3498 	thd = console->thread;
3499 	console->thread = NULL;
3500 
3501 	console_unlock();
3502 
3503 	if (thd)
3504 		kthread_stop(thd);
3505 
3506 	console_sysfs_notify();
3507 
3508 	if (console->exit)
3509 		res = console->exit(console);
3510 
3511 	return res;
3512 
3513 out_disable_unlock:
3514 	console->flags &= ~CON_ENABLED;
3515 	console_unlock();
3516 
3517 	return res;
3518 }
3519 EXPORT_SYMBOL(unregister_console);
3520 
3521 /*
3522  * Initialize the console device. This is called *early*, so
3523  * we can't necessarily depend on lots of kernel help here.
3524  * Just do some early initializations, and do the complex setup
3525  * later.
3526  */
3527 void __init console_init(void)
3528 {
3529 	int ret;
3530 	initcall_t call;
3531 	initcall_entry_t *ce;
3532 
3533 	/* Setup the default TTY line discipline. */
3534 	n_tty_init();
3535 
3536 	/*
3537 	 * set up the console device so that later boot sequences can
3538 	 * inform about problems etc..
3539 	 */
3540 	ce = __con_initcall_start;
3541 	trace_initcall_level("console");
3542 	while (ce < __con_initcall_end) {
3543 		call = initcall_from_entry(ce);
3544 		trace_initcall_start(call);
3545 		ret = call();
3546 		trace_initcall_finish(call, ret);
3547 		ce++;
3548 	}
3549 }
3550 
3551 /*
3552  * Some boot consoles access data that is in the init section and which will
3553  * be discarded after the initcalls have been run. To make sure that no code
3554  * will access this data, unregister the boot consoles in a late initcall.
3555  *
3556  * If for some reason, such as deferred probe or the driver being a loadable
3557  * module, the real console hasn't registered yet at this point, there will
3558  * be a brief interval in which no messages are logged to the console, which
3559  * makes it difficult to diagnose problems that occur during this time.
3560  *
3561  * To mitigate this problem somewhat, only unregister consoles whose memory
3562  * intersects with the init section. Note that all other boot consoles will
3563  * get unregistered when the real preferred console is registered.
3564  */
3565 static int __init printk_late_init(void)
3566 {
3567 	struct console *con;
3568 	int ret;
3569 
3570 	for_each_console(con) {
3571 		if (!(con->flags & CON_BOOT))
3572 			continue;
3573 
3574 		/* Check addresses that might be used for enabled consoles. */
3575 		if (init_section_intersects(con, sizeof(*con)) ||
3576 		    init_section_contains(con->write, 0) ||
3577 		    init_section_contains(con->read, 0) ||
3578 		    init_section_contains(con->device, 0) ||
3579 		    init_section_contains(con->unblank, 0) ||
3580 		    init_section_contains(con->data, 0)) {
3581 			/*
3582 			 * Please consider moving the reported consoles out
3583 			 * of the init section.
3584 			 */
3585 			pr_warn("bootconsole [%s%d] uses init memory and must be disabled even before the real one is ready\n",
3586 				con->name, con->index);
3587 			unregister_console(con);
3588 		}
3589 	}
3590 	ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
3591 					console_cpu_notify);
3592 	WARN_ON(ret < 0);
3593 	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
3594 					console_cpu_notify, NULL);
3595 	WARN_ON(ret < 0);
3596 	printk_sysctl_init();
3597 	return 0;
3598 }
3599 late_initcall(printk_late_init);
3600 
3601 static int __init printk_activate_kthreads(void)
3602 {
3603 	struct console *con;
3604 
3605 	console_lock();
3606 	printk_kthreads_available = true;
3607 	for_each_console(con)
3608 		printk_start_kthread(con);
3609 	console_unlock();
3610 
3611 	return 0;
3612 }
3613 early_initcall(printk_activate_kthreads);
3614 
3615 #if defined CONFIG_PRINTK
3616 /* If @con is specified, only wait for that console. Otherwise wait for all. */
3617 static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
3618 {
3619 	int remaining = timeout_ms;
3620 	struct console *c;
3621 	u64 last_diff = 0;
3622 	u64 printk_seq;
3623 	u64 diff;
3624 	u64 seq;
3625 
3626 	might_sleep();
3627 
3628 	seq = prb_next_seq(prb);
3629 
3630 	for (;;) {
3631 		diff = 0;
3632 
3633 		console_lock();
3634 		for_each_console(c) {
3635 			if (con && con != c)
3636 				continue;
3637 			if (!console_is_usable(c))
3638 				continue;
3639 			printk_seq = c->seq;
3640 			if (printk_seq < seq)
3641 				diff += seq - printk_seq;
3642 		}
3643 		console_unlock();
3644 
3645 		if (diff != last_diff && reset_on_progress)
3646 			remaining = timeout_ms;
3647 
3648 		if (diff == 0 || remaining == 0)
3649 			break;
3650 
3651 		if (remaining < 0) {
3652 			/* no timeout limit */
3653 			msleep(100);
3654 		} else if (remaining < 100) {
3655 			msleep(remaining);
3656 			remaining = 0;
3657 		} else {
3658 			msleep(100);
3659 			remaining -= 100;
3660 		}
3661 
3662 		last_diff = diff;
3663 	}
3664 
3665 	return (diff == 0);
3666 }
3667 
3668 /**
3669  * pr_flush() - Wait for printing threads to catch up.
3670  *
3671  * @timeout_ms:        The maximum time (in ms) to wait.
3672  * @reset_on_progress: Reset the timeout if forward progress is seen.
3673  *
3674  * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
3675  * represents infinite waiting.
3676  *
3677  * If @reset_on_progress is true, the timeout will be reset whenever any
3678  * printer has been seen to make some forward progress.
3679  *
3680  * Context: Process context. May sleep while acquiring console lock.
3681  * Return: true if all enabled printers are caught up.
3682  */
3683 bool pr_flush(int timeout_ms, bool reset_on_progress)
3684 {
3685 	return __pr_flush(NULL, timeout_ms, reset_on_progress);
3686 }
3687 EXPORT_SYMBOL(pr_flush);
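
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * before powering down, give the printing threads up to one second to
 * catch up, restarting the timeout whenever any console makes forward
 * progress.
 */
static void example_quiesce_consoles(void)
{
	if (!pr_flush(1000, true))
		pr_warn("example: console output may be incomplete\n");
}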
3688 
3689 static void __printk_fallback_preferred_direct(void)
3690 {
3691 	printk_prefer_direct_enter();
3692 	pr_err("falling back to preferred direct printing\n");
3693 	printk_kthreads_available = false;
3694 }
3695 
3696 /*
3697  * Enter preferred direct printing, but never exit. Mark console threads as
3698  * unavailable. The system is then forever in preferred direct printing and
3699  * any printing threads will exit.
3700  *
3701  * Must *not* be called under console_lock. Use
3702  * __printk_fallback_preferred_direct() if already holding console_lock.
3703  */
3704 static void printk_fallback_preferred_direct(void)
3705 {
3706 	console_lock();
3707 	__printk_fallback_preferred_direct();
3708 	console_unlock();
3709 }
3710 
3711 /*
3712  * Print a record for a given console, not allowing another printk() caller
3713  * to take over. This is appropriate for contexts that do not have the
3714  * console_lock.
3715  *
3716  * See __console_emit_next_record() for argument and return details.
3717  */
3718 static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
3719 				     char *dropped_text)
3720 {
3721 	return __console_emit_next_record(con, text, ext_text, dropped_text, NULL);
3722 }
3723 
3724 static bool printer_should_wake(struct console *con, u64 seq)
3725 {
3726 	short flags;
3727 
3728 	if (kthread_should_stop() || !printk_kthreads_available)
3729 		return true;
3730 
3731 	if (con->blocked ||
3732 	    console_kthreads_atomically_blocked()) {
3733 		return false;
3734 	}
3735 
3736 	/*
3737 	 * This is an unsafe read from con->flags, but a false positive is
3738 	 * not a problem. Worst case, it allows the printer to wake up even
3739 	 * though the console is disabled. The printer will notice this when
3740 	 * attempting to print and will instead go back to sleep.
3741 	 */
3742 	flags = data_race(READ_ONCE(con->flags));
3743 
3744 	if (!__console_is_usable(flags))
3745 		return false;
3746 
3747 	return prb_read_valid(prb, seq, NULL);
3748 }
3749 
3750 static int printk_kthread_func(void *data)
3751 {
3752 	struct console *con = data;
3753 	char *dropped_text = NULL;
3754 	char *ext_text = NULL;
3755 	u64 seq = 0;
3756 	char *text;
3757 	int error;
3758 
3759 	text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
3760 	if (!text) {
3761 		con_printk(KERN_ERR, con, "failed to allocate text buffer\n");
3762 		printk_fallback_preferred_direct();
3763 		goto out;
3764 	}
3765 
3766 	if (con->flags & CON_EXTENDED) {
3767 		ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
3768 		if (!ext_text) {
3769 			con_printk(KERN_ERR, con, "failed to allocate ext_text buffer\n");
3770 			printk_fallback_preferred_direct();
3771 			goto out;
3772 		}
3773 	} else {
3774 		dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL);
3775 		if (!dropped_text) {
3776 			con_printk(KERN_ERR, con, "failed to allocate dropped_text buffer\n");
3777 			printk_fallback_preferred_direct();
3778 			goto out;
3779 		}
3780 	}
3781 
3782 	con_printk(KERN_INFO, con, "printing thread started\n");
3783 
3784 	for (;;) {
3785 		/*
3786 		 * Guarantee this task is visible on the waitqueue before
3787 		 * checking the wake condition.
3788 		 *
3789 		 * The full memory barrier within set_current_state() of
3790 		 * prepare_to_wait_event() pairs with the full memory barrier
3791 		 * within wq_has_sleeper().
3792 		 *
3793 		 * This pairs with __wake_up_klogd:A.
3794 		 */
3795 		error = wait_event_interruptible(log_wait,
3796 				printer_should_wake(con, seq)); /* LMM(printk_kthread_func:A) */
3797 
3798 		if (kthread_should_stop() || !printk_kthreads_available)
3799 			break;
3800 
3801 		if (error)
3802 			continue;
3803 
3804 		error = mutex_lock_interruptible(&con->lock);
3805 		if (error)
3806 			continue;
3807 
3808 		if (con->blocked ||
3809 		    !console_kthread_printing_tryenter()) {
3810 			/* Another context has locked the console_lock. */
3811 			mutex_unlock(&con->lock);
3812 			continue;
3813 		}
3814 
3815 		/*
3816 		 * Although this context does not hold the console_lock, it
3817 		 * is known that the console_lock is not held and that no
3818 		 * other context is able to acquire it at the moment.
3819 		 * Therefore it is safe to read con->flags.
3820 		 */
3821 
3822 		if (!__console_is_usable(con->flags)) {
3823 			console_kthread_printing_exit();
3824 			mutex_unlock(&con->lock);
3825 			continue;
3826 		}
3827 
3828 		/*
3829 		 * Even though the printk kthread is always preemptible, it is
3830 		 * still not allowed to call cond_resched() from within
3831 		 * console drivers. The task may become non-preemptible in the
3832 		 * console driver call chain. For example, vt_console_print()
3833 		 * takes a spinlock and then can call into fbcon_redraw(),
3834 		 * which can conditionally invoke cond_resched().
3835 		 */
3836 		console_may_schedule = 0;
3837 		console_emit_next_record(con, text, ext_text, dropped_text);
3838 
3839 		seq = con->seq;
3840 
3841 		console_kthread_printing_exit();
3842 
3843 		mutex_unlock(&con->lock);
3844 	}
3845 
3846 	con_printk(KERN_INFO, con, "printing thread stopped\n");
3847 out:
3848 	kfree(dropped_text);
3849 	kfree(ext_text);
3850 	kfree(text);
3851 
3852 	console_lock();
3853 	/*
3854 	 * If this kthread is being stopped by another task, con->thread will
3855 	 * already be NULL. That is fine. The important thing is that it is
3856 	 * NULL after the kthread exits.
3857 	 */
3858 	con->thread = NULL;
3859 	console_unlock();
3860 
3861 	return 0;
3862 }
3863 
3864 /* Must be called under console_lock. */
3865 static void printk_start_kthread(struct console *con)
3866 {
3867 	/*
3868 	 * Do not start a kthread if there is no write() callback. The
3869 	 * kthreads assume the write() callback exists.
3870 	 */
3871 	if (!con->write)
3872 		return;
3873 
3874 	con->thread = kthread_run(printk_kthread_func, con,
3875 				  "pr/%s%d", con->name, con->index);
3876 	if (IS_ERR(con->thread)) {
3877 		con->thread = NULL;
3878 		con_printk(KERN_ERR, con, "unable to start printing thread\n");
3879 		__printk_fallback_preferred_direct();
3880 		return;
3881 	}
3882 }
3883 
3884 /*
3885  * Delayed printk version, for scheduler-internal messages:
3886  */
3887 #define PRINTK_PENDING_WAKEUP		0x01
3888 #define PRINTK_PENDING_DIRECT_OUTPUT	0x02
3889 
3890 static DEFINE_PER_CPU(int, printk_pending);
3891 
3892 static void wake_up_klogd_work_func(struct irq_work *irq_work)
3893 {
3894 	int pending = this_cpu_xchg(printk_pending, 0);
3895 
3896 	if (pending & PRINTK_PENDING_DIRECT_OUTPUT) {
3897 		printk_prefer_direct_enter();
3898 
3899 		/* If trylock fails, someone else is doing the printing */
3900 		if (console_trylock())
3901 			console_unlock();
3902 
3903 		printk_prefer_direct_exit();
3904 	}
3905 
3906 	if (pending & PRINTK_PENDING_WAKEUP)
3907 		wake_up_interruptible(&log_wait);
3908 }
3909 
3910 static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
3911 	IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func);
3912 
3913 static void __wake_up_klogd(int val)
3914 {
3915 	if (!printk_percpu_data_ready())
3916 		return;
3917 
3918 	preempt_disable();
3919 	/*
3920 	 * Guarantee any new records can be seen by tasks preparing to wait
3921 	 * before this context checks if the wait queue is empty.
3922 	 *
3923 	 * The full memory barrier within wq_has_sleeper() pairs with the full
3924 	 * memory barrier within set_current_state() of
3925 	 * prepare_to_wait_event(), which is called after ___wait_event() adds
3926 	 * the waiter but before it has checked the wait condition.
3927 	 *
3928 	 * This pairs with devkmsg_read:A, syslog_print:A, and
3929 	 * printk_kthread_func:A.
3930 	 */
3931 	if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */
3932 	    (val & PRINTK_PENDING_DIRECT_OUTPUT)) {
3933 		this_cpu_or(printk_pending, val);
3934 		irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
3935 	}
3936 	preempt_enable();
3937 }
3938 
3939 void wake_up_klogd(void)
3940 {
3941 	__wake_up_klogd(PRINTK_PENDING_WAKEUP);
3942 }
3943 
3944 void defer_console_output(void)
3945 {
3946 	/*
3947 	 * New messages may have been added directly to the ringbuffer
3948 	 * using vprintk_store(), so wake any waiters as well.
3949 	 */
3950 	int val = PRINTK_PENDING_WAKEUP;
3951 
3952 	/*
3953 	 * Make sure that some context will print the messages when direct
3954 	 * printing is allowed. This matters in situations where the kthreads
3955 	 * may be unreliable or unusable.
3956 	 */
3957 	if (allow_direct_printing())
3958 		val |= PRINTK_PENDING_DIRECT_OUTPUT;
3959 
3960 	__wake_up_klogd(val);
3961 }
3962 
3963 void printk_trigger_flush(void)
3964 {
3965 	defer_console_output();
3966 }
3967 
3968 int vprintk_deferred(const char *fmt, va_list args)
3969 {
3970 	int r;
3971 
3972 	r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
3973 	defer_console_output();
3974 
3975 	return r;
3976 }
3977 
3978 int _printk_deferred(const char *fmt, ...)
3979 {
3980 	va_list args;
3981 	int r;
3982 
3983 	va_start(args, fmt);
3984 	r = vprintk_deferred(fmt, args);
3985 	va_end(args);
3986 
3987 	return r;
3988 }
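
/*
 * Illustrative sketch: _printk_deferred() is normally reached through
 * the printk_deferred() macro, from contexts that must not re-enter
 * the console code, e.g. deep inside the scheduler (the caller below
 * is hypothetical):
 */
static void example_report_runqueue_anomaly(int cpu)
{
	/* the record is stored now; console output is done via irq_work */
	printk_deferred(KERN_WARNING "example: runqueue anomaly on CPU %d\n",
			cpu);
}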
3989 
3990 /*
3991  * printk rate limiting, lifted from the networking subsystem.
3992  *
3993  * This enforces a rate limit: not more than 10 kernel messages
3994  * every 5s, to mitigate denial of service by log flooding.
3995  */
3996 DEFINE_RATELIMIT_STATE(printk_ratelimit_state, 5 * HZ, 10);
3997 
3998 int __printk_ratelimit(const char *func)
3999 {
4000 	return ___ratelimit(&printk_ratelimit_state, func);
4001 }
4002 EXPORT_SYMBOL(__printk_ratelimit);
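
/*
 * Illustrative sketch: __printk_ratelimit() is normally reached via
 * the printk_ratelimit() macro, which supplies __func__ automatically
 * (the caller below is hypothetical):
 */
static void example_handle_bad_packet(void)
{
	if (printk_ratelimit())
		pr_warn("example: dropping malformed packet\n");
}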
4003 
4004 /**
4005  * printk_timed_ratelimit - caller-controlled printk ratelimiting
4006  * @caller_jiffies: pointer to caller's state, which must initially be zero
4007  * @interval_msecs: minimum interval between prints
4008  *
4009  * printk_timed_ratelimit() returns true on the first call (when
4010  * *@caller_jiffies is zero) and whenever more than @interval_msecs
4011  * milliseconds have elapsed since the last time it returned true.
4012  */
4013 bool printk_timed_ratelimit(unsigned long *caller_jiffies,
4014 			unsigned int interval_msecs)
4015 {
4016 	unsigned long elapsed = jiffies - *caller_jiffies;
4017 
4018 	if (*caller_jiffies && elapsed <= msecs_to_jiffies(interval_msecs))
4019 		return false;
4020 
4021 	*caller_jiffies = jiffies;
4022 	return true;
4023 }
4024 EXPORT_SYMBOL(printk_timed_ratelimit);
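
/*
 * Illustrative sketch (hypothetical caller): allow at most one warning
 * every five seconds. The state must be static (and initially zero) so
 * that it persists across calls.
 */
static void example_warn_slow_device(void)
{
	static unsigned long last_warning;

	if (printk_timed_ratelimit(&last_warning, 5000))
		pr_warn("example: device is responding slowly\n");
}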
4025 
4026 static DEFINE_SPINLOCK(dump_list_lock);
4027 static LIST_HEAD(dump_list);
4028 
4029 /**
4030  * kmsg_dump_register - register a kernel log dumper.
4031  * @dumper: pointer to the kmsg_dumper structure
4032  *
4033  * Adds a kernel log dumper to the system. The dump callback in the
4034  * structure must be set; it will be called when the kernel oopses or
4035  * panics. Returns zero on success and %-EINVAL or %-EBUSY otherwise.
4036  */
4037 int kmsg_dump_register(struct kmsg_dumper *dumper)
4038 {
4039 	unsigned long flags;
4040 	int err = -EBUSY;
4041 
4042 	/* The dump callback needs to be set */
4043 	if (!dumper->dump)
4044 		return -EINVAL;
4045 
4046 	spin_lock_irqsave(&dump_list_lock, flags);
4047 	/* Don't allow registering multiple times */
4048 	if (!dumper->registered) {
4049 		dumper->registered = 1;
4050 		list_add_tail_rcu(&dumper->list, &dump_list);
4051 		err = 0;
4052 	}
4053 	spin_unlock_irqrestore(&dump_list_lock, flags);
4054 
4055 	return err;
4056 }
4057 EXPORT_SYMBOL_GPL(kmsg_dump_register);
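
/*
 * Illustrative sketch (all names hypothetical): minimal registration
 * of a dumper, in the style of users such as mtdoops or ramoops. The
 * example_dump() callback itself is sketched after
 * kmsg_dump_get_line() below.
 */
static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason);

static struct kmsg_dumper example_dumper = {
	.dump		= example_dump,
	.max_reason	= KMSG_DUMP_PANIC,
};

static int __init example_dumper_init(void)
{
	return kmsg_dump_register(&example_dumper);
}
late_initcall(example_dumper_init);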
4058 
4059 /**
4060  * kmsg_dump_unregister - unregister a kmsg dumper.
4061  * @dumper: pointer to the kmsg_dumper structure
4062  *
4063  * Removes a kernel log dumper from the system. Returns zero on success and
4064  * %-EINVAL otherwise.
4065  */
4066 int kmsg_dump_unregister(struct kmsg_dumper *dumper)
4067 {
4068 	unsigned long flags;
4069 	int err = -EINVAL;
4070 
4071 	spin_lock_irqsave(&dump_list_lock, flags);
4072 	if (dumper->registered) {
4073 		dumper->registered = 0;
4074 		list_del_rcu(&dumper->list);
4075 		err = 0;
4076 	}
4077 	spin_unlock_irqrestore(&dump_list_lock, flags);
4078 	synchronize_rcu();
4079 
4080 	return err;
4081 }
4082 EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
4083 
4084 static bool always_kmsg_dump;
4085 module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
4086 
4087 const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason)
4088 {
4089 	switch (reason) {
4090 	case KMSG_DUMP_PANIC:
4091 		return "Panic";
4092 	case KMSG_DUMP_OOPS:
4093 		return "Oops";
4094 	case KMSG_DUMP_EMERG:
4095 		return "Emergency";
4096 	case KMSG_DUMP_SHUTDOWN:
4097 		return "Shutdown";
4098 	default:
4099 		return "Unknown";
4100 	}
4101 }
4102 EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
4103 
4104 /**
4105  * kmsg_dump - dump kernel log to kernel message dumpers.
4106  * @reason: the reason (oops, panic etc) for dumping
4107  *
4108  * Call the dump() callback of each registered dumper, which can
4109  * retrieve the kmsg records with kmsg_dump_get_line() or
4110  * kmsg_dump_get_buffer().
4111  */
4112 void kmsg_dump(enum kmsg_dump_reason reason)
4113 {
4114 	struct kmsg_dumper *dumper;
4115 
4116 	rcu_read_lock();
4117 	list_for_each_entry_rcu(dumper, &dump_list, list) {
4118 		enum kmsg_dump_reason max_reason = dumper->max_reason;
4119 
4120 		/*
4121 		 * If the client has not provided a specific max_reason, default
4122 		 * to KMSG_DUMP_OOPS, unless always_kmsg_dump was set.
4123 		 */
4124 		if (max_reason == KMSG_DUMP_UNDEF) {
4125 			max_reason = always_kmsg_dump ? KMSG_DUMP_MAX :
4126 							KMSG_DUMP_OOPS;
4127 		}
4128 		if (reason > max_reason)
4129 			continue;
4130 
4131 		/* invoke dumper which will iterate over records */
4132 		dumper->dump(dumper, reason);
4133 	}
4134 	rcu_read_unlock();
4135 }
4136 
4137 /**
4138  * kmsg_dump_get_line - retrieve one kmsg log line
4139  * @iter: kmsg dump iterator
4140  * @syslog: include the "<4>" prefixes
4141  * @line: buffer to copy the line to
4142  * @size: maximum size of the buffer
4143  * @len: length of line placed into buffer
4144  *
4145  * Start at the beginning of the kmsg buffer, with the oldest kmsg
4146  * record, and copy one record into the provided buffer.
4147  *
4148  * Consecutive calls will return the next available record moving
4149  * towards the end of the buffer with the youngest messages.
4150  *
4151  * A return value of FALSE indicates that there are no more records to
4152  * read.
4153  */
4154 bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog,
4155 			char *line, size_t size, size_t *len)
4156 {
4157 	u64 min_seq = latched_seq_read_nolock(&clear_seq);
4158 	struct printk_info info;
4159 	unsigned int line_count;
4160 	struct printk_record r;
4161 	size_t l = 0;
4162 	bool ret = false;
4163 
4164 	if (iter->cur_seq < min_seq)
4165 		iter->cur_seq = min_seq;
4166 
4167 	prb_rec_init_rd(&r, &info, line, size);
4168 
4169 	/* Read text or count text lines? */
4170 	if (line) {
4171 		if (!prb_read_valid(prb, iter->cur_seq, &r))
4172 			goto out;
4173 		l = record_print_text(&r, syslog, printk_time);
4174 	} else {
4175 		if (!prb_read_valid_info(prb, iter->cur_seq,
4176 					 &info, &line_count)) {
4177 			goto out;
4178 		}
4179 		l = get_record_print_text_size(&info, line_count, syslog,
4180 					       printk_time);
4181 
4182 	}
4183 
4184 	iter->cur_seq = r.info->seq + 1;
4185 	ret = true;
4186 out:
4187 	if (len)
4188 		*len = l;
4189 	return ret;
4190 }
4191 EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
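
/*
 * Illustrative sketch, continuing the hypothetical dumper registered
 * above: drain the log one record at a time from the dump() callback.
 * The line buffer is static because dump() may run in atomic or panic
 * context with little stack to spare.
 */
static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason)
{
	static char line[1024];
	struct kmsg_dump_iter iter;
	size_t len;

	kmsg_dump_rewind(&iter);
	while (kmsg_dump_get_line(&iter, true, line, sizeof(line), &len)) {
		/* persist @len bytes of @line to non-volatile storage */
	}
}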
4192 
4193 /**
4194  * kmsg_dump_get_buffer - copy kmsg log lines
4195  * @iter: kmsg dump iterator
4196  * @syslog: include the "<4>" prefixes
4197  * @buf: buffer to copy the records to
4198  * @size: maximum size of the buffer
4199  * @len_out: length of text placed into the buffer
4200  *
4201  * Start at the end of the kmsg buffer and fill the provided buffer
4202  * with as many of the *youngest* kmsg records as fit into it.
4203  * If the buffer is large enough, all available kmsg records will be
4204  * copied with a single call.
4205  *
4206  * Consecutive calls will fill the buffer with the next block of
4207  * available older records, not including those already retrieved.
4208  *
4209  * A return value of FALSE indicates that there are no more records to
4210  * read.
4211  */
4212 bool kmsg_dump_get_buffer(struct kmsg_dump_iter *iter, bool syslog,
4213 			  char *buf, size_t size, size_t *len_out)
4214 {
4215 	u64 min_seq = latched_seq_read_nolock(&clear_seq);
4216 	struct printk_info info;
4217 	struct printk_record r;
4218 	u64 seq;
4219 	u64 next_seq;
4220 	size_t len = 0;
4221 	bool ret = false;
4222 	bool time = printk_time;
4223 
4224 	if (!buf || !size)
4225 		goto out;
4226 
4227 	if (iter->cur_seq < min_seq)
4228 		iter->cur_seq = min_seq;
4229 
4230 	if (prb_read_valid_info(prb, iter->cur_seq, &info, NULL)) {
4231 		if (info.seq != iter->cur_seq) {
4232 			/* messages are gone, move to first available one */
4233 			iter->cur_seq = info.seq;
4234 		}
4235 	}
4236 
4237 	/* last entry */
4238 	if (iter->cur_seq >= iter->next_seq)
4239 		goto out;
4240 
4241 	/*
4242 	 * Find first record that fits, including all following records,
4243 	 * into the user-provided buffer for this dump. Pass in size-1
4244 	 * because this function (by way of record_print_text()) will
4245 	 * not write more than size-1 bytes of text into @buf.
4246 	 */
4247 	seq = find_first_fitting_seq(iter->cur_seq, iter->next_seq,
4248 				     size - 1, syslog, time);
4249 
4250 	/*
4251 	 * Next kmsg_dump_get_buffer() invocation will dump block of
4252 	 * older records stored right before this one.
4253 	 */
4254 	next_seq = seq;
4255 
4256 	prb_rec_init_rd(&r, &info, buf, size);
4257 
4258 	len = 0;
4259 	prb_for_each_record(seq, prb, seq, &r) {
4260 		if (r.info->seq >= iter->next_seq)
4261 			break;
4262 
4263 		len += record_print_text(&r, syslog, time);
4264 
4265 		/* Adjust record to store to remaining buffer space. */
4266 		prb_rec_init_rd(&r, &info, buf + len, size - len);
4267 	}
4268 
4269 	iter->next_seq = next_seq;
4270 	ret = true;
4271 out:
4272 	if (len_out)
4273 		*len_out = len;
4274 	return ret;
4275 }
4276 EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
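
/*
 * Illustrative alternative (hypothetical names): capture the youngest
 * records in one block, e.g. to fill a fixed-size persistent buffer.
 */
static char example_oops_buf[4096];

static void example_dump_block(struct kmsg_dumper *dumper,
			       enum kmsg_dump_reason reason)
{
	struct kmsg_dump_iter iter;
	size_t len;

	kmsg_dump_rewind(&iter);
	if (kmsg_dump_get_buffer(&iter, true, example_oops_buf,
				 sizeof(example_oops_buf), &len)) {
		/* write @len bytes of example_oops_buf somewhere safe */
	}
}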
4277 
4278 /**
4279  * kmsg_dump_rewind - reset the iterator
4280  * @iter: kmsg dump iterator
4281  *
4282  * Reset the dumper's iterator so that kmsg_dump_get_line() and
4283  * kmsg_dump_get_buffer() can be called again and used multiple
4284  * times within the same dumper.dump() callback.
4285  */
4286 void kmsg_dump_rewind(struct kmsg_dump_iter *iter)
4287 {
4288 	iter->cur_seq = latched_seq_read_nolock(&clear_seq);
4289 	iter->next_seq = prb_next_seq(prb);
4290 }
4291 EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
4292 
4293 #endif
4294 
4295 #ifdef CONFIG_SMP
4296 static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
4297 static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
4298 
4299 /**
4300  * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
4301  *                            spinning lock is not owned by any CPU.
4302  *
4303  * Context: Any context.
4304  */
4305 void __printk_cpu_sync_wait(void)
4306 {
4307 	do {
4308 		cpu_relax();
4309 	} while (atomic_read(&printk_cpu_sync_owner) != -1);
4310 }
4311 EXPORT_SYMBOL(__printk_cpu_sync_wait);
4312 
4313 /**
4314  * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
4315  *                               spinning lock.
4316  *
4317  * If no processor has the lock, the calling processor takes the lock and
4318  * becomes the owner. If the calling processor is already the owner of the
4319  * lock, this function succeeds immediately.
4320  *
4321  * Context: Any context. Expects interrupts to be disabled.
4322  * Return: 1 on success, otherwise 0.
4323  */
4324 int __printk_cpu_sync_try_get(void)
4325 {
4326 	int cpu;
4327 	int old;
4328 
4329 	cpu = smp_processor_id();
4330 
4331 	/*
4332 	 * Guarantee loads and stores from this CPU when it is the lock owner
4333 	 * are _not_ visible to the previous lock owner. This pairs with
4334 	 * __printk_cpu_sync_put:B.
4335 	 *
4336 	 * Memory barrier involvement:
4337 	 *
4338 	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
4339 	 * then __printk_cpu_sync_put:A can never read from
4340 	 * __printk_cpu_sync_try_get:B.
4341 	 *
4342 	 * Relies on:
4343 	 *
4344 	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
4345 	 * of the previous CPU
4346 	 *    matching
4347 	 * ACQUIRE from __printk_cpu_sync_try_get:A to
4348 	 * __printk_cpu_sync_try_get:B of this CPU
4349 	 */
4350 	old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
4351 				     cpu); /* LMM(__printk_cpu_sync_try_get:A) */
4352 	if (old == -1) {
4353 		/*
4354 		 * This CPU is now the owner and begins loading/storing
4355 		 * data: LMM(__printk_cpu_sync_try_get:B)
4356 		 */
4357 		return 1;
4358 
4359 	} else if (old == cpu) {
4360 		/* This CPU is already the owner. */
4361 		atomic_inc(&printk_cpu_sync_nested);
4362 		return 1;
4363 	}
4364 
4365 	return 0;
4366 }
4367 EXPORT_SYMBOL(__printk_cpu_sync_try_get);
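
/*
 * Callers do not normally use __printk_cpu_sync_try_get() directly.
 * The printk_cpu_sync_get_irqsave() macro in <linux/printk.h> combines
 * it with __printk_cpu_sync_wait(), roughly like this sketch:
 *
 *	for (;;) {
 *		local_irq_save(flags);
 *		if (__printk_cpu_sync_try_get())
 *			break;
 *		local_irq_restore(flags);
 *		__printk_cpu_sync_wait();
 *	}
 */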
4368 
4369 /**
4370  * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
4371  *
4372  * The calling processor must be the owner of the lock.
4373  *
4374  * Context: Any context. Expects interrupts to be disabled.
4375  */
4376 void __printk_cpu_sync_put(void)
4377 {
4378 	if (atomic_read(&printk_cpu_sync_nested)) {
4379 		atomic_dec(&printk_cpu_sync_nested);
4380 		return;
4381 	}
4382 
4383 	/*
4384 	 * This CPU is finished loading/storing data:
4385 	 * LMM(__printk_cpu_sync_put:A)
4386 	 */
4387 
4388 	/*
4389 	 * Guarantee loads and stores from this CPU when it was the
4390 	 * lock owner are visible to the next lock owner. This pairs
4391 	 * with __printk_cpu_sync_try_get:A.
4392 	 *
4393 	 * Memory barrier involvement:
4394 	 *
4395 	 * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
4396 	 * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
4397 	 *
4398 	 * Relies on:
4399 	 *
4400 	 * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
4401 	 * of this CPU
4402 	 *    matching
4403 	 * ACQUIRE from __printk_cpu_sync_try_get:A to
4404 	 * __printk_cpu_sync_try_get:B of the next CPU
4405 	 */
4406 	atomic_set_release(&printk_cpu_sync_owner,
4407 			   -1); /* LMM(__printk_cpu_sync_put:B) */
4408 }
4409 EXPORT_SYMBOL(__printk_cpu_sync_put);
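
/*
 * Illustrative sketch (hypothetical caller): the wrapper macros from
 * <linux/printk.h> keep multi-line output from different CPUs from
 * interleaving, in the same way dump_stack() serializes its output:
 */
static void example_dump_state(void)
{
	unsigned long flags;

	printk_cpu_sync_get_irqsave(flags);
	pr_info("example: begin state dump\n");
	pr_info("example: end state dump\n");
	printk_cpu_sync_put_irqrestore(flags);
}
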
4410 #endif /* CONFIG_SMP */
4411