xref: /openbmc/linux/kernel/printk/printk_safe.c (revision 680ef72a)
/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"
/*
 * printk() cannot take logbuf_lock in NMI context. Instead, it uses an
 * alternative implementation that temporarily stores the strings into a
 * per-CPU buffer. The content of the buffer is later flushed into the
 * main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently by examining
 * the current printk() context mask stored in the @printk_context
 * per-CPU variable.
 *
 * The implementation also allows the strings to be flushed from another
 * CPU. This is useful when we want to make sure that all buffers have
 * been handled, or when IRQs are blocked on the CPU that owns a buffer.
 */
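/*
 * Illustration only (a user-space sketch with hypothetical names, not
 * kernel code): the deadlock this file works around. If an NMI arrives
 * while this CPU already holds the ring-buffer lock inside printk(), a
 * printk() from the NMI handler would spin on a lock that its own CPU
 * can never release:
 *
 *	#include <stdatomic.h>
 *
 *	static atomic_flag logbuf_locked = ATOMIC_FLAG_INIT;
 *
 *	static void fake_printk(const char *msg)
 *	{
 *		// Spins forever if *this* CPU already holds the lock.
 *		while (atomic_flag_test_and_set(&logbuf_locked))
 *			;
 *		// ... copy msg into the ring buffer ...
 *		atomic_flag_clear(&logbuf_locked);
 *	}
 *
 *	static void fake_nmi_handler(void)
 *	{
 *		fake_printk("in NMI\n");	// self-deadlock
 *	}
 *
 * The per-CPU buffers below let such contexts store the message
 * locklessly and defer the real work to irq_work.
 */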
static int printk_safe_irq_ready __read_mostly;

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
				sizeof(atomic_t) -			\
				sizeof(atomic_t) -			\
				sizeof(struct irq_work))

struct printk_safe_seq_buf {
	atomic_t		len;	/* length of written data */
	atomic_t		message_lost;
	struct irq_work		work;	/* IRQ work that flushes the buffer */
	unsigned char		buffer[SAFE_LOG_BUF_LEN];
};
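/*
 * Annotation (not in the original file): SAFE_LOG_BUF_LEN sizes
 * ->buffer so that, ignoring alignment padding, the whole struct
 * occupies about 2^CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT bytes. A hedged
 * sanity-check sketch, usable inside an init function:
 *
 *	BUILD_BUG_ON(sizeof(struct printk_safe_seq_buf) >
 *		     (1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) +
 *		     sizeof(long));	// slack for padding
 *
 * With a shift of 13 (the usual default), ->buffer holds slightly
 * under 8 KiB.
 */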

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Gets flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
	if (printk_safe_irq_ready)
		irq_work_queue(&s->work);
}

/*
 * Add a message to the per-CPU, context-dependent buffer. NMI and
 * printk-safe contexts have dedicated buffers, because otherwise a
 * printk-safe writer preempted by an NMI printk would overwrite the
 * NMI messages when it resumes.
 *
 * The messages are flushed from IRQ work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should
 * this happen, printk_safe_log_store() notices the buffer->len mismatch
 * and repeats the write.
 */
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
						const char *fmt, va_list args)
{
	int add;
	size_t len;

again:
	len = atomic_read(&s->len);

	/* The trailing '\0' is not counted in len. */
	if (len >= sizeof(s->buffer) - 1) {
		atomic_inc(&s->message_lost);
		queue_flush_work(s);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer
	 * was reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
	if (!add)
		return 0;

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	queue_flush_work(s);
	return add;
}
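/*
 * Illustration only: a minimal user-space model of the append/retry
 * protocol above, using hypothetical names and C11 atomics in place of
 * the kernel's atomic_t. A concurrent flusher that resets the length to
 * zero makes the compare-exchange fail, and the writer simply formats
 * its message again at the new offset:
 *
 *	#include <stdatomic.h>
 *	#include <stdio.h>
 *
 *	static _Atomic int buf_len;
 *	static char buf[256];
 *
 *	static int model_log_store(const char *msg)
 *	{
 *		int len, add;
 *	again:
 *		len = atomic_load(&buf_len);
 *		if (len >= (int)sizeof(buf) - 1)
 *			return 0;		// message lost
 *		add = snprintf(buf + len, sizeof(buf) - len, "%s", msg);
 *		// Fails if a flusher truncated the buffer meanwhile.
 *		if (!atomic_compare_exchange_strong(&buf_len, &len,
 *						    len + add))
 *			goto again;
 *		return add;
 *	}
 */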

static inline void printk_safe_flush_line(const char *text, int len)
{
	/*
	 * Avoid any console driver calls from here, because we may be
	 * in NMI or printk_safe context (when in panic). The messages
	 * must go only into the ring buffer at this stage.  Consoles will
	 * get explicitly called later, when a crashdump is not generated.
	 */
	printk_deferred("%.*s", len, text);
}

/* Print a part of the temporary buffer, line by line. */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
	const char *c, *end;
	bool header;

	c = start;
	end = start + len;
	header = true;

	/* Print line by line. */
	while (c < end) {
		if (*c == '\n') {
			printk_safe_flush_line(start, c - start + 1);
			start = ++c;
			header = true;
			continue;
		}

		/* Handle continuation lines or a missing newline. */
		if ((c + 1 < end) && printk_get_level(c)) {
			if (header) {
				c = printk_skip_level(c);
				continue;
			}

			printk_safe_flush_line(start, c - start);
			start = c++;
			header = true;
			continue;
		}

		header = false;
		c++;
	}

	/* Check if there was a partial line. Ignore a pure header. */
	if (start < end && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_safe_flush_line(start, end - start);
		printk_safe_flush_line(newline, strlen(newline));
	}

	return len;
}
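/*
 * Worked example (an annotation, not from the original file): assume
 * the buffer holds "<4>abc\ndef<6>ghi", where "<4>" and "<6>" stand for
 * the two-byte KERN_WARNING and KERN_INFO prefixes. The loop above
 * emits the following printk_deferred() calls:
 *
 *	"<4>abc\n"		// complete line, header kept with it
 *	"def"			// terminated by the following header
 *	"<6>ghi"		// partial line ...
 *	KERN_CONT "\n"		// ... gets an explicit newline
 */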

static void report_message_lost(struct printk_safe_seq_buf *s)
{
	int lost = atomic_xchg(&s->message_lost, 0);

	if (lost)
		printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
	static raw_spinlock_t read_lock =
		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
	struct printk_safe_seq_buf *s =
		container_of(work, struct printk_safe_seq_buf, work);
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase. Also it should never overflow the
	 * buffer size.
	 */
	if ((i && i >= len) || len > sizeof(s->buffer)) {
		const char *msg = "printk_safe_flush: internal error\n";

		printk_safe_flush_line(msg, strlen(msg));
		len = 0;
	}

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that the data have been written up to @len. */
	smp_rmb();
	i += printk_safe_flush_buffer(s->buffer + i, len - i);

	/*
	 * Check that nothing has been added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	report_message_lost(s);
	raw_spin_unlock_irqrestore(&read_lock, flags);
}
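/*
 * Annotation (added for clarity, not part of the original file): how
 * the barriers in the writer and the reader pair up. The cmpxchg on
 * each side is a full barrier that publishes the data before updating
 * @len for the other side:
 *
 *	writer (printk_safe_log_store)	reader (__printk_safe_flush)
 *	------------------------------	----------------------------
 *	len = atomic_read(&s->len)	len = atomic_read(&s->len)
 *	if (!len) smp_rmb()		smp_rmb()
 *	vscnprintf(s->buffer + len, .)	printk_safe_flush_buffer(...)
 *	atomic_cmpxchg(&s->len, ...)	atomic_cmpxchg(&s->len, len, 0)
 */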

/**
 * printk_safe_flush - flush all per-CPU printk_safe and NMI buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
	}
}
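/*
 * Usage sketch (hedged; modeled on typical callers such as the NMI
 * backtrace code, with hypothetical helper names): after asking remote
 * CPUs to dump their state from NMI context, the initiating CPU can
 * flush their buffers itself instead of waiting for their irq_work:
 *
 *	for_each_cpu(cpu, mask)
 *		send_backtrace_nmi(cpu);	// hypothetical helper
 *	while (!all_cpus_done() && time_before(jiffies, timeout))
 *		cpu_relax();			// hypothetical wait
 *	printk_safe_flush();	// pull messages out of remote buffers
 */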

/**
 * printk_safe_flush_on_panic - flush all per-CPU buffers when the system
 *	goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context
 * when the system goes down. It makes a best effort to get NMI messages
 * into the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
	/*
	 * Make sure that we can access the main ring buffer.
	 * Do not risk a double release when more CPUs are up.
	 */
	if (in_nmi() && raw_spin_is_locked(&logbuf_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&logbuf_lock);
	}

	printk_safe_flush();
}
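/*
 * Usage sketch (hedged; the exact call site is an assumption): panic()
 * would flush the per-CPU buffers before the consoles replay the log,
 * so that messages stored from NMI or printk-safe context survive:
 *
 *	void panic(const char *fmt, ...)
 *	{
 *		// ... stop other CPUs, print the panic message ...
 *		printk_safe_flush_on_panic();
 *		// ... let consoles print the pending messages ...
 *	}
 */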

#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

void printk_nmi_enter(void)
{
	/*
	 * The size of the extra per-CPU buffer is limited. Use it only when
	 * the main one is locked. If this CPU is not in the safe context,
	 * the lock must be held by another CPU, so it is safe to wait for
	 * it to get released.
	 */
	if ((this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK) &&
	    raw_spin_is_locked(&logbuf_lock)) {
		this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
	} else {
		this_cpu_or(printk_context, PRINTK_NMI_DEFERRED_CONTEXT_MASK);
	}
}

void printk_nmi_exit(void)
{
	this_cpu_and(printk_context,
		     ~(PRINTK_NMI_CONTEXT_MASK |
		       PRINTK_NMI_DEFERRED_CONTEXT_MASK));
}
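/*
 * Usage sketch (hedged; these are expected to be called from the
 * generic NMI entry/exit wrappers rather than from individual
 * handlers):
 *
 *	printk_nmi_enter();
 *	// ... run the NMI handler; any printk() here is redirected
 *	//     into nmi_print_seq or through vprintk_deferred() ...
 *	printk_nmi_exit();
 */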

#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lock-less printk(), to avoid deadlocks should the printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just like
 * the NMI variant.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}
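/*
 * Usage sketch (hedged: printk_safe_enter_irqsave() and
 * printk_safe_exit_irqrestore() are assumed to be the internal.h
 * wrappers around the functions above). Code that takes logbuf_lock
 * first marks itself as being in the safe context, so that a recursive
 * printk() lands in the per-CPU buffer instead of deadlocking:
 *
 *	unsigned long flags;
 *
 *	printk_safe_enter_irqsave(flags);
 *	raw_spin_lock(&logbuf_lock);
 *	// ... a printk() from here (e.g. a lockdep splat) is stored
 *	//     in safe_print_seq and flushed later via irq_work ...
 *	raw_spin_unlock(&logbuf_lock);
 *	printk_safe_exit_irqrestore(flags);
 */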

__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
	/* Use the extra buffer in NMI when logbuf_lock is taken or in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);

	/* Use the extra buffer to prevent a recursion deadlock in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);

	/*
	 * Use the main logbuf even in NMI when logbuf_lock is available.
	 * But avoid calling console drivers that might have their own locks.
	 */
	if (this_cpu_read(printk_context) & PRINTK_NMI_DEFERRED_CONTEXT_MASK)
		return vprintk_deferred(fmt, args);

	/* No obstacles. */
	return vprintk_default(fmt, args);
}
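/*
 * Dispatch summary (an annotation added for clarity, not part of the
 * original file):
 *
 *	printk_context flag		handler			target
 *	-----------------------------	------------------	--------------
 *	PRINTK_NMI_CONTEXT_MASK		vprintk_nmi()		nmi_print_seq
 *	PRINTK_SAFE_CONTEXT_MASK	vprintk_safe()		safe_print_seq
 *	PRINTK_NMI_DEFERRED_CONTEXT_MASK vprintk_deferred()	main log buffer
 *	(none)				vprintk_default()	main log buffer
 */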

void __init printk_safe_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct printk_safe_seq_buf *s;

		s = &per_cpu(safe_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
		s = &per_cpu(nmi_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);
#endif
	}

	/*
	 * Make sure that the IRQ works are initialized before
	 * @printk_safe_irq_ready is set, in the highly unlikely event
	 * that an NMI triggers at this moment.
	 */
	barrier();
	printk_safe_irq_ready = 1;

	/* Flush pending messages that could not yet schedule IRQ work. */
	printk_safe_flush();
}
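/*
 * Usage sketch (hedged; the exact call site is an assumption): the boot
 * code is expected to call this early, once per-CPU areas and irq_work
 * are usable, so that messages stored before this point get flushed:
 *
 *	// e.g. somewhere in start_kernel():
 *	printk_safe_init();
 */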
409