// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/kdb.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/kprobes.h>

#include "internal.h"

/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings in a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * by examining the current printk() context mask stored in the
 * @printk_context per-CPU variable.
 *
 * The implementation also allows flushing the strings from another
 * CPU. This is used when we want to make sure that all buffers
 * have been handled, or when IRQs are blocked.
 */

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
				sizeof(atomic_t) -			\
				sizeof(atomic_t) -			\
				sizeof(struct irq_work))

struct printk_safe_seq_buf {
	atomic_t		len;	/* length of written data */
	atomic_t		message_lost;
	struct irq_work		work;	/* IRQ work that flushes the buffer */
	unsigned char		buffer[SAFE_LOG_BUF_LEN];
};

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Queue the buffer to get flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
	if (printk_percpu_data_ready())
		irq_work_queue(&s->work);
}

/*
 * Add a message to the per-CPU, context-dependent buffer. NMI and
 * printk-safe contexts have dedicated buffers, because otherwise
 * a printk-safe writer preempted by an NMI printk() would overwrite
 * the NMI messages.
 *
 * The messages are flushed from IRQ work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should
 * this happen, printk_safe_log_store() notices the buffer->len mismatch
 * and repeats the write.
 */
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
						const char *fmt, va_list args)
{
	int add;
	size_t len;
	va_list ap;

again:
	len = atomic_read(&s->len);

	/* The trailing '\0' is not counted in len. */
	if (len >= sizeof(s->buffer) - 1) {
		atomic_inc(&s->message_lost);
		queue_flush_work(s);
		return 0;
	}

	/*
	 * Make sure that all old data has been read before the buffer
	 * is reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	va_copy(ap, args);
	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
	va_end(ap);
	if (!add)
		return 0;

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure the data is written before s->len is updated.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	queue_flush_work(s);
	return add;
}
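/*
 * Illustrative sketch (not compiled; an informal model of the lockless
 * protocol implemented above, assuming a single writer per context):
 *
 *	writer (printk_safe_log_store)	flusher (__printk_safe_flush)
 *	------------------------------	-----------------------------
 *	len = atomic_read(&s->len);
 *	vscnprintf(s->buffer + len, ...);
 *					len = atomic_read(&s->len);
 *					smp_rmb(); // see the written data
 *					flush s->buffer[0 .. len);
 *					atomic_cmpxchg(&s->len, len, 0);
 *	atomic_cmpxchg(&s->len, len,
 *		       len + add);	// fails, @len has changed
 *	goto again;	// rewrite at the start of the now-empty buffer
 *
 * When the flusher resets @len between the writer's read and cmpxchg,
 * the writer's cmpxchg fails and the message is stored once more, so
 * it is neither lost nor duplicated.
 */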
static inline void printk_safe_flush_line(const char *text, int len)
{
	/*
	 * Avoid any console driver calls from here, because we may be
	 * in NMI or printk-safe context (when in panic). At this stage,
	 * the messages must go only into the ring buffer. The consoles
	 * get called explicitly later, when no crashdump is generated.
	 */
	printk_deferred("%.*s", len, text);
}

/* Print part of the temporary buffer, line by line. */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
	const char *c, *end;
	bool header;

	c = start;
	end = start + len;
	header = true;

	/* Print line by line. */
	while (c < end) {
		if (*c == '\n') {
			printk_safe_flush_line(start, c - start + 1);
			start = ++c;
			header = true;
			continue;
		}

		/* Handle continuation lines or a missing newline. */
		if ((c + 1 < end) && printk_get_level(c)) {
			if (header) {
				c = printk_skip_level(c);
				continue;
			}

			printk_safe_flush_line(start, c - start);
			start = c++;
			header = true;
			continue;
		}

		header = false;
		c++;
	}

	/* Check if there was a partial line. Ignore a pure header. */
	if (start < end && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_safe_flush_line(start, end - start);
		printk_safe_flush_line(newline, strlen(newline));
	}

	return len;
}

static void report_message_lost(struct printk_safe_seq_buf *s)
{
	int lost = atomic_xchg(&s->message_lost, 0);

	if (lost)
		printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
	static raw_spinlock_t read_lock =
		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
	struct printk_safe_seq_buf *s =
		container_of(work, struct printk_safe_seq_buf, work);
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase. Also it must never exceed the
	 * buffer size.
	 */
	if ((i && i >= len) || len > sizeof(s->buffer)) {
		const char *msg = "printk_safe_flush: internal error\n";

		printk_safe_flush_line(msg, strlen(msg));
		len = 0;
	}

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that the data has been written up to @len. */
	smp_rmb();
	i += printk_safe_flush_buffer(s->buffer + i, len - i);

	/*
	 * Check that nothing has been added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure the data is copied before s->len is
	 * updated.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	report_message_lost(s);
	raw_spin_unlock_irqrestore(&read_lock, flags);
}
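/*
 * Example (illustrative) of how printk_safe_flush_buffer() splits the
 * buffer. Given a buffer holding two messages where the second one
 * lacks a trailing newline:
 *
 *	"\0014first\n" "\0014second"
 *
 * the first message is flushed as one deferred line, including its
 * level header, and the second is flushed as-is followed by a
 * synthesized KERN_CONT "\n", so a missing newline cannot glue
 * unrelated messages together.
 */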
/**
 * printk_safe_flush - flush all per-CPU buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
	}
}

/**
 * printk_safe_flush_on_panic - flush all per-CPU buffers when the system
 *	goes down.
 *
 * Similar to printk_safe_flush(), but it can be called even in NMI
 * context when the system goes down. It makes a best effort to get
 * the messages into the main ring buffer.
 *
 * Note that it can try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
	/*
	 * Make sure that we can access the main ring buffer.
	 * Do not risk a double release when more CPUs are up.
	 */
	if (raw_spin_is_locked(&logbuf_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&logbuf_lock);
	}

	printk_safe_flush();
}

#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

void noinstr printk_nmi_enter(void)
{
	this_cpu_add(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
}

void noinstr printk_nmi_exit(void)
{
	this_cpu_sub(printk_context, PRINTK_NMI_CONTEXT_OFFSET);
}

/*
 * Marks a code section that might produce many messages in NMI context,
 * where the risk of losing them is more critical than eventual
 * reordering.
 *
 * It has an effect only when called in NMI context. Then printk()
 * tries to store the messages into the main logbuf directly and uses
 * the per-CPU buffers only as a fallback when the lock is not
 * available.
 */
void printk_nmi_direct_enter(void)
{
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lockless printk(), to avoid deadlocks should printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just
 * like in NMI context.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}
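/*
 * Illustrative usage (a sketch, assuming the printk_safe_enter_irqsave()
 * and printk_safe_exit_irqrestore() wrappers from internal.h): callers
 * mark sections that could make printk() recurse, e.g. while holding
 * logbuf_lock:
 *
 *	unsigned long flags;
 *
 *	printk_safe_enter_irqsave(flags);
 *	raw_spin_lock(&logbuf_lock);
 *	...	// any printk() here goes into the per-CPU safe buffer
 *	raw_spin_unlock(&logbuf_lock);
 *	printk_safe_exit_irqrestore(flags);
 */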
__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
#ifdef CONFIG_KGDB_KDB
	/* Allow passing printk() to kdb, but avoid recursion. */
	if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
		return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
#endif

	/*
	 * Try to use the main logbuf even in NMI. But avoid calling console
	 * drivers that might have their own locks.
	 */
	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
	    raw_spin_trylock(&logbuf_lock)) {
		int len;

		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
		raw_spin_unlock(&logbuf_lock);
		defer_console_output();
		return len;
	}

	/* Use the extra buffer in NMI when logbuf_lock is taken or in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);

	/* Use the extra buffer to prevent a recursion deadlock in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);

	/* No obstacles. */
	return vprintk_default(fmt, args);
}

void __init printk_safe_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct printk_safe_seq_buf *s;

		s = &per_cpu(safe_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
		s = &per_cpu(nmi_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);
#endif
	}

	/* Flush pending messages that did not get their IRQ work scheduled. */
	printk_safe_flush();
}
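/*
 * Summary of the routing done by vprintk_func() above (illustrative):
 *
 *	context bits set		handler			target
 *	----------------		-------			------
 *	NMI_DIRECT + lock free		vprintk_store()		main logbuf
 *	NMI				vprintk_nmi()		nmi_print_seq
 *	SAFE				vprintk_safe()		safe_print_seq
 *	none				vprintk_default()	main logbuf
 */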