// SPDX-License-Identifier: GPL-2.0-only
/*
 * RAM Oops/Panic logger
 *
 * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
 * Copyright (C) 2011 Kees Cook <keescook@chromium.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/pstore.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include "internal.h"
#include "ram_internal.h"

#define RAMOOPS_KERNMSG_HDR "===="
#define MIN_MEM_SIZE 4096UL

static ulong record_size = MIN_MEM_SIZE;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
		"size of each dump done on oops/panic");

static ulong ramoops_console_size = MIN_MEM_SIZE;
module_param_named(console_size, ramoops_console_size, ulong, 0400);
MODULE_PARM_DESC(console_size, "size of kernel console log");

static ulong ramoops_ftrace_size = MIN_MEM_SIZE;
module_param_named(ftrace_size, ramoops_ftrace_size, ulong, 0400);
MODULE_PARM_DESC(ftrace_size, "size of ftrace log");

static ulong ramoops_pmsg_size = MIN_MEM_SIZE;
module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400);
MODULE_PARM_DESC(pmsg_size, "size of user space message log");

static unsigned long long mem_address;
module_param_hw(mem_address, ullong, other, 0400);
MODULE_PARM_DESC(mem_address,
		"start of reserved RAM used to store oops/panic logs");

static ulong mem_size;
module_param(mem_size, ulong, 0400);
MODULE_PARM_DESC(mem_size,
		"size of reserved RAM used to store oops/panic logs");

static unsigned int mem_type;
module_param(mem_type, uint, 0400);
MODULE_PARM_DESC(mem_type,
		"memory type: 0=write-combined (default), 1=unbuffered, 2=cached");

static int ramoops_max_reason = -1;
module_param_named(max_reason, ramoops_max_reason, int, 0400);
MODULE_PARM_DESC(max_reason,
		"maximum reason for kmsg dump (default 2: Oops and Panic)");

static int ramoops_ecc;
module_param_named(ecc, ramoops_ecc, int, 0400);
MODULE_PARM_DESC(ramoops_ecc,
		"if non-zero, the option enables ECC support and specifies "
		"ECC buffer size in bytes (1 is a special value, means 16 "
		"bytes ECC)");

static int ramoops_dump_oops = -1;
module_param_named(dump_oops, ramoops_dump_oops, int, 0400);
MODULE_PARM_DESC(dump_oops,
		"(deprecated: use max_reason instead) set to 1 to dump oopses & panics, 0 to only dump panics");

struct ramoops_context {
	struct persistent_ram_zone **dprzs;	/* Oops dump zones */
	struct persistent_ram_zone *cprz;	/* Console zone */
	struct persistent_ram_zone **fprzs;	/* Ftrace zones */
	struct persistent_ram_zone *mprz;	/* PMSG zone */
	phys_addr_t phys_addr;
	unsigned long size;
	unsigned int memtype;
	size_t record_size;
	size_t console_size;
	size_t ftrace_size;
	size_t pmsg_size;
	u32 flags;
	struct persistent_ram_ecc_info ecc_info;
	unsigned int max_dump_cnt;
	unsigned int dump_write_cnt;
	/* The _read_cnt fields must be cleared on ramoops_pstore_open */
	unsigned int dump_read_cnt;
	unsigned int console_read_cnt;
	unsigned int max_ftrace_cnt;
	unsigned int ftrace_read_cnt;
	unsigned int pmsg_read_cnt;
	struct pstore_info pstore;
};

static struct platform_device *dummy;

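/*
 * pstore calls ->open() before walking records: reset the per-type read
 * iterators so every zone is enumerated from its first record again.
 */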
static int ramoops_pstore_open(struct pstore_info *psi)
{
	struct ramoops_context *cxt = psi->data;

	cxt->dump_read_cnt = 0;
	cxt->console_read_cnt = 0;
	cxt->ftrace_read_cnt = 0;
	cxt->pmsg_read_cnt = 0;
	return 0;
}

static struct persistent_ram_zone *
ramoops_get_next_prz(struct persistent_ram_zone *przs[], int id,
		     struct pstore_record *record)
{
	struct persistent_ram_zone *prz;

	/* Give up if we never existed or have hit the end. */
	if (!przs)
		return NULL;

	prz = przs[id];
	if (!prz)
		return NULL;

	/* Update old/shadowed buffer. */
	if (prz->type == PSTORE_TYPE_DMESG)
		persistent_ram_save_old(prz);

	if (!persistent_ram_old_size(prz))
		return NULL;

	record->type = prz->type;
	record->id = id;

	return prz;
}

static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
				 bool *compressed)
{
	char data_type;
	int header_length = 0;

	if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
		   (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
		   &header_length) == 3) {
		time->tv_nsec *= 1000;
		if (data_type == 'C')
			*compressed = true;
		else
			*compressed = false;
	} else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu\n%n",
			  (time64_t *)&time->tv_sec, &time->tv_nsec,
			  &header_length) == 2) {
		time->tv_nsec *= 1000;
		*compressed = false;
	} else {
		time->tv_sec = 0;
		time->tv_nsec = 0;
		*compressed = false;
	}
	return header_length;
}

static bool prz_ok(struct persistent_ram_zone *prz)
{
	return !!prz && !!(persistent_ram_old_size(prz) +
			   persistent_ram_ecc_string(prz, NULL, 0));
}

static ssize_t ramoops_pstore_read(struct pstore_record *record)
{
	ssize_t size = 0;
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz = NULL;
	int header_length = 0;
	bool free_prz = false;

	/*
	 * Ramoops headers provide time stamps for PSTORE_TYPE_DMESG, but
	 * PSTORE_TYPE_CONSOLE and PSTORE_TYPE_FTRACE don't currently have
	 * valid time stamps, so they are initialized to zero.
	 */
	record->time.tv_sec = 0;
	record->time.tv_nsec = 0;
	record->compressed = false;

	/* Find the next valid persistent_ram_zone for DMESG */
	while (cxt->dump_read_cnt < cxt->max_dump_cnt && !prz) {
		prz = ramoops_get_next_prz(cxt->dprzs, cxt->dump_read_cnt++,
					   record);
		if (!prz_ok(prz))
			continue;
		header_length = ramoops_read_kmsg_hdr(persistent_ram_old(prz),
						      &record->time,
						      &record->compressed);
		/* Clear and skip this DMESG record if it has no valid header */
		if (!header_length) {
			persistent_ram_free_old(prz);
			persistent_ram_zap(prz);
			prz = NULL;
		}
	}

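	/* Console and pmsg are single zones, each read at most once per pass. */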
	if (!prz_ok(prz) && !cxt->console_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->cprz, 0 /* single */, record);

	if (!prz_ok(prz) && !cxt->pmsg_read_cnt++)
		prz = ramoops_get_next_prz(&cxt->mprz, 0 /* single */, record);

	/* ftrace is last since it may want to dynamically allocate memory. */
	if (!prz_ok(prz)) {
		if (!(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU) &&
		    !cxt->ftrace_read_cnt++) {
			prz = ramoops_get_next_prz(cxt->fprzs, 0 /* single */,
						   record);
		} else {
			/*
			 * Build a new dummy record which combines all the
			 * per-cpu records including metadata and ecc info.
			 */
			struct persistent_ram_zone *tmp_prz, *prz_next;

			tmp_prz = kzalloc(sizeof(struct persistent_ram_zone),
					  GFP_KERNEL);
			if (!tmp_prz)
				return -ENOMEM;
			prz = tmp_prz;
			free_prz = true;

			while (cxt->ftrace_read_cnt < cxt->max_ftrace_cnt) {
				prz_next = ramoops_get_next_prz(cxt->fprzs,
						cxt->ftrace_read_cnt++, record);

				if (!prz_ok(prz_next))
					continue;

				tmp_prz->ecc_info = prz_next->ecc_info;
				tmp_prz->corrected_bytes +=
						prz_next->corrected_bytes;
				tmp_prz->bad_blocks += prz_next->bad_blocks;

				size = pstore_ftrace_combine_log(
						&tmp_prz->old_log,
						&tmp_prz->old_log_size,
						prz_next->old_log,
						prz_next->old_log_size);
				if (size)
					goto out;
			}
			record->id = 0;
		}
	}

	if (!prz_ok(prz)) {
		size = 0;
		goto out;
	}

	size = persistent_ram_old_size(prz) - header_length;

	/* ECC correction notice */
	record->ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);

	record->buf = kmalloc(size + record->ecc_notice_size + 1, GFP_KERNEL);
	if (record->buf == NULL) {
		size = -ENOMEM;
		goto out;
	}

	memcpy(record->buf, (char *)persistent_ram_old(prz) + header_length,
	       size);

	persistent_ram_ecc_string(prz, record->buf + size,
				  record->ecc_notice_size + 1);

out:
	if (free_prz) {
		kfree(prz->old_log);
		kfree(prz);
	}

	return size;
}

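/*
 * Write the "====SECS.USECS-C|D" header that ramoops_read_kmsg_hdr()
 * parses back ('C' marks a compressed record, 'D' an uncompressed one).
 */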
static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz,
				     struct pstore_record *record)
{
	char hdr[36]; /* "===="(4), %lld(20), "."(1), %06lu(6), "-%c\n"(3) */
	size_t len;

	len = scnprintf(hdr, sizeof(hdr),
		RAMOOPS_KERNMSG_HDR "%lld.%06lu-%c\n",
		(time64_t)record->time.tv_sec,
		record->time.tv_nsec / 1000,
		record->compressed ? 'C' : 'D');
	persistent_ram_write(prz, hdr, len);

	return len;
}

static int notrace ramoops_pstore_write(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;
	size_t size, hlen;

	if (record->type == PSTORE_TYPE_CONSOLE) {
		if (!cxt->cprz)
			return -ENOMEM;
		persistent_ram_write(cxt->cprz, record->buf, record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_FTRACE) {
		int zonenum;

		if (!cxt->fprzs)
			return -ENOMEM;
		/*
		 * Choose the zone depending on whether we're using
		 * per-cpu buffers.
		 */
		if (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
			zonenum = smp_processor_id();
		else
			zonenum = 0;

		persistent_ram_write(cxt->fprzs[zonenum], record->buf,
				     record->size);
		return 0;
	} else if (record->type == PSTORE_TYPE_PMSG) {
		pr_warn_ratelimited("PMSG shouldn't call %s\n", __func__);
		return -EINVAL;
	}

	if (record->type != PSTORE_TYPE_DMESG)
		return -EINVAL;

	/*
	 * We could filter on record->reason here if we wanted to (which
	 * would duplicate what happened before the "max_reason" setting
	 * was added), but that would defeat the purpose of a system
	 * changing printk.always_kmsg_dump, so instead log everything that
	 * the kmsg dumper sends us, since it should be doing the filtering
	 * based on the combination of printk.always_kmsg_dump and our
	 * requested "max_reason".
	 */

	/*
	 * Explicitly only take the first part of any new crash.
	 * If our buffer is larger than kmsg_bytes, this can never happen,
	 * and if our buffer is smaller than kmsg_bytes, we don't want the
	 * report split across multiple records.
	 */
	if (record->part != 1)
		return -ENOSPC;

	if (!cxt->dprzs)
		return -ENOSPC;

	prz = cxt->dprzs[cxt->dump_write_cnt];

	/*
	 * Since this is a new crash dump, we need to reset the buffer in
	 * case it still has an old dump present. Without this, the new dump
	 * will get appended, which would seriously confuse anything trying
	 * to check dump file contents. Specifically, ramoops_read_kmsg_hdr()
	 * expects to find a dump header at the beginning of buffer data, so
	 * we must reset the buffer values, in order to ensure that the
	 * header will be written to the beginning of the buffer.
	 */
	persistent_ram_zap(prz);

	/* Build header and append record contents. */
	hlen = ramoops_write_kmsg_hdr(prz, record);
	if (!hlen)
		return -ENOMEM;

	size = record->size;
	if (size + hlen > prz->buffer_size)
		size = prz->buffer_size - hlen;
	persistent_ram_write(prz, record->buf, size);

	cxt->dump_write_cnt = (cxt->dump_write_cnt + 1) % cxt->max_dump_cnt;

	return 0;
}

static int notrace ramoops_pstore_write_user(struct pstore_record *record,
					     const char __user *buf)
{
	if (record->type == PSTORE_TYPE_PMSG) {
		struct ramoops_context *cxt = record->psi->data;

		if (!cxt->mprz)
			return -ENOMEM;
		return persistent_ram_write_user(cxt->mprz, buf, record->size);
	}

	return -EINVAL;
}

static int ramoops_pstore_erase(struct pstore_record *record)
{
	struct ramoops_context *cxt = record->psi->data;
	struct persistent_ram_zone *prz;

	switch (record->type) {
	case PSTORE_TYPE_DMESG:
		if (record->id >= cxt->max_dump_cnt)
			return -EINVAL;
		prz = cxt->dprzs[record->id];
		break;
	case PSTORE_TYPE_CONSOLE:
		prz = cxt->cprz;
		break;
	case PSTORE_TYPE_FTRACE:
		if (record->id >= cxt->max_ftrace_cnt)
			return -EINVAL;
		prz = cxt->fprzs[record->id];
		break;
	case PSTORE_TYPE_PMSG:
		prz = cxt->mprz;
		break;
	default:
		return -EINVAL;
	}

	persistent_ram_free_old(prz);
	persistent_ram_zap(prz);

	return 0;
}

static struct ramoops_context oops_cxt = {
	.pstore = {
		.owner		= THIS_MODULE,
		.name		= "ramoops",
		.open		= ramoops_pstore_open,
		.read		= ramoops_pstore_read,
		.write		= ramoops_pstore_write,
		.write_user	= ramoops_pstore_write_user,
		.erase		= ramoops_pstore_erase,
	},
};

static void ramoops_free_przs(struct ramoops_context *cxt)
{
	int i;

	/* Free pmsg PRZ */
	persistent_ram_free(&cxt->mprz);

	/* Free console PRZ */
	persistent_ram_free(&cxt->cprz);

	/* Free dump PRZs */
	if (cxt->dprzs) {
		for (i = 0; i < cxt->max_dump_cnt; i++)
			persistent_ram_free(&cxt->dprzs[i]);

		kfree(cxt->dprzs);
		cxt->dprzs = NULL;
		cxt->max_dump_cnt = 0;
	}

	/* Free ftrace PRZs */
	if (cxt->fprzs) {
		for (i = 0; i < cxt->max_ftrace_cnt; i++)
			persistent_ram_free(&cxt->fprzs[i]);
		kfree(cxt->fprzs);
		cxt->fprzs = NULL;
		cxt->max_ftrace_cnt = 0;
	}
}

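/*
 * Carve @mem_sz bytes at *paddr into an array of persistent RAM zones.
 * A positive @record_size fixes the zone size and derives *cnt from it;
 * a negative @record_size keeps *cnt and derives the zone size instead.
 * *paddr is advanced past each zone that is successfully created.
 */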
static int ramoops_init_przs(const char *name,
			     struct device *dev, struct ramoops_context *cxt,
			     struct persistent_ram_zone ***przs,
			     phys_addr_t *paddr, size_t mem_sz,
			     ssize_t record_size,
			     unsigned int *cnt, u32 sig, u32 flags)
{
	int err = -ENOMEM;
	int i;
	size_t zone_sz;
	struct persistent_ram_zone **prz_ar;

	/* Allocate nothing for 0 mem_sz or 0 record_size. */
	if (mem_sz == 0 || record_size == 0) {
		*cnt = 0;
		return 0;
	}

	/*
	 * If we have a negative record size, calculate it based on
	 * mem_sz / *cnt. If we have a positive record size, calculate
	 * cnt from mem_sz / record_size.
	 */
	if (record_size < 0) {
		if (*cnt == 0)
			return 0;
		record_size = mem_sz / *cnt;
		if (record_size == 0) {
			dev_err(dev, "%s record size == 0 (%zu / %u)\n",
				name, mem_sz, *cnt);
			goto fail;
		}
	} else {
		*cnt = mem_sz / record_size;
		if (*cnt == 0) {
			dev_err(dev, "%s record count == 0 (%zu / %zu)\n",
				name, mem_sz, record_size);
			goto fail;
		}
	}

	if (*paddr + mem_sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name,
			mem_sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		goto fail;
	}

	zone_sz = mem_sz / *cnt;
	if (!zone_sz) {
		dev_err(dev, "%s zone size == 0\n", name);
		goto fail;
	}

	prz_ar = kcalloc(*cnt, sizeof(**przs), GFP_KERNEL);
	if (!prz_ar)
		goto fail;

	for (i = 0; i < *cnt; i++) {
		char *label;

		if (*cnt == 1)
			label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
		else
			label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
					  name, i, *cnt - 1);
		prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
					       &cxt->ecc_info,
					       cxt->memtype, flags, label);
		kfree(label);
		if (IS_ERR(prz_ar[i])) {
			err = PTR_ERR(prz_ar[i]);
			dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
				name, record_size,
				(unsigned long long)*paddr, err);

			while (i > 0) {
				i--;
				persistent_ram_free(&prz_ar[i]);
			}
			kfree(prz_ar);
			prz_ar = NULL;
			goto fail;
		}
		*paddr += zone_sz;
		prz_ar[i]->type = pstore_name_to_type(name);
	}

	*przs = prz_ar;
	return 0;

fail:
	*cnt = 0;
	return err;
}

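/*
 * Single-zone variant of ramoops_init_przs(): map one region of @sz
 * bytes at *paddr and advance *paddr past it.
 */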
static int ramoops_init_prz(const char *name,
			    struct device *dev, struct ramoops_context *cxt,
			    struct persistent_ram_zone **prz,
			    phys_addr_t *paddr, size_t sz, u32 sig)
{
	char *label;

	if (!sz)
		return 0;

	if (*paddr + sz - cxt->phys_addr > cxt->size) {
		dev_err(dev, "no room for %s mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
			name, sz, (unsigned long long)*paddr,
			cxt->size, (unsigned long long)cxt->phys_addr);
		return -ENOMEM;
	}

	label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
				  cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
	kfree(label);
	if (IS_ERR(*prz)) {
		int err = PTR_ERR(*prz);

		dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
			name, sz, (unsigned long long)*paddr, err);
		return err;
	}

	*paddr += sz;
	(*prz)->type = pstore_name_to_type(name);

	return 0;
}

/* Read a u32 from a dt property and make sure it's safe for an int. */
static int ramoops_parse_dt_u32(struct platform_device *pdev,
				const char *propname,
				u32 default_value, u32 *value)
{
	u32 val32 = 0;
	int ret;

	ret = of_property_read_u32(pdev->dev.of_node, propname, &val32);
	if (ret == -EINVAL) {
		/* field is missing, use default value. */
		val32 = default_value;
	} else if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse property %s: %d\n",
			propname, ret);
		return ret;
	}

	/* Sanity check our results. */
	if (val32 > INT_MAX) {
		dev_err(&pdev->dev, "%s %u > INT_MAX\n", propname, val32);
		return -EOVERFLOW;
	}

	*value = val32;
	return 0;
}

static int ramoops_parse_dt(struct platform_device *pdev,
			    struct ramoops_platform_data *pdata)
{
	struct device_node *of_node = pdev->dev.of_node;
	struct device_node *parent_node;
	struct resource *res;
	u32 value;
	int ret;

	dev_dbg(&pdev->dev, "using Device Tree\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"failed to locate DT /reserved-memory resource\n");
		return -EINVAL;
	}

	pdata->mem_size = resource_size(res);
	pdata->mem_address = res->start;
	/*
	 * Setting "unbuffered" is deprecated and will be ignored if
	 * "mem_type" is also specified.
	 */
	pdata->mem_type = of_property_read_bool(of_node, "unbuffered");
	/*
	 * Setting "no-dump-oops" is deprecated and will be ignored if
	 * "max_reason" is also specified.
	 */
	if (of_property_read_bool(of_node, "no-dump-oops"))
		pdata->max_reason = KMSG_DUMP_PANIC;
	else
		pdata->max_reason = KMSG_DUMP_OOPS;

#define parse_u32(name, field, default_value) {				\
	ret = ramoops_parse_dt_u32(pdev, name, default_value,		\
				   &value);				\
	if (ret < 0)							\
		return ret;						\
	field = value;							\
}

	parse_u32("mem-type", pdata->mem_type, pdata->mem_type);
	parse_u32("record-size", pdata->record_size, 0);
	parse_u32("console-size", pdata->console_size, 0);
	parse_u32("ftrace-size", pdata->ftrace_size, 0);
	parse_u32("pmsg-size", pdata->pmsg_size, 0);
	parse_u32("ecc-size", pdata->ecc_info.ecc_size, 0);
	parse_u32("flags", pdata->flags, 0);
	parse_u32("max-reason", pdata->max_reason, pdata->max_reason);

#undef parse_u32

	/*
	 * Some old Chromebooks relied on the kernel setting the
	 * console_size and pmsg_size to the record size since that's
	 * what the downstream kernel did. These same Chromebooks had
	 * "ramoops" straight under the root node which isn't
	 * according to the current upstream bindings (though it was
	 * arguably acceptable under a prior version of the bindings).
	 * Let's make those old Chromebooks work by detecting that
	 * we're not a child of "reserved-memory" and mimicking the
	 * expected behavior.
	 */
	parent_node = of_get_parent(of_node);
	if (!of_node_name_eq(parent_node, "reserved-memory") &&
	    !pdata->console_size && !pdata->ftrace_size &&
	    !pdata->pmsg_size && !pdata->ecc_info.ecc_size) {
		pdata->console_size = pdata->record_size;
		pdata->pmsg_size = pdata->record_size;
	}
	of_node_put(parent_node);

	return 0;
}

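/*
 * Shared probe path for Device Tree nodes and the module-parameter
 * backed "dummy" platform device; only one ramoops instance may be
 * active at a time.
 */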
static int ramoops_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ramoops_platform_data *pdata = dev->platform_data;
	struct ramoops_platform_data pdata_local;
	struct ramoops_context *cxt = &oops_cxt;
	size_t dump_mem_sz;
	phys_addr_t paddr;
	int err = -EINVAL;

	/*
	 * Only a single ramoops area allowed at a time, so fail extra
	 * probes.
	 */
	if (cxt->max_dump_cnt) {
		pr_err("already initialized\n");
		goto fail_out;
	}

	if (dev_of_node(dev) && !pdata) {
		pdata = &pdata_local;
		memset(pdata, 0, sizeof(*pdata));

		err = ramoops_parse_dt(pdev, pdata);
		if (err < 0)
			goto fail_out;
	}

	/* Make sure we didn't get bogus platform data pointer. */
	if (!pdata) {
		pr_err("NULL platform data\n");
		err = -EINVAL;
		goto fail_out;
	}

	if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size &&
			!pdata->ftrace_size && !pdata->pmsg_size)) {
		pr_err("The memory size and the record/console size must be "
			"non-zero\n");
		err = -EINVAL;
		goto fail_out;
	}

	if (pdata->record_size && !is_power_of_2(pdata->record_size))
		pdata->record_size = rounddown_pow_of_two(pdata->record_size);
	if (pdata->console_size && !is_power_of_2(pdata->console_size))
		pdata->console_size = rounddown_pow_of_two(pdata->console_size);
	if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size))
		pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
	if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size))
		pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size);

	cxt->size = pdata->mem_size;
	cxt->phys_addr = pdata->mem_address;
	cxt->memtype = pdata->mem_type;
	cxt->record_size = pdata->record_size;
	cxt->console_size = pdata->console_size;
	cxt->ftrace_size = pdata->ftrace_size;
	cxt->pmsg_size = pdata->pmsg_size;
	cxt->flags = pdata->flags;
	cxt->ecc_info = pdata->ecc_info;

	paddr = cxt->phys_addr;

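	/*
	 * Carve zones out of the reserved region in order: dmesg record
	 * zones first, then console, pmsg, and finally ftrace.
	 */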
	dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
			- cxt->pmsg_size;
	err = ramoops_init_przs("dmesg", dev, cxt, &cxt->dprzs, &paddr,
				dump_mem_sz, cxt->record_size,
				&cxt->max_dump_cnt, 0, 0);
	if (err)
		goto fail_init;

	err = ramoops_init_prz("console", dev, cxt, &cxt->cprz, &paddr,
			       cxt->console_size, 0);
	if (err)
		goto fail_init;

	err = ramoops_init_prz("pmsg", dev, cxt, &cxt->mprz, &paddr,
			       cxt->pmsg_size, 0);
	if (err)
		goto fail_init;

	cxt->max_ftrace_cnt = (cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
				? nr_cpu_ids
				: 1;
	err = ramoops_init_przs("ftrace", dev, cxt, &cxt->fprzs, &paddr,
				cxt->ftrace_size, -1,
				&cxt->max_ftrace_cnt, LINUX_VERSION_CODE,
				(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
					? PRZ_FLAG_NO_LOCK : 0);
	if (err)
		goto fail_init;

	cxt->pstore.data = cxt;
	/*
	 * Prepare frontend flags based on which areas are initialized.
	 * For ramoops_init_przs() cases, the "max count" variable tells
	 * if there are regions present. For ramoops_init_prz() cases,
	 * the single region size is how to check.
	 */
	cxt->pstore.flags = 0;
	if (cxt->max_dump_cnt) {
		cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
		cxt->pstore.max_reason = pdata->max_reason;
	}
	if (cxt->console_size)
		cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
	if (cxt->max_ftrace_cnt)
		cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
	if (cxt->pmsg_size)
		cxt->pstore.flags |= PSTORE_FLAGS_PMSG;

	/*
	 * Since bufsize is only used for dmesg crash dumps, it
	 * must match the size of the dprz record (after PRZ header
	 * and ECC bytes have been accounted for).
	 */
	if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
		cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
		cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
		if (!cxt->pstore.buf) {
			pr_err("cannot allocate pstore crash dump buffer\n");
			err = -ENOMEM;
			goto fail_clear;
		}
	}

	err = pstore_register(&cxt->pstore);
	if (err) {
		pr_err("registering with pstore failed\n");
		goto fail_buf;
	}

	/*
	 * Update the module parameter variables as well so they are visible
	 * through /sys/module/ramoops/parameters/
	 */
	mem_size = pdata->mem_size;
	mem_address = pdata->mem_address;
	record_size = pdata->record_size;
	ramoops_max_reason = pdata->max_reason;
	ramoops_console_size = pdata->console_size;
	ramoops_pmsg_size = pdata->pmsg_size;
	ramoops_ftrace_size = pdata->ftrace_size;

	pr_info("using 0x%lx@0x%llx, ecc: %d\n",
		cxt->size, (unsigned long long)cxt->phys_addr,
		cxt->ecc_info.ecc_size);

	return 0;

fail_buf:
	kfree(cxt->pstore.buf);
fail_clear:
	cxt->pstore.bufsize = 0;
fail_init:
	ramoops_free_przs(cxt);
fail_out:
	return err;
}

static void ramoops_remove(struct platform_device *pdev)
{
	struct ramoops_context *cxt = &oops_cxt;

	pstore_unregister(&cxt->pstore);

	kfree(cxt->pstore.buf);
	cxt->pstore.bufsize = 0;

	ramoops_free_przs(cxt);
}

static const struct of_device_id dt_match[] = {
	{ .compatible = "ramoops" },
	{}
};

static struct platform_driver ramoops_driver = {
	.probe		= ramoops_probe,
	.remove_new	= ramoops_remove,
	.driver		= {
		.name		= "ramoops",
		.of_match_table	= dt_match,
	},
};

static inline void ramoops_unregister_dummy(void)
{
	platform_device_unregister(dummy);
	dummy = NULL;
}

static void __init ramoops_register_dummy(void)
{
	struct ramoops_platform_data pdata;

	/*
	 * Prepare a dummy platform data structure to carry the module
	 * parameters. If mem_size isn't set, then there are no module
	 * parameters, and we can skip this.
	 */
	if (!mem_size)
		return;

	pr_info("using module parameters\n");

	memset(&pdata, 0, sizeof(pdata));
	pdata.mem_size = mem_size;
	pdata.mem_address = mem_address;
	pdata.mem_type = mem_type;
	pdata.record_size = record_size;
	pdata.console_size = ramoops_console_size;
	pdata.ftrace_size = ramoops_ftrace_size;
	pdata.pmsg_size = ramoops_pmsg_size;
	/* If "max_reason" is set, its value has priority over "dump_oops". */
	if (ramoops_max_reason >= 0)
		pdata.max_reason = ramoops_max_reason;
	/* Otherwise, if "dump_oops" is set, parse it into "max_reason". */
	else if (ramoops_dump_oops != -1)
		pdata.max_reason = ramoops_dump_oops ? KMSG_DUMP_OOPS
						     : KMSG_DUMP_PANIC;
	/* And if neither are explicitly set, use the default. */
	else
		pdata.max_reason = KMSG_DUMP_OOPS;
	pdata.flags = RAMOOPS_FLAG_FTRACE_PER_CPU;

	/*
	 * For backwards compatibility ramoops.ecc=1 means 16 bytes ECC
	 * (using 1 byte for ECC isn't much of use anyway).
	 */
	pdata.ecc_info.ecc_size = ramoops_ecc == 1 ? 16 : ramoops_ecc;

	dummy = platform_device_register_data(NULL, "ramoops", -1,
			&pdata, sizeof(pdata));
	if (IS_ERR(dummy)) {
		pr_info("could not create platform device: %ld\n",
			PTR_ERR(dummy));
		dummy = NULL;
	}
}

static int __init ramoops_init(void)
{
	int ret;

	ramoops_register_dummy();
	ret = platform_driver_register(&ramoops_driver);
	if (ret != 0)
		ramoops_unregister_dummy();

	return ret;
}
postcore_initcall(ramoops_init);

static void __exit ramoops_exit(void)
{
	platform_driver_unregister(&ramoops_driver);
	ramoops_unregister_dummy();
}
module_exit(ramoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");