// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/rslib.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/page.h>

#include "ram_internal.h"

/**
 * struct persistent_ram_buffer - persistent circular RAM buffer
 *
 * @sig:
 *	signature to indicate header (PERSISTENT_RAM_SIG xor PRZ-type value)
 * @start:
 *	offset into @data where the beginning of the stored bytes begin
 * @size:
 *	number of valid bytes stored in @data
 */
struct persistent_ram_buffer {
	uint32_t sig;
	atomic_t start;
	atomic_t size;
	uint8_t data[];
};

#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */

static inline size_t buffer_size(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->size);
}

static inline size_t buffer_start(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->start);
}

/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
	int old;
	int new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->start);
	new = old + a;
	while (unlikely(new >= prz->buffer_size))
		new -= prz->buffer_size;
	atomic_set(&prz->buffer->start, new);

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);

	return old;
}

/* increase the size counter until it hits the max size */
static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
{
	size_t old;
	size_t new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->size);
	if (old == prz->buffer_size)
		goto exit;

	new = old + a;
	if (new > prz->buffer_size)
		new = prz->buffer_size;
	atomic_set(&prz->buffer->size, new);

exit:
	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}
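
/*
 * Illustrative arithmetic (not part of the driver): with a 1000-byte
 * buffer, adding 250 to a start offset of 900 yields old = 900 and
 * new = 900 + 250 - 1000 = 150. The subtraction loop is used rather
 * than a bitmask because buffer_size need not be a power of two.
 */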

static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
	uint8_t *data, size_t len, uint8_t *ecc)
{
	int i;

	/* Initialize the parity buffer */
	memset(prz->ecc_info.par, 0,
	       prz->ecc_info.ecc_size * sizeof(prz->ecc_info.par[0]));
	encode_rs8(prz->rs_decoder, data, len, prz->ecc_info.par, 0);
	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		ecc[i] = prz->ecc_info.par[i];
}

static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
	void *data, size_t len, uint8_t *ecc)
{
	int i;

	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		prz->ecc_info.par[i] = ecc[i];
	return decode_rs8(prz->rs_decoder, data, prz->ecc_info.par, len,
			  NULL, 0, NULL, 0, NULL);
}

static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
	unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *buffer_end = buffer->data + prz->buffer_size;
	uint8_t *block;
	uint8_t *par;
	int ecc_block_size = prz->ecc_info.block_size;
	int ecc_size = prz->ecc_info.ecc_size;
	int size = ecc_block_size;

	if (!ecc_size)
		return;

	block = buffer->data + (start & ~(ecc_block_size - 1));
	par = prz->par_buffer + (start / ecc_block_size) * ecc_size;

	do {
		if (block + ecc_block_size > buffer_end)
			size = buffer_end - block;
		persistent_ram_encode_rs8(prz, block, size, par);
		block += ecc_block_size;
		par += ecc_size;
	} while (block < buffer->data + start + count);
}

static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;

	if (!prz->ecc_info.ecc_size)
		return;

	persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
				  prz->par_header);
}

static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *block;
	uint8_t *par;

	if (!prz->ecc_info.ecc_size)
		return;

	block = buffer->data;
	par = prz->par_buffer;
	while (block < buffer->data + buffer_size(prz)) {
		int numerr;
		int size = prz->ecc_info.block_size;

		if (block + size > buffer->data + prz->buffer_size)
			size = buffer->data + prz->buffer_size - block;
		numerr = persistent_ram_decode_rs8(prz, block, size, par);
		if (numerr > 0) {
			pr_devel("error in block %p, %d\n", block, numerr);
			prz->corrected_bytes += numerr;
		} else if (numerr < 0) {
			pr_devel("uncorrectable error in block %p\n", block);
			prz->bad_blocks++;
		}
		block += prz->ecc_info.block_size;
		par += prz->ecc_info.ecc_size;
	}
}

static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
				   struct persistent_ram_ecc_info *ecc_info)
{
	int numerr;
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ecc_blocks;
	size_t ecc_total;

	if (!ecc_info || !ecc_info->ecc_size)
		return 0;

	prz->ecc_info.block_size = ecc_info->block_size ?: 128;
	prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16;
	prz->ecc_info.symsize = ecc_info->symsize ?: 8;
	prz->ecc_info.poly = ecc_info->poly ?: 0x11d;

	ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size,
				  prz->ecc_info.block_size +
				  prz->ecc_info.ecc_size);
	ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size;
	if (ecc_total >= prz->buffer_size) {
		pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n",
		       __func__, prz->ecc_info.ecc_size,
		       ecc_total, prz->buffer_size);
		return -EINVAL;
	}

	prz->buffer_size -= ecc_total;
	prz->par_buffer = buffer->data + prz->buffer_size;
	prz->par_header = prz->par_buffer +
			  ecc_blocks * prz->ecc_info.ecc_size;

	/*
	 * first consecutive root is 0
	 * primitive element to generate roots = 1
	 */
	prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly,
				  0, 1, prz->ecc_info.ecc_size);
	if (prz->rs_decoder == NULL) {
		pr_info("init_rs failed\n");
		return -EINVAL;
	}

	/* allocate workspace instead of using stack VLA */
	prz->ecc_info.par = kmalloc_array(prz->ecc_info.ecc_size,
					  sizeof(*prz->ecc_info.par),
					  GFP_KERNEL);
	if (!prz->ecc_info.par) {
		pr_err("cannot allocate ECC parity workspace\n");
		return -ENOMEM;
	}

	prz->corrected_bytes = 0;
	prz->bad_blocks = 0;

	numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
					   prz->par_header);
	if (numerr > 0) {
		pr_info("error in header, %d\n", numerr);
		prz->corrected_bytes += numerr;
	} else if (numerr < 0) {
		pr_info_ratelimited("uncorrectable error in header\n");
		prz->bad_blocks++;
	}

	return 0;
}
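
/*
 * Sizing sketch (illustrative numbers only): with the defaults of
 * block_size = 128 and ecc_size = 16, a 4096-byte data area gives
 *
 *	ecc_blocks = DIV_ROUND_UP(4096 - 16, 128 + 16) = 29
 *	ecc_total  = (29 + 1) * 16 = 480
 *
 * so the last 480 bytes of the zone hold the per-block parity plus one
 * extra parity slot for the header, leaving 3616 bytes for log data.
 */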

ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
	char *str, size_t len)
{
	ssize_t ret;

	if (!prz->ecc_info.ecc_size)
		return 0;

	if (prz->corrected_bytes || prz->bad_blocks)
		ret = snprintf(str, len,
			"\nECC: %d Corrected bytes, %d unrecoverable blocks\n",
			prz->corrected_bytes, prz->bad_blocks);
	else
		ret = snprintf(str, len, "\nECC: No errors detected\n");

	return ret;
}

static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
	const void *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;

	memcpy_toio(buffer->data + start, s, count);
	persistent_ram_update_ecc(prz, start, count);
}

static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ret = unlikely(copy_from_user(buffer->data + start, s, count)) ?
		-EFAULT : 0;

	persistent_ram_update_ecc(prz, start, count);
	return ret;
}

void persistent_ram_save_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	size_t size = buffer_size(prz);
	size_t start = buffer_start(prz);

	if (!size)
		return;

	if (!prz->old_log) {
		persistent_ram_ecc_old(prz);
		prz->old_log = kmalloc(size, GFP_KERNEL);
	}
	if (!prz->old_log) {
		pr_err("failed to allocate buffer\n");
		return;
	}

	prz->old_log_size = size;
	memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
	memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
}

int notrace persistent_ram_write(struct persistent_ram_zone *prz,
	const void *s, unsigned int count)
{
	int rem;
	int c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		persistent_ram_update(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	persistent_ram_update(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return count;
}
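
/*
 * Usage sketch (hypothetical caller, illustration only). Once the zone
 * wraps, the oldest bytes are silently overwritten, and the full @count
 * is always reported as written:
 *
 *	static void example_log(struct persistent_ram_zone *prz,
 *				const char *msg)
 *	{
 *		persistent_ram_write(prz, msg, strlen(msg));
 *	}
 */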

int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int count)
{
	int rem, ret = 0, c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		ret = persistent_ram_update_user(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	if (likely(!ret))
		ret = persistent_ram_update_user(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return unlikely(ret) ? ret : count;
}

size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
{
	return prz->old_log_size;
}

void *persistent_ram_old(struct persistent_ram_zone *prz)
{
	return prz->old_log;
}

void persistent_ram_free_old(struct persistent_ram_zone *prz)
{
	kfree(prz->old_log);
	prz->old_log = NULL;
	prz->old_log_size = 0;
}

void persistent_ram_zap(struct persistent_ram_zone *prz)
{
	atomic_set(&prz->buffer->start, 0);
	atomic_set(&prz->buffer->size, 0);
	persistent_ram_update_header_ecc(prz);
}

#define MEM_TYPE_WCOMBINE	0
#define MEM_TYPE_NONCACHED	1
#define MEM_TYPE_NORMAL		2

static void *persistent_ram_vmap(phys_addr_t start, size_t size,
		unsigned int memtype)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	pgprot_t prot;
	unsigned int i;
	void *vaddr;

	page_start = start - offset_in_page(start);
	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

	switch (memtype) {
	case MEM_TYPE_NORMAL:
		prot = PAGE_KERNEL;
		break;
	case MEM_TYPE_NONCACHED:
		prot = pgprot_noncached(PAGE_KERNEL);
		break;
	case MEM_TYPE_WCOMBINE:
		prot = pgprot_writecombine(PAGE_KERNEL);
		break;
	default:
		pr_err("invalid mem_type=%d\n", memtype);
		return NULL;
	}

	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err("%s: Failed to allocate array for %u pages\n",
		       __func__, page_count);
		return NULL;
	}

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;

		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}
	/*
	 * VM_IOREMAP used here to bypass this region during vread()
	 * and kmap_atomic() (i.e. kcore) to avoid __va() failures.
	 */
	vaddr = vmap(pages, page_count, VM_MAP | VM_IOREMAP, prot);
	kfree(pages);
	/* Don't let a failed vmap() leak out as a bogus non-NULL pointer. */
	if (!vaddr)
		return NULL;

	/*
	 * Since vmap() uses page granularity, we must add the offset
	 * into the page here, to get the byte granularity address
	 * into the mapping to represent the actual "start" location.
	 */
	return vaddr + offset_in_page(start);
}
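
/*
 * Address-arithmetic example (illustrative values): for start = 0x40000321
 * and size = 0x2000 with 4 KiB pages, offset_in_page(start) is 0x321, so
 * page_start = 0x40000000 and page_count = DIV_ROUND_UP(0x2321, 0x1000) = 3,
 * and the pointer handed back is vaddr + 0x321 to restore byte granularity.
 */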

static void *persistent_ram_iomap(phys_addr_t start, size_t size,
		unsigned int memtype, char *label)
{
	void *va;

	if (!request_mem_region(start, size, label ?: "ramoops")) {
		pr_err("request mem region (%s 0x%llx@0x%llx) failed\n",
			label ?: "ramoops",
			(unsigned long long)size, (unsigned long long)start);
		return NULL;
	}

	if (memtype)
		va = ioremap(start, size);
	else
		va = ioremap_wc(start, size);

	/*
	 * Since request_mem_region() and ioremap() are byte-granularity
	 * there is no need to handle anything special like we do in the
	 * vmap() case in persistent_ram_vmap() above.
	 */
	return va;
}

static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
		struct persistent_ram_zone *prz, int memtype)
{
	prz->paddr = start;
	prz->size = size;

	if (pfn_valid(start >> PAGE_SHIFT))
		prz->vaddr = persistent_ram_vmap(start, size, memtype);
	else
		prz->vaddr = persistent_ram_iomap(start, size, memtype,
						  prz->label);

	if (!prz->vaddr) {
		pr_err("%s: Failed to map 0x%llx bytes at 0x%llx\n", __func__,
			(unsigned long long)size, (unsigned long long)start);
		return -ENOMEM;
	}

	prz->buffer = prz->vaddr;
	prz->buffer_size = size - sizeof(struct persistent_ram_buffer);

	return 0;
}

static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
				    struct persistent_ram_ecc_info *ecc_info)
{
	int ret;
	bool zap = !!(prz->flags & PRZ_FLAG_ZAP_OLD);

	ret = persistent_ram_init_ecc(prz, ecc_info);
	if (ret) {
		pr_warn("ECC failed %s\n", prz->label);
		return ret;
	}

	sig ^= PERSISTENT_RAM_SIG;

	if (prz->buffer->sig == sig) {
		if (buffer_size(prz) == 0) {
			pr_debug("found existing empty buffer\n");
			return 0;
		}

		if (buffer_size(prz) > prz->buffer_size ||
		    buffer_start(prz) > buffer_size(prz)) {
			pr_info("found existing invalid buffer, size %zu, start %zu\n",
				buffer_size(prz), buffer_start(prz));
			zap = true;
		} else {
			pr_debug("found existing buffer, size %zu, start %zu\n",
				 buffer_size(prz), buffer_start(prz));
			persistent_ram_save_old(prz);
		}
	} else {
		pr_debug("no valid data in buffer (sig = 0x%08x)\n",
			 prz->buffer->sig);
		prz->buffer->sig = sig;
		zap = true;
	}

	/* Reset missing, invalid, or single-use memory area. */
	if (zap)
		persistent_ram_zap(prz);

	return 0;
}
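
/*
 * Signature example (illustrative): a zone created with sig == 0 stores
 * PERSISTENT_RAM_SIG itself (0x43474244, "DBGC") in the header, while a
 * nonzero per-type value is xor'ed with the magic first, so zones of
 * different types never accept each other's headers across reboots.
 */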

void persistent_ram_free(struct persistent_ram_zone **_prz)
{
	struct persistent_ram_zone *prz;

	if (!_prz)
		return;

	prz = *_prz;
	if (!prz)
		return;

	if (prz->vaddr) {
		if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
			/* We must vunmap() at page-granularity. */
			vunmap(prz->vaddr - offset_in_page(prz->paddr));
		} else {
			iounmap(prz->vaddr);
			release_mem_region(prz->paddr, prz->size);
		}
		prz->vaddr = NULL;
	}
	if (prz->rs_decoder) {
		free_rs(prz->rs_decoder);
		prz->rs_decoder = NULL;
	}
	kfree(prz->ecc_info.par);
	prz->ecc_info.par = NULL;

	persistent_ram_free_old(prz);
	kfree(prz->label);
	kfree(prz);
	*_prz = NULL;
}

struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
			u32 sig, struct persistent_ram_ecc_info *ecc_info,
			unsigned int memtype, u32 flags, char *label)
{
	struct persistent_ram_zone *prz;
	int ret = -ENOMEM;

	prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
	if (!prz) {
		pr_err("failed to allocate persistent ram zone\n");
		goto err;
	}

	/* Initialize general buffer state. */
	raw_spin_lock_init(&prz->buffer_lock);
	prz->flags = flags;
	prz->label = kstrdup(label, GFP_KERNEL);

	ret = persistent_ram_buffer_map(start, size, prz, memtype);
	if (ret)
		goto err;

	ret = persistent_ram_post_init(prz, sig, ecc_info);
	if (ret)
		goto err;

	pr_debug("attached %s 0x%zx@0x%llx: %zu header, %zu data, %zu ecc (%d/%d)\n",
		 prz->label, prz->size, (unsigned long long)prz->paddr,
		 sizeof(*prz->buffer), prz->buffer_size,
		 prz->size - sizeof(*prz->buffer) - prz->buffer_size,
		 prz->ecc_info.ecc_size, prz->ecc_info.block_size);

	return prz;
err:
	persistent_ram_free(&prz);
	return ERR_PTR(ret);
}
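
/*
 * Lifecycle sketch (hypothetical caller, illustration only), roughly what
 * a ramoops-style user of this API does. Here paddr, size, ecc_info, buf,
 * len, and recover() are placeholders, not symbols defined by this file:
 *
 *	struct persistent_ram_zone *prz;
 *
 *	prz = persistent_ram_new(paddr, size, 0, &ecc_info,
 *				 MEM_TYPE_WCOMBINE, 0, "example");
 *	if (IS_ERR(prz))
 *		return PTR_ERR(prz);
 *
 *	if (persistent_ram_old_size(prz))
 *		recover(persistent_ram_old(prz),
 *			persistent_ram_old_size(prz));
 *	persistent_ram_free_old(prz);
 *	persistent_ram_zap(prz);
 *
 *	persistent_ram_write(prz, buf, len);
 *
 *	persistent_ram_free(&prz);
 */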