/*
 * fs/proc/vmcore.c - Interface for accessing the crash
 * dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/* List representing chunks of contiguous memory areas and their offsets in
 * the vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

/* Stores the physical address of the elf header of the crash image. */
unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;

struct proc_dir_entry *proc_vmcore = NULL;

/* Reads from the oldmem device at the given offset, a page at a time. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);
	if (pfn > saved_max_pfn)
		return -EINVAL;

	do {
		/* Clamp each copy to the end of the current page. */
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

/* Maps a vmcore file offset to the respective physical address in memory. */
static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
			       struct vmcore **m_ptr)
{
	struct vmcore *m;
	u64 paddr;

	list_for_each_entry(m, vc_list, list) {
		u64 start, end;
		start = m->offset;
		end = m->offset + m->size - 1;
		if (offset >= start && offset <= end) {
			paddr = m->paddr + offset - start;
			*m_ptr = m;
			return paddr;
		}
	}
	*m_ptr = NULL;
	return 0;
}
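/*
 * Worked example for map_offset_to_paddr() (illustrative numbers only):
 * if a list element covers file offsets 0x2000-0x5fff (m->offset = 0x2000,
 * m->size = 0x4000) and describes memory at m->paddr = 0x100000, then a
 * read at file offset 0x2400 resolves to physical address
 * 0x100000 + (0x2400 - 0x2000) = 0x100400. An offset covered by no element
 * yields *m_ptr == NULL, which read_vmcore() below treats as -EINVAL.
 */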
/* Read from the ELF header and then the crash dump. Returns the number of
 * bytes read on success, a negative value on error.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
				size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz, nr_bytes;
	u64 start;
	struct vmcore *curr_m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = elfcorebuf_sz - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if the buffer is already filled */
		if (buflen == 0)
			return acc;
	}

	start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
	if (!curr_m)
		return -EINVAL;
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	/* Calculate left bytes in current memory segment. */
	nr_bytes = (curr_m->size - (start - curr_m->paddr));
	if (tsz > nr_bytes)
		tsz = nr_bytes;

	while (buflen) {
		tmp = read_from_oldmem(buffer, tsz, &start, 1);
		if (tmp < 0)
			return tmp;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		if (start >= (curr_m->paddr + curr_m->size)) {
			if (curr_m->list.next == &vmcore_list)
				return acc;	/* EOF */
			curr_m = list_entry(curr_m->list.next,
						struct vmcore, list);
			start = curr_m->paddr;
		}
		if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
			tsz = buflen;
		/* Calculate left bytes in current memory segment. */
		nr_bytes = (curr_m->size - (start - curr_m->paddr));
		if (tsz > nr_bytes)
			tsz = nr_bytes;
	}
	return acc;
}

static int open_vmcore(struct inode *inode, struct file *filp)
{
	return 0;
}

struct file_operations proc_vmcore_operations = {
	.read	= read_vmcore,
	.open	= open_vmcore,
};

/* Allocate a zeroed vmcore list element. */
static struct vmcore* __init get_new_element(void)
{
	struct vmcore *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p)
		memset(p, 0, sizeof(*p));
	return p;
}

/* Total file size: the headers plus the memory size of every segment. */
static u64 __init get_vmcore_size_elf64(char *elfptr)
{
	int i;
	u64 size;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

static u64 __init get_vmcore_size_elf32(char *elfptr)
{
	int i;
	u64 size;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}
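/*
 * A note on the PT_NOTE walk performed by the merge functions below
 * (illustrative): an ELF note record is an Elf{32,64}_Nhdr followed by its
 * name and descriptor, each padded to a 4-byte boundary, so one record
 * occupies
 *
 *	sizeof(Nhdr) + ((n_namesz + 3) & ~3) + ((n_descsz + 3) & ~3)
 *
 * bytes. For example, a note with n_namesz = 5 ("CORE" plus the
 * terminating NUL) and n_descsz = 336 occupies
 * sizeof(Elf64_Nhdr) + 8 + 336 bytes. Summing these record sizes gives the
 * real (used) size of each PT_NOTE segment, which may be smaller than the
 * p_memsz reserved for it.
 */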
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr, *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		/* Walk the note records to find the used size of this
		 * segment; a zero n_namesz marks the end. */
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list. */
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
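/*
 * Effect of the merge above (a sketch; segment counts are illustrative):
 *
 *	before:	[Ehdr][PT_NOTE][PT_NOTE][PT_NOTE][PT_LOAD]...[PT_LOAD]
 *	after:	[Ehdr][merged PT_NOTE][PT_LOAD]..............[PT_LOAD]
 *
 * The (nr_ptnote - 1) now-redundant program headers are squeezed out with
 * memmove() and e_phnum is reduced accordingly. The 32-bit variant below
 * does the same with Elf32 types.
 */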
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr, *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list. */
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
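/*
 * Note on p_offset (as this code assumes throughout): in the crash-time
 * ELF headers handed over by the panicked kernel, the p_offset field of
 * each PT_NOTE/PT_LOAD header carries the physical address of the segment,
 * not a file offset. The helpers below record that address in each vmcore
 * list element (new->paddr = phdr_ptr->p_offset) and then rewrite p_offset
 * to the offset the segment will occupy in the exported /proc/vmcore file.
 */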
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf64(char *elfptr,
						 struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf64_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	/* Skip Elf header and program headers. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf32(char *elfptr,
						 struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf32_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	/* Skip Elf header and program headers. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}
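/*
 * Resulting /proc/vmcore layout established by the helpers above (sketch):
 *
 *	offset 0:	ELF header
 *			program headers (merged PT_NOTE first, then PT_LOADs)
 *			note data, one contiguous chunk per original PT_NOTE
 *			memory of the first PT_LOAD segment
 *			memory of the second PT_LOAD segment
 *			...
 *
 * set_vmcore_list_offsets_elf{64,32}() assign file offsets to the list
 * elements in exactly this order, since the note chunks were queued on
 * vmcore_list before the PT_LOAD chunks.
 */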
static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
	return 0;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
	return 0;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		printk(KERN_WARNING "Warning: Core image elf header"
					" not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf64(elfcorebuf);
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf32(elfcorebuf);
	} else {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}
	return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* If elfcorehdr= has been passed in cmdline, then capture the dump. */
	if (!(elfcorehdr_addr < ELFCORE_ADDR_MAX))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		printk(KERN_WARNING "Kdump: vmcore not initialized\n");
		return rc;
	}

	/* Initialize /proc/vmcore size if proc is already up. */
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
module_init(vmcore_init)
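/*
 * Illustrative userspace usage (a sketch, not part of this file): once the
 * capture kernel has booted with an elfcorehdr= parameter, the previous
 * kernel's memory can be saved with an ordinary sequential read, e.g.
 *
 *	cp /proc/vmcore /var/crash/vmcore
 *
 * or from C (error handling omitted; out_fd is an assumed destination fd):
 *
 *	int fd = open("/proc/vmcore", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		write(out_fd, buf, n);
 *	close(fd);
 *
 * The result is a regular ELF core file that tools such as gdb or crash
 * can analyze.
 */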