/*
 * zcore module to export memory content and register sets for creating system
 * dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
 * dump format as s390 standalone dumps.
 *
 * For more information please refer to Documentation/s390/zfcpdump.txt
 *
 * Copyright IBM Corp. 2003, 2008
 * Author(s): Michael Holzheu
 */

#define KMSG_COMPONENT "zdump"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/debug.h>
#include <asm/processor.h>
#include <asm/irqflags.h>
#include <asm/checksum.h>
#include "sclp.h"

/* Trace helper: log into the "zcore" s390 debug feature at level 1 */
#define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x)

/* Copy direction for memcpy_hsa() */
#define TO_USER		1
#define TO_KERNEL	0
#define CHUNK_INFO_SIZE	34 /* 2 16-byte char, each followed by blank */

enum arch_id {
	ARCH_S390	= 0,
	ARCH_S390X	= 1,
};

/* dump system info */

struct sys_info {
	enum arch_id	 arch;		/* architecture of the dumped system */
	unsigned long	 sa_base;	/* lowcore save area base address */
	u32		 sa_size;	/* size of one CPU save area */
	int		 cpu_map[NR_CPUS];
	unsigned long	 mem_size;	/* highest memory address (end) */
	struct save_area lc_mask;	/* byte mask: which save-area bytes to copy */
};

/* IPL information block pointer + checksum, as stored at __LC_DUMP_REIPL */
struct ipib_info {
	unsigned long	ipib;
	u32		checksum;
} __attribute__((packed));

static struct sys_info sys_info;
static struct debug_info *zcore_dbf;
static int hsa_available;			/* non-zero while the HSA may be read */
static struct dentry *zcore_dir;
static struct dentry *zcore_file;
static struct dentry *zcore_memmap_file;
static struct dentry *zcore_reipl_file;
static struct dentry *zcore_hsa_file;
static struct ipl_parameter_block *ipl_block;

/*
 * Copy memory from HSA to kernel or user memory (not reentrant):
 *
 * @dest:  Kernel or user buffer where memory should be copied to
 * @src:   Start address within HSA where data should be copied
 * @count: Size of buffer, which should be copied
 * @mode:  Either TO_KERNEL or TO_USER
 */
int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode)
{
	int offs, blk_num;
	/* Static bounce buffer makes this function non-reentrant. */
	static char buf[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

	if (!hsa_available)
		return -ENODATA;
	if (count == 0)
		return 0;

	/* copy first block */
	offs = 0;
	if ((src % PAGE_SIZE) != 0) {
		/* NOTE(review): SDIAS block numbers appear to be offset by 2
		 * from page numbers — confirm against the SCLP SDIAS spec. */
		blk_num = src / PAGE_SIZE + 2;
		if (sclp_sdias_copy(buf, blk_num, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			return -EIO;
		}
		offs = min((PAGE_SIZE - (src % PAGE_SIZE)), count);
		if (mode == TO_USER) {
			if (copy_to_user((__force __user void*) dest,
					 buf + (src % PAGE_SIZE), offs))
				return -EFAULT;
		} else
			memcpy(dest, buf + (src % PAGE_SIZE), offs);
	}
	if (offs == count)
		goto out;

	/* copy middle */
	for (; (offs + PAGE_SIZE) <= count; offs += PAGE_SIZE) {
		blk_num = (src + offs) / PAGE_SIZE + 2;
		if (sclp_sdias_copy(buf, blk_num, 1)) {
			TRACE("sclp_sdias_copy() failed\n");
			return -EIO;
		}
		if (mode == TO_USER) {
			if (copy_to_user((__force __user void*) dest + offs,
					 buf, PAGE_SIZE))
				return -EFAULT;
		} else
			memcpy(dest + offs, buf, PAGE_SIZE);
	}
	if (offs == count)
		goto out;

	/* copy last block */
	blk_num = (src + offs) / PAGE_SIZE + 2;
	if (sclp_sdias_copy(buf, blk_num, 1)) {
		TRACE("sclp_sdias_copy() failed\n");
		return -EIO;
	}
	if (mode == TO_USER) {
		if (copy_to_user((__force __user void*) dest + offs, buf,
				 count - offs))
			return -EFAULT;
	} else
		memcpy(dest + offs, buf, count - offs);
out:
	return 0;
}

/* Convenience wrapper: copy @count bytes from HSA @src to user space @dest. */
static int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count)
{
	return memcpy_hsa((void __force *) dest, src, count, TO_USER);
}

/* Convenience wrapper: copy @count bytes from HSA @src to kernel memory. */
static int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count)
{
	return memcpy_hsa(dest, src, count, TO_KERNEL);
}

/*
 * Register the boot CPU's save area: its register set at dump time lives in
 * the lowcore copy stored in the HSA.
 *
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */
static int __init init_cpu_info(enum arch_id arch)
{
	struct save_area *sa;

	/* get info for boot cpu from lowcore, stored in the HSA */

	sa = dump_save_area_create(0);
	if (!sa)
		return -ENOMEM;
	if (memcpy_hsa_kernel(sa, sys_info.sa_base, sys_info.sa_size) < 0) {
		TRACE("could not copy from HSA\n");
		/* NOTE(review): if dump_save_area_create(0) registered @sa in
		 * dump_save_areas, this kfree() leaves a dangling pointer
		 * there — verify against the dump_save_area_create()
		 * implementation. */
		kfree(sa);
		return -EIO;
	}
	return 0;
}

/* Serializes read/lseek on the "mem" debugfs file */
static DEFINE_MUTEX(zcore_mutex);

#define DUMP_VERSION	0x5
#define DUMP_MAGIC	0xa8190173618f23fdULL
#define DUMP_ARCH_S390X	2
#define DUMP_ARCH_S390	1
#define HEADER_SIZE	4096

/* dump header dumped according to s390 crash dump format */

struct zcore_header {
	u64 magic;
	u32 version;
	u32 header_size;
	u32 dump_level;
	u32 page_size;
	u64 mem_size;
	u64 mem_start;
	u64 mem_end;
	u32 num_pages;
	u32 pad1;
	u64 tod;			/* TOD clock value at dump time */
	struct cpuid cpu_id;
	u32 arch_id;
	u32 volnr;
	u32 build_arch;
	u64 rmem_size;
	u8 mvdump;
	u16 cpu_cnt;			/* CPUs with non-zero prefix register */
	u16 real_cpu_cnt;		/* all CPUs with a save area */
	u8 end_pad1[0x200-0x061];	/* pad to offset 0x200 */
	u64 mvdump_sign;
	u64 mvdump_zipl_time;
	u8 end_pad2[0x800-0x210];	/* pad to offset 0x800 */
	u32 lc_vec[512];		/* prefix (lowcore) addresses per CPU */
} __attribute__((packed,__aligned__(16)));

static struct zcore_header zcore_header = {
	.magic		= DUMP_MAGIC,
	.version	= DUMP_VERSION,
	.header_size	= 4096,
	.dump_level	= 0,
	.page_size	= PAGE_SIZE,
	.mem_start	= 0,
#ifdef CONFIG_64BIT
	.build_arch	= DUMP_ARCH_S390X,
#else
	.build_arch	= DUMP_ARCH_S390,
#endif
};

/*
 * Copy lowcore info to buffer. Use map in order to copy only register parts.
218 * 219 * @buf: User buffer 220 * @sa: Pointer to save area 221 * @sa_off: Offset in save area to copy 222 * @len: Number of bytes to copy 223 */ 224 static int copy_lc(void __user *buf, void *sa, int sa_off, int len) 225 { 226 int i; 227 char *lc_mask = (char*)&sys_info.lc_mask; 228 229 for (i = 0; i < len; i++) { 230 if (!lc_mask[i + sa_off]) 231 continue; 232 if (copy_to_user(buf + i, sa + sa_off + i, 1)) 233 return -EFAULT; 234 } 235 return 0; 236 } 237 238 /* 239 * Copy lowcores info to memory, if necessary 240 * 241 * @buf: User buffer 242 * @addr: Start address of buffer in dump memory 243 * @count: Size of buffer 244 */ 245 static int zcore_add_lc(char __user *buf, unsigned long start, size_t count) 246 { 247 unsigned long end; 248 int i; 249 250 if (count == 0) 251 return 0; 252 253 end = start + count; 254 for (i = 0; i < dump_save_areas.count; i++) { 255 unsigned long cp_start, cp_end; /* copy range */ 256 unsigned long sa_start, sa_end; /* save area range */ 257 unsigned long prefix; 258 unsigned long sa_off, len, buf_off; 259 struct save_area *save_area = dump_save_areas.areas[i]; 260 261 prefix = save_area->pref_reg; 262 sa_start = prefix + sys_info.sa_base; 263 sa_end = prefix + sys_info.sa_base + sys_info.sa_size; 264 265 if ((end < sa_start) || (start > sa_end)) 266 continue; 267 cp_start = max(start, sa_start); 268 cp_end = min(end, sa_end); 269 270 buf_off = cp_start - start; 271 sa_off = cp_start - sa_start; 272 len = cp_end - cp_start; 273 274 TRACE("copy_lc for: %lx\n", start); 275 if (copy_lc(buf + buf_off, save_area, sa_off, len)) 276 return -EFAULT; 277 } 278 return 0; 279 } 280 281 /* 282 * Release the HSA 283 */ 284 static void release_hsa(void) 285 { 286 diag308(DIAG308_REL_HSA, NULL); 287 hsa_available = 0; 288 } 289 290 /* 291 * Read routine for zcore character device 292 * First 4K are dump header 293 * Next 32MB are HSA Memory 294 * Rest is read from absolute Memory 295 */ 296 static ssize_t zcore_read(struct file *file, char 
__user *buf, size_t count, 297 loff_t *ppos) 298 { 299 unsigned long mem_start; /* Start address in memory */ 300 size_t mem_offs; /* Offset in dump memory */ 301 size_t hdr_count; /* Size of header part of output buffer */ 302 size_t size; 303 int rc; 304 305 mutex_lock(&zcore_mutex); 306 307 if (*ppos > (sys_info.mem_size + HEADER_SIZE)) { 308 rc = -EINVAL; 309 goto fail; 310 } 311 312 count = min(count, (size_t) (sys_info.mem_size + HEADER_SIZE - *ppos)); 313 314 /* Copy dump header */ 315 if (*ppos < HEADER_SIZE) { 316 size = min(count, (size_t) (HEADER_SIZE - *ppos)); 317 if (copy_to_user(buf, &zcore_header + *ppos, size)) { 318 rc = -EFAULT; 319 goto fail; 320 } 321 hdr_count = size; 322 mem_start = 0; 323 } else { 324 hdr_count = 0; 325 mem_start = *ppos - HEADER_SIZE; 326 } 327 328 mem_offs = 0; 329 330 /* Copy from HSA data */ 331 if (*ppos < sclp_get_hsa_size() + HEADER_SIZE) { 332 size = min((count - hdr_count), 333 (size_t) (sclp_get_hsa_size() - mem_start)); 334 rc = memcpy_hsa_user(buf + hdr_count, mem_start, size); 335 if (rc) 336 goto fail; 337 338 mem_offs += size; 339 } 340 341 /* Copy from real mem */ 342 size = count - mem_offs - hdr_count; 343 rc = copy_to_user_real(buf + hdr_count + mem_offs, 344 (void *) mem_start + mem_offs, size); 345 if (rc) 346 goto fail; 347 348 /* 349 * Since s390 dump analysis tools like lcrash or crash 350 * expect register sets in the prefix pages of the cpus, 351 * we copy them into the read buffer, if necessary. 352 * buf + hdr_count: Start of memory part of output buffer 353 * mem_start: Start memory address to copy from 354 * count - hdr_count: Size of memory area to copy 355 */ 356 if (zcore_add_lc(buf + hdr_count, mem_start, count - hdr_count)) { 357 rc = -EFAULT; 358 goto fail; 359 } 360 *ppos += count; 361 fail: 362 mutex_unlock(&zcore_mutex); 363 return (rc < 0) ? 
rc : count; 364 } 365 366 static int zcore_open(struct inode *inode, struct file *filp) 367 { 368 if (!hsa_available) 369 return -ENODATA; 370 else 371 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; 372 } 373 374 static int zcore_release(struct inode *inode, struct file *filep) 375 { 376 if (hsa_available) 377 release_hsa(); 378 return 0; 379 } 380 381 static loff_t zcore_lseek(struct file *file, loff_t offset, int orig) 382 { 383 loff_t rc; 384 385 mutex_lock(&zcore_mutex); 386 switch (orig) { 387 case 0: 388 file->f_pos = offset; 389 rc = file->f_pos; 390 break; 391 case 1: 392 file->f_pos += offset; 393 rc = file->f_pos; 394 break; 395 default: 396 rc = -EINVAL; 397 } 398 mutex_unlock(&zcore_mutex); 399 return rc; 400 } 401 402 static const struct file_operations zcore_fops = { 403 .owner = THIS_MODULE, 404 .llseek = zcore_lseek, 405 .read = zcore_read, 406 .open = zcore_open, 407 .release = zcore_release, 408 }; 409 410 static ssize_t zcore_memmap_read(struct file *filp, char __user *buf, 411 size_t count, loff_t *ppos) 412 { 413 return simple_read_from_buffer(buf, count, ppos, filp->private_data, 414 MEMORY_CHUNKS * CHUNK_INFO_SIZE); 415 } 416 417 static int zcore_memmap_open(struct inode *inode, struct file *filp) 418 { 419 int i; 420 char *buf; 421 struct mem_chunk *chunk_array; 422 423 chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk), 424 GFP_KERNEL); 425 if (!chunk_array) 426 return -ENOMEM; 427 detect_memory_layout(chunk_array, 0); 428 buf = kzalloc(MEMORY_CHUNKS * CHUNK_INFO_SIZE, GFP_KERNEL); 429 if (!buf) { 430 kfree(chunk_array); 431 return -ENOMEM; 432 } 433 for (i = 0; i < MEMORY_CHUNKS; i++) { 434 sprintf(buf + (i * CHUNK_INFO_SIZE), "%016llx %016llx ", 435 (unsigned long long) chunk_array[i].addr, 436 (unsigned long long) chunk_array[i].size); 437 if (chunk_array[i].size == 0) 438 break; 439 } 440 kfree(chunk_array); 441 filp->private_data = buf; 442 return nonseekable_open(inode, filp); 443 } 444 445 static int 
zcore_memmap_release(struct inode *inode, struct file *filp) 446 { 447 kfree(filp->private_data); 448 return 0; 449 } 450 451 static const struct file_operations zcore_memmap_fops = { 452 .owner = THIS_MODULE, 453 .read = zcore_memmap_read, 454 .open = zcore_memmap_open, 455 .release = zcore_memmap_release, 456 .llseek = no_llseek, 457 }; 458 459 static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf, 460 size_t count, loff_t *ppos) 461 { 462 if (ipl_block) { 463 diag308(DIAG308_SET, ipl_block); 464 diag308(DIAG308_IPL, NULL); 465 } 466 return count; 467 } 468 469 static int zcore_reipl_open(struct inode *inode, struct file *filp) 470 { 471 return nonseekable_open(inode, filp); 472 } 473 474 static int zcore_reipl_release(struct inode *inode, struct file *filp) 475 { 476 return 0; 477 } 478 479 static const struct file_operations zcore_reipl_fops = { 480 .owner = THIS_MODULE, 481 .write = zcore_reipl_write, 482 .open = zcore_reipl_open, 483 .release = zcore_reipl_release, 484 .llseek = no_llseek, 485 }; 486 487 static ssize_t zcore_hsa_read(struct file *filp, char __user *buf, 488 size_t count, loff_t *ppos) 489 { 490 static char str[18]; 491 492 if (hsa_available) 493 snprintf(str, sizeof(str), "%lx\n", sclp_get_hsa_size()); 494 else 495 snprintf(str, sizeof(str), "0\n"); 496 return simple_read_from_buffer(buf, count, ppos, str, strlen(str)); 497 } 498 499 static ssize_t zcore_hsa_write(struct file *filp, const char __user *buf, 500 size_t count, loff_t *ppos) 501 { 502 char value; 503 504 if (*ppos != 0) 505 return -EPIPE; 506 if (copy_from_user(&value, buf, 1)) 507 return -EFAULT; 508 if (value != '0') 509 return -EINVAL; 510 release_hsa(); 511 return count; 512 } 513 514 static const struct file_operations zcore_hsa_fops = { 515 .owner = THIS_MODULE, 516 .write = zcore_hsa_write, 517 .read = zcore_hsa_read, 518 .open = nonseekable_open, 519 .llseek = no_llseek, 520 }; 521 522 #ifdef CONFIG_32BIT 523 524 static void __init set_lc_mask(struct 
save_area *map) 525 { 526 memset(&map->ext_save, 0xff, sizeof(map->ext_save)); 527 memset(&map->timer, 0xff, sizeof(map->timer)); 528 memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp)); 529 memset(&map->psw, 0xff, sizeof(map->psw)); 530 memset(&map->pref_reg, 0xff, sizeof(map->pref_reg)); 531 memset(&map->acc_regs, 0xff, sizeof(map->acc_regs)); 532 memset(&map->fp_regs, 0xff, sizeof(map->fp_regs)); 533 memset(&map->gp_regs, 0xff, sizeof(map->gp_regs)); 534 memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs)); 535 } 536 537 #else /* CONFIG_32BIT */ 538 539 static void __init set_lc_mask(struct save_area *map) 540 { 541 memset(&map->fp_regs, 0xff, sizeof(map->fp_regs)); 542 memset(&map->gp_regs, 0xff, sizeof(map->gp_regs)); 543 memset(&map->psw, 0xff, sizeof(map->psw)); 544 memset(&map->pref_reg, 0xff, sizeof(map->pref_reg)); 545 memset(&map->fp_ctrl_reg, 0xff, sizeof(map->fp_ctrl_reg)); 546 memset(&map->tod_reg, 0xff, sizeof(map->tod_reg)); 547 memset(&map->timer, 0xff, sizeof(map->timer)); 548 memset(&map->clk_cmp, 0xff, sizeof(map->clk_cmp)); 549 memset(&map->acc_regs, 0xff, sizeof(map->acc_regs)); 550 memset(&map->ctrl_regs, 0xff, sizeof(map->ctrl_regs)); 551 } 552 553 #endif /* CONFIG_32BIT */ 554 555 /* 556 * Initialize dump globals for a given architecture 557 */ 558 static int __init sys_info_init(enum arch_id arch, unsigned long mem_end) 559 { 560 int rc; 561 562 switch (arch) { 563 case ARCH_S390X: 564 pr_alert("DETECTED 'S390X (64 bit) OS'\n"); 565 break; 566 case ARCH_S390: 567 pr_alert("DETECTED 'S390 (32 bit) OS'\n"); 568 break; 569 default: 570 pr_alert("0x%x is an unknown architecture.\n",arch); 571 return -EINVAL; 572 } 573 sys_info.sa_base = SAVE_AREA_BASE; 574 sys_info.sa_size = sizeof(struct save_area); 575 sys_info.arch = arch; 576 set_lc_mask(&sys_info.lc_mask); 577 rc = init_cpu_info(arch); 578 if (rc) 579 return rc; 580 sys_info.mem_size = mem_end; 581 582 return 0; 583 } 584 585 static int __init check_sdias(void) 586 { 587 if 
(!sclp_get_hsa_size()) { 588 TRACE("Could not determine HSA size\n"); 589 return -ENODEV; 590 } 591 return 0; 592 } 593 594 static int __init get_mem_info(unsigned long *mem, unsigned long *end) 595 { 596 int i; 597 struct mem_chunk *chunk_array; 598 599 chunk_array = kzalloc(MEMORY_CHUNKS * sizeof(struct mem_chunk), 600 GFP_KERNEL); 601 if (!chunk_array) 602 return -ENOMEM; 603 detect_memory_layout(chunk_array, 0); 604 for (i = 0; i < MEMORY_CHUNKS; i++) { 605 if (chunk_array[i].size == 0) 606 break; 607 *mem += chunk_array[i].size; 608 *end = max(*end, chunk_array[i].addr + chunk_array[i].size); 609 } 610 kfree(chunk_array); 611 return 0; 612 } 613 614 static void __init zcore_header_init(int arch, struct zcore_header *hdr, 615 unsigned long mem_size) 616 { 617 u32 prefix; 618 int i; 619 620 if (arch == ARCH_S390X) 621 hdr->arch_id = DUMP_ARCH_S390X; 622 else 623 hdr->arch_id = DUMP_ARCH_S390; 624 hdr->mem_size = mem_size; 625 hdr->rmem_size = mem_size; 626 hdr->mem_end = sys_info.mem_size; 627 hdr->num_pages = mem_size / PAGE_SIZE; 628 hdr->tod = get_tod_clock(); 629 get_cpu_id(&hdr->cpu_id); 630 for (i = 0; i < dump_save_areas.count; i++) { 631 prefix = dump_save_areas.areas[i]->pref_reg; 632 hdr->real_cpu_cnt++; 633 if (!prefix) 634 continue; 635 hdr->lc_vec[hdr->cpu_cnt] = prefix; 636 hdr->cpu_cnt++; 637 } 638 } 639 640 /* 641 * Provide IPL parameter information block from either HSA or memory 642 * for future reipl 643 */ 644 static int __init zcore_reipl_init(void) 645 { 646 struct ipib_info ipib_info; 647 int rc; 648 649 rc = memcpy_hsa_kernel(&ipib_info, __LC_DUMP_REIPL, sizeof(ipib_info)); 650 if (rc) 651 return rc; 652 if (ipib_info.ipib == 0) 653 return 0; 654 ipl_block = (void *) __get_free_page(GFP_KERNEL); 655 if (!ipl_block) 656 return -ENOMEM; 657 if (ipib_info.ipib < sclp_get_hsa_size()) 658 rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE); 659 else 660 rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE); 661 if (rc || 
csum_partial(ipl_block, ipl_block->hdr.len, 0) != 662 ipib_info.checksum) { 663 TRACE("Checksum does not match\n"); 664 free_page((unsigned long) ipl_block); 665 ipl_block = NULL; 666 } 667 return 0; 668 } 669 670 static int __init zcore_init(void) 671 { 672 unsigned long mem_size, mem_end; 673 unsigned char arch; 674 int rc; 675 676 mem_size = mem_end = 0; 677 if (ipl_info.type != IPL_TYPE_FCP_DUMP) 678 return -ENODATA; 679 if (OLDMEM_BASE) 680 return -ENODATA; 681 682 zcore_dbf = debug_register("zcore", 4, 1, 4 * sizeof(long)); 683 debug_register_view(zcore_dbf, &debug_sprintf_view); 684 debug_set_level(zcore_dbf, 6); 685 686 TRACE("devno: %x\n", ipl_info.data.fcp.dev_id.devno); 687 TRACE("wwpn: %llx\n", (unsigned long long) ipl_info.data.fcp.wwpn); 688 TRACE("lun: %llx\n", (unsigned long long) ipl_info.data.fcp.lun); 689 690 rc = sclp_sdias_init(); 691 if (rc) 692 goto fail; 693 694 rc = check_sdias(); 695 if (rc) 696 goto fail; 697 hsa_available = 1; 698 699 rc = memcpy_hsa_kernel(&arch, __LC_AR_MODE_ID, 1); 700 if (rc) 701 goto fail; 702 703 #ifdef CONFIG_64BIT 704 if (arch == ARCH_S390) { 705 pr_alert("The 64-bit dump tool cannot be used for a " 706 "32-bit system\n"); 707 rc = -EINVAL; 708 goto fail; 709 } 710 #else /* CONFIG_64BIT */ 711 if (arch == ARCH_S390X) { 712 pr_alert("The 32-bit dump tool cannot be used for a " 713 "64-bit system\n"); 714 rc = -EINVAL; 715 goto fail; 716 } 717 #endif /* CONFIG_64BIT */ 718 719 rc = get_mem_info(&mem_size, &mem_end); 720 if (rc) 721 goto fail; 722 723 rc = sys_info_init(arch, mem_end); 724 if (rc) 725 goto fail; 726 zcore_header_init(arch, &zcore_header, mem_size); 727 728 rc = zcore_reipl_init(); 729 if (rc) 730 goto fail; 731 732 zcore_dir = debugfs_create_dir("zcore" , NULL); 733 if (!zcore_dir) { 734 rc = -ENOMEM; 735 goto fail; 736 } 737 zcore_file = debugfs_create_file("mem", S_IRUSR, zcore_dir, NULL, 738 &zcore_fops); 739 if (!zcore_file) { 740 rc = -ENOMEM; 741 goto fail_dir; 742 } 743 zcore_memmap_file = 
debugfs_create_file("memmap", S_IRUSR, zcore_dir, 744 NULL, &zcore_memmap_fops); 745 if (!zcore_memmap_file) { 746 rc = -ENOMEM; 747 goto fail_file; 748 } 749 zcore_reipl_file = debugfs_create_file("reipl", S_IRUSR, zcore_dir, 750 NULL, &zcore_reipl_fops); 751 if (!zcore_reipl_file) { 752 rc = -ENOMEM; 753 goto fail_memmap_file; 754 } 755 zcore_hsa_file = debugfs_create_file("hsa", S_IRUSR|S_IWUSR, zcore_dir, 756 NULL, &zcore_hsa_fops); 757 if (!zcore_hsa_file) { 758 rc = -ENOMEM; 759 goto fail_reipl_file; 760 } 761 return 0; 762 763 fail_reipl_file: 764 debugfs_remove(zcore_reipl_file); 765 fail_memmap_file: 766 debugfs_remove(zcore_memmap_file); 767 fail_file: 768 debugfs_remove(zcore_file); 769 fail_dir: 770 debugfs_remove(zcore_dir); 771 fail: 772 diag308(DIAG308_REL_HSA, NULL); 773 return rc; 774 } 775 776 static void __exit zcore_exit(void) 777 { 778 debug_unregister(zcore_dbf); 779 sclp_sdias_exit(); 780 free_page((unsigned long) ipl_block); 781 debugfs_remove(zcore_hsa_file); 782 debugfs_remove(zcore_reipl_file); 783 debugfs_remove(zcore_memmap_file); 784 debugfs_remove(zcore_file); 785 debugfs_remove(zcore_dir); 786 diag308(DIAG308_REL_HSA, NULL); 787 } 788 789 MODULE_AUTHOR("Copyright IBM Corp. 2003,2008"); 790 MODULE_DESCRIPTION("zcore module for zfcpdump support"); 791 MODULE_LICENSE("GPL"); 792 793 subsys_initcall(zcore_init); 794 module_exit(zcore_exit); 795