/*
 * Character device driver for extended error reporting.
 *
 * Copyright (C) 2005 IBM Corporation
 * extended error reporting for DASD ECKD devices
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/poll.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <asm/atomic.h>
#include <asm/ebcdic.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eer):"

/*
 * SECTION: the internal buffer
 */

/*
 * The internal buffer is meant to store opaque blobs of data, so it does
 * not know of higher level concepts like triggers.
 * It consists of a number of pages that are used as a ringbuffer. Each data
 * blob is stored in a simple record that consists of an integer, which
 * contains the size of the following data, and the data bytes themselves.
 *
 * To allow for multiple independent readers we create one internal buffer
 * each time the device is opened and destroy the buffer when the file is
 * closed again. The number of pages used for this buffer is determined by
 * the module parameter eer_pages.
 *
 * One record can be written to a buffer by using the functions
 * - dasd_eer_start_record (once per record, to write the size to the
 *   buffer and reserve the space for the data)
 * - dasd_eer_write_buffer (one or more times per record, to write the data)
 * The data can be written in several steps, but you have to compute the
 * total size up front for the invocation of dasd_eer_start_record.
 * If the ringbuffer is full, dasd_eer_start_record will remove the required
 * number of old records.
 *
 * A record is typically read in two steps: first read the integer that
 * specifies the size of the following data, then read the data.
 * Both can be done with
 * - dasd_eer_read_buffer
 *
 * For all mentioned functions you need to take the bufferlock first and keep
 * it until a complete record is written or read.
 *
 * All information necessary to keep track of an internal buffer is kept in
 * a struct eerbuffer. The buffer specific to a file pointer is stored in
 * the private_data field of that file. To be able to write data to all
 * existing buffers, each buffer is also added to the bufferlist.
 * If the user does not want to read a complete record in one go, we have to
 * keep track of the rest of the record. residual stores the number of bytes
 * that are still to be delivered. If the rest of the record is invalidated
 * between two reads, residual is set to -1 so that the next read will fail.
 * All entries in the eerbuffer structure are protected by the bufferlock.
 * To avoid races between writing to a buffer on the one side and creating
 * and destroying buffers on the other side, the bufferlock must also be used
 * to protect the bufferlist.
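 *
 * A minimal sketch of the write sequence described above (mirroring what
 * dasd_eer_write_standard_trigger does further down in this file):
 *
 *	spin_lock_irqsave(&bufferlock, flags);
 *	list_for_each_entry(eerb, &bufferlist, list) {
 *		dasd_eer_start_record(eerb, count);
 *		dasd_eer_write_buffer(eerb, data, count);
 *	}
 *	spin_unlock_irqrestore(&bufferlock, flags);
 *	wake_up_interruptible(&dasd_eer_read_wait_queue);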
 */

static int eer_pages = 5;
module_param(eer_pages, int, S_IRUGO|S_IWUSR);

struct eerbuffer {
	struct list_head list;
	char **buffer;
	int buffersize;
	int buffer_page_count;
	int head;
	int tail;
	int residual;
};

static LIST_HEAD(bufferlist);
static DEFINE_SPINLOCK(bufferlock);
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);

/*
 * How many free bytes are available on the buffer.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
{
	if (eerb->head < eerb->tail)
		return eerb->tail - eerb->head - 1;
	return eerb->buffersize - eerb->head + eerb->tail - 1;
}

/*
 * How many bytes of buffer space are used.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
{

	if (eerb->head >= eerb->tail)
		return eerb->head - eerb->tail;
	return eerb->buffersize - eerb->tail + eerb->head;
}

/*
 * The dasd_eer_write_buffer function just copies count bytes of data
 * to the buffer. Call dasd_eer_start_record first, so that enough free
 * space is available.
 * Needs to be called with bufferlock held.
 */
static void dasd_eer_write_buffer(struct eerbuffer *eerb,
				  char *data, int count)
{

	unsigned long headindex, localhead;
	unsigned long rest, len;
	char *nextdata;

	nextdata = data;
	rest = count;
	while (rest > 0) {
		headindex = eerb->head / PAGE_SIZE;
		localhead = eerb->head % PAGE_SIZE;
		len = min(rest, PAGE_SIZE - localhead);
		memcpy(eerb->buffer[headindex] + localhead, nextdata, len);
		nextdata += len;
		rest -= len;
		eerb->head += len;
		if (eerb->head == eerb->buffersize)
			eerb->head = 0; /* wrap around */
		BUG_ON(eerb->head > eerb->buffersize);
	}
}

/*
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
{

	unsigned long tailindex, localtail;
	unsigned long rest, len, finalcount;
	char *nextdata;

	finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
	nextdata = data;
	rest = finalcount;
	while (rest > 0) {
		tailindex = eerb->tail / PAGE_SIZE;
		localtail = eerb->tail % PAGE_SIZE;
		len = min(rest, PAGE_SIZE - localtail);
		memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
		nextdata += len;
		rest -= len;
		eerb->tail += len;
		if (eerb->tail == eerb->buffersize)
			eerb->tail = 0; /* wrap around */
		BUG_ON(eerb->tail > eerb->buffersize);
	}
	return finalcount;
}

/*
 * Whenever you want to write a blob of data to the internal buffer you
 * have to start by using this function first. It will write the number
 * of bytes that will be written to the buffer. If necessary it will remove
 * old records to make room for the new one.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
{
	int tailcount;

	if (count + sizeof(count) > eerb->buffersize)
		return -ENOMEM;
	while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
		if (eerb->residual > 0) {
			eerb->tail += eerb->residual;
			if (eerb->tail >= eerb->buffersize)
				eerb->tail -= eerb->buffersize;
			eerb->residual = -1;
		}
		dasd_eer_read_buffer(eerb, (char *) &tailcount,
				     sizeof(tailcount));
		eerb->tail += tailcount;
		if (eerb->tail >= eerb->buffersize)
			eerb->tail -= eerb->buffersize;
	}
	dasd_eer_write_buffer(eerb, (char *) &count, sizeof(count));

	return 0;
}

/*
 * Release pages that are not used anymore.
 */
static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; i++)
		free_page((unsigned long) buf[i]);
}

/*
 * Allocate a new set of memory pages.
 */
static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; i++) {
		buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
		if (!buf[i]) {
			dasd_eer_free_buffer_pages(buf, i);
			return -ENOMEM;
		}
	}
	return 0;
}

/*
 * SECTION: The extended error reporting functionality
 */

/*
 * When a DASD device driver wants to report an error, it calls the
 * function dasd_eer_write and gives the respective trigger ID as
 * parameter. Currently there are four kinds of triggers:
 *
 * DASD_EER_FATALERROR:  all kinds of unrecoverable I/O problems
 * DASD_EER_PPRCSUSPEND: PPRC was suspended
 * DASD_EER_NOPATH:      there is no path to the device left
 * DASD_EER_STATECHANGE: the state of the device has changed
 *
 * For the first three triggers all required information can be supplied by
 * the caller. For these triggers a record is written by the function
 * dasd_eer_write_standard_trigger.
 *
 * The DASD_EER_STATECHANGE trigger is special since a sense subsystem
 * status ccw needs to be executed to gather the necessary sense data first.
 * The dasd_eer_snss function will queue the SNSS request and the request
 * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
 * trigger.
 *
 * To avoid memory allocations at runtime, the necessary memory is allocated
 * when the extended error reporting is enabled for a device (by
 * dasd_eer_enable). There is one sense subsystem status request for each
 * eer enabled DASD device. The presence of the cqr in device->eer_cqr
 * indicates that eer is enabled for the device. The use of the snss request
 * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
 * that the cqr is currently in use, dasd_eer_snss cannot start a second
 * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
 * the SNSS request will check the bit and call dasd_eer_snss again.
 */

#define SNSS_DATA_SIZE 44

#define DASD_EER_BUSID_SIZE 10
struct dasd_eer_header {
	__u32 total_size;
	__u32 trigger;
	__u64 tv_sec;
	__u64 tv_usec;
	char busid[DASD_EER_BUSID_SIZE];
} __attribute__ ((packed));

/*
 * The following function can be used for those triggers that have
 * all necessary data available when the function is called; the resulting
 * record layout is sketched below.
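 *
 * The data blob written for each trigger consists of a struct
 * dasd_eer_header, followed by the collected sense data and a terminating
 * "EOR" marker (an illustrative layout derived from the code below;
 * dasd_eer_start_record additionally prefixes the blob with its size in
 * the ring buffer):
 *
 *	| dasd_eer_header | 32 byte sense set | ... | "EOR\0" |
 *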
 * If the parameter cqr is not NULL, the chain of requests will be searched
 * for valid sense data, and all valid sense data sets will be added to
 * the trigger's data.
 */
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
					    struct dasd_ccw_req *cqr,
					    int trigger)
{
	struct dasd_ccw_req *temp_cqr;
	int data_size;
	struct timeval tv;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;

	/* go through cqr chain and count the valid sense data sets */
	data_size = 0;
	for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
		if (temp_cqr->irb.esw.esw0.erw.cons)
			data_size += 32;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = trigger;
	do_gettimeofday(&tv);
	header.tv_sec = tv.tv_sec;
	header.tv_usec = tv.tv_usec;
	strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
			if (temp_cqr->irb.esw.esw0.erw.cons)
				dasd_eer_write_buffer(eerb,
						      temp_cqr->irb.ecw, 32);
		dasd_eer_write_buffer(eerb, "EOR", 4);
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * This function writes a DASD_EER_STATECHANGE trigger.
 */
static void dasd_eer_write_snss_trigger(struct dasd_device *device,
					struct dasd_ccw_req *cqr,
					int trigger)
{
	int data_size;
	int snss_rc;
	struct timeval tv;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;

	snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	if (snss_rc)
		data_size = 0;
	else
		data_size = SNSS_DATA_SIZE;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = DASD_EER_STATECHANGE;
	do_gettimeofday(&tv);
	header.tv_sec = tv.tv_sec;
	header.tv_usec = tv.tv_usec;
	strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		if (!snss_rc)
			dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
		dasd_eer_write_buffer(eerb, "EOR", 4);
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * This function is called for all triggers. It calls the appropriate
 * function that writes the actual trigger records.
 */
void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
		    unsigned int id)
{
	if (!device->eer_cqr)
		return;
	switch (id) {
	case DASD_EER_FATALERROR:
	case DASD_EER_PPRCSUSPEND:
		dasd_eer_write_standard_trigger(device, cqr, id);
		break;
	case DASD_EER_NOPATH:
		dasd_eer_write_standard_trigger(device, NULL, id);
		break;
	case DASD_EER_STATECHANGE:
		dasd_eer_write_snss_trigger(device, cqr, id);
		break;
	default: /* unknown trigger, so we write it without any sense data */
		dasd_eer_write_standard_trigger(device, NULL, id);
		break;
	}
}
EXPORT_SYMBOL(dasd_eer_write);

/*
 * Start a sense subsystem status request.
 * Needs to be called with the device held.
 */
void dasd_eer_snss(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	cqr = device->eer_cqr;
	if (!cqr)	/* Device not eer enabled. */
		return;
	if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
		/* Sense subsystem status request in use. */
		set_bit(DASD_FLAG_EER_SNSS, &device->flags);
		return;
	}
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->list, &device->ccw_queue);
	dasd_schedule_bh(device);
}

/*
 * Callback function for use with sense subsystem status request.
 */
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->device;
	unsigned long flags;

	dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr == cqr) {
		clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
		if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
			/* Another SNSS has been requested in the meantime. */
			dasd_eer_snss(device);
		cqr = NULL;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr)
		/*
		 * Extended error reporting has been switched off while
		 * the SNSS request was running. It could even have
		 * been switched off and on again, in which case there
		 * is a new ccw in device->eer_cqr. Free the "old"
		 * snss request now.
		 */
		dasd_kfree_request(cqr, device);
}

/*
 * Enable error reporting on a given device.
 */
int dasd_eer_enable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;

	if (device->eer_cqr)
		return 0;

	if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
		return -EPERM;	/* FIXME: -EMEDIUMTYPE ? */

	cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */,
				   SNSS_DATA_SIZE, device);
	if (!cqr)
		return -ENOMEM;

	cqr->device = device;
	cqr->retries = 255;
	cqr->expires = 10 * HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);

	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS;
	cqr->cpaddr->count = SNSS_DATA_SIZE;
	cqr->cpaddr->flags = 0;
	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->callback = dasd_eer_snss_cb;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (!device->eer_cqr) {
		device->eer_cqr = cqr;
		cqr = NULL;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr)
		dasd_kfree_request(cqr, device);
	return 0;
}

/*
 * Disable error reporting on a given device.
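 * If a SNSS request is currently in flight (DASD_FLAG_EER_IN_USE is set),
 * the request cannot be freed here; dasd_eer_snss_cb will notice that
 * device->eer_cqr no longer points to it and free it instead.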
 */
void dasd_eer_disable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int in_use;

	if (!device->eer_cqr)
		return;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr = device->eer_cqr;
	device->eer_cqr = NULL;
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr && !in_use)
		dasd_kfree_request(cqr, device);
}

/*
 * SECTION: the device operations
 */

/*
 * On the one hand we need a lock to access our internal buffer, on the
 * other hand a copy_to_user can sleep. So we copy the data to transfer
 * into the readbuffer, which is protected by the readbuffer_mutex.
 */
static char readbuffer[PAGE_SIZE];
static DECLARE_MUTEX(readbuffer_mutex);

static int dasd_eer_open(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
	if (!eerb)
		return -ENOMEM;
	eerb->buffer_page_count = eer_pages;
	if (eerb->buffer_page_count < 1 ||
	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
		kfree(eerb);
		MESSAGE(KERN_WARNING, "can't open device since module "
			"parameter eer_pages is smaller than 1 or"
			" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
		return -EINVAL;
	}
	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
	eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *),
			       GFP_KERNEL);
	if (!eerb->buffer) {
		kfree(eerb);
		return -ENOMEM;
	}
	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
					   eerb->buffer_page_count)) {
		kfree(eerb->buffer);
		kfree(eerb);
		return -ENOMEM;
	}
	filp->private_data = eerb;
	spin_lock_irqsave(&bufferlock, flags);
	list_add(&eerb->list, &bufferlist);
	spin_unlock_irqrestore(&bufferlock, flags);

	return nonseekable_open(inp, filp);
}

static int dasd_eer_close(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = (struct eerbuffer *) filp->private_data;
	spin_lock_irqsave(&bufferlock, flags);
	list_del(&eerb->list);
	spin_unlock_irqrestore(&bufferlock, flags);
	dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
	kfree(eerb->buffer);
	kfree(eerb);

	return 0;
}

static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int tc, rc;
	int tailcount, effective_count;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	if (down_interruptible(&readbuffer_mutex))
		return -ERESTARTSYS;

	spin_lock_irqsave(&bufferlock, flags);

	if (eerb->residual < 0) {
		/* the remainder of this record has been deleted */
		eerb->residual = 0;
		spin_unlock_irqrestore(&bufferlock, flags);
		up(&readbuffer_mutex);
		return -EIO;
	} else if (eerb->residual > 0) {
		/* OK we still have a second half of a record to deliver */
		effective_count = min(eerb->residual, (int) count);
		eerb->residual -= effective_count;
	} else {
		tc = 0;
		while (!tc) {
			tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
						  sizeof(tailcount));
			if (!tc) {
				/* no data available */
				spin_unlock_irqrestore(&bufferlock, flags);
				up(&readbuffer_mutex);
				if (filp->f_flags & O_NONBLOCK)
					return -EAGAIN;
				rc = wait_event_interruptible(
					dasd_eer_read_wait_queue,
					eerb->head != eerb->tail);
				if (rc)
					return rc;
				if (down_interruptible(&readbuffer_mutex))
					return -ERESTARTSYS;
				spin_lock_irqsave(&bufferlock, flags);
			}
		}
		WARN_ON(tc != sizeof(tailcount));
		effective_count = min(tailcount, (int) count);
		eerb->residual = tailcount - effective_count;
	}

	tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
	WARN_ON(tc != effective_count);

	spin_unlock_irqrestore(&bufferlock, flags);

	if (copy_to_user(buf, readbuffer, effective_count)) {
		up(&readbuffer_mutex);
		return -EFAULT;
	}

	up(&readbuffer_mutex);
	return effective_count;
}

static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable)
{
	unsigned int mask;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
	spin_lock_irqsave(&bufferlock, flags);
	if (eerb->head != eerb->tail)
		mask = POLLIN | POLLRDNORM;
	else
		mask = 0;
	spin_unlock_irqrestore(&bufferlock, flags);
	return mask;
}

static const struct file_operations dasd_eer_fops = {
	.open		= &dasd_eer_open,
	.release	= &dasd_eer_close,
	.read		= &dasd_eer_read,
	.poll		= &dasd_eer_poll,
	.owner		= THIS_MODULE,
};

static struct miscdevice *dasd_eer_dev = NULL;

int __init dasd_eer_init(void)
{
	int rc;

	dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
	if (!dasd_eer_dev)
		return -ENOMEM;

	dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
	dasd_eer_dev->name = "dasd_eer";
	dasd_eer_dev->fops = &dasd_eer_fops;

	rc = misc_register(dasd_eer_dev);
	if (rc) {
		kfree(dasd_eer_dev);
		dasd_eer_dev = NULL;
		MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
			"register misc device");
		return rc;
	}

	return 0;
}

void dasd_eer_exit(void)
{
	if (dasd_eer_dev) {
		WARN_ON(misc_deregister(dasd_eer_dev) != 0);
		kfree(dasd_eer_dev);
		dasd_eer_dev = NULL;
	}
}
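
/*
 * A minimal sketch of a userspace consumer, for illustration only (not part
 * of the driver; it assumes that udev creates /dev/dasd_eer for the misc
 * device registered above, and handle_record is a hypothetical helper).
 * Each read returns at most one record, i.e. a struct dasd_eer_header
 * followed by the sense data and the "EOR" marker; if the supplied buffer
 * is smaller than the record, the remainder is returned by subsequent reads.
 *
 *	int fd = open("/dev/dasd_eer", O_RDONLY);
 *	char record[4096];
 *	ssize_t len;
 *
 *	while ((len = read(fd, record, sizeof(record))) > 0) {
 *		struct dasd_eer_header *header =
 *			(struct dasd_eer_header *) record;
 *		handle_record(header, len);
 *	}
 */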