/*
 * Character device driver for extended error reporting.
 *
 * Copyright (C) 2005 IBM Corporation
 * extended error reporting for DASD ECKD devices
 * Author(s): Stefan Weinhuber <wein@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>
#include <linux/err.h>

#include <asm/uaccess.h>
#include <asm/atomic.h>
#include <asm/ebcdic.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif /* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eer):"

/*
 * SECTION: the internal buffer
 */

/*
 * The internal buffer is meant to store opaque blobs of data, so it does
 * not know of higher level concepts like triggers.
 * It consists of a number of pages that are used as a ringbuffer. Each data
 * blob is stored in a simple record that consists of an integer, which
 * contains the size of the following data, and the data bytes themselves.
 *
 * To allow for multiple independent readers we create one internal buffer
 * each time the device is opened and destroy the buffer when the file is
 * closed again. The number of pages used for this buffer is determined by
 * the module parameter eer_pages.
 *
 * One record can be written to a buffer by using the functions
 * - dasd_eer_start_record (once per record, to write the size to the
 *   buffer and reserve the space for the data)
 * - dasd_eer_write_buffer (one or more times per record to write the data)
 * The data can be written in several steps but you will have to compute
 * the total size up front for the invocation of dasd_eer_start_record.
 * If the ringbuffer is full, dasd_eer_start_record will remove the required
 * number of old records.
 *
 * A record is typically read in two steps, first read the integer that
 * specifies the size of the following data, then read the data.
 * Both can be done by
 * - dasd_eer_read_buffer
 *
 * For all mentioned functions you need to get the bufferlock first and keep
 * it until a complete record is written or read.
 *
 * All information necessary to keep track of an internal buffer is kept in
 * a struct eerbuffer. The buffer specific to a file pointer is stored in
 * the private_data field of that file. To be able to write data to all
 * existing buffers, each buffer is also added to the bufferlist.
 * If the user does not want to read a complete record in one go, we have to
 * keep track of the rest of the record. residual stores the number of bytes
 * that are still to be delivered. If the rest of the record is invalidated
 * between two reads then residual will be set to -1 so that the next read
 * will fail.
 * All entries in the eerbuffer structure are protected by the bufferlock.
 * To avoid races between writing to a buffer on the one side and creating
 * and destroying buffers on the other side, the bufferlock must also be used
 * to protect the bufferlist.
 */
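
/*
 * Illustrative sketch (kept as a comment, not compiled): a hypothetical
 * writer that wants to store one blob in every registered buffer would
 * follow the pattern described above; the real writers further down
 * (dasd_eer_write_standard_trigger, dasd_eer_write_snss_trigger) do the
 * same. The names "blob" and "blob_size" are made up for this example.
 *
 *        struct eerbuffer *eerb;
 *        unsigned long flags;
 *
 *        spin_lock_irqsave(&bufferlock, flags);
 *        list_for_each_entry(eerb, &bufferlist, list) {
 *                if (dasd_eer_start_record(eerb, blob_size))
 *                        continue;
 *                dasd_eer_write_buffer(eerb, blob, blob_size);
 *        }
 *        spin_unlock_irqrestore(&bufferlock, flags);
 *        wake_up_interruptible(&dasd_eer_read_wait_queue);
 */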

static int eer_pages = 5;
module_param(eer_pages, int, S_IRUGO|S_IWUSR);

struct eerbuffer {
        struct list_head list;
        char **buffer;
        int buffersize;
        int buffer_page_count;
        int head;
        int tail;
        int residual;
};

static LIST_HEAD(bufferlist);
static DEFINE_SPINLOCK(bufferlock);
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);

/*
 * How many free bytes are available on the buffer.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
{
        if (eerb->head < eerb->tail)
                return eerb->tail - eerb->head - 1;
        return eerb->buffersize - eerb->head + eerb->tail - 1;
}

/*
 * How many bytes of buffer space are used.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
{
        if (eerb->head >= eerb->tail)
                return eerb->head - eerb->tail;
        return eerb->buffersize - eerb->tail + eerb->head;
}

/*
 * The dasd_eer_write_buffer function just copies count bytes of data
 * to the buffer. Make sure to call dasd_eer_start_record first, to
 * make sure that enough free space is available.
 * Needs to be called with bufferlock held.
 */
static void dasd_eer_write_buffer(struct eerbuffer *eerb,
                                  char *data, int count)
{
        unsigned long headindex, localhead;
        unsigned long rest, len;
        char *nextdata;

        nextdata = data;
        rest = count;
        while (rest > 0) {
                headindex = eerb->head / PAGE_SIZE;
                localhead = eerb->head % PAGE_SIZE;
                len = min(rest, PAGE_SIZE - localhead);
                memcpy(eerb->buffer[headindex] + localhead, nextdata, len);
                nextdata += len;
                rest -= len;
                eerb->head += len;
                if (eerb->head == eerb->buffersize)
                        eerb->head = 0; /* wrap around */
                BUG_ON(eerb->head > eerb->buffersize);
        }
}

/*
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
{
        unsigned long tailindex, localtail;
        unsigned long rest, len, finalcount;
        char *nextdata;

        finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
        nextdata = data;
        rest = finalcount;
        while (rest > 0) {
                tailindex = eerb->tail / PAGE_SIZE;
                localtail = eerb->tail % PAGE_SIZE;
                len = min(rest, PAGE_SIZE - localtail);
                memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
                nextdata += len;
                rest -= len;
                eerb->tail += len;
                if (eerb->tail == eerb->buffersize)
                        eerb->tail = 0; /* wrap around */
                BUG_ON(eerb->tail > eerb->buffersize);
        }
        return finalcount;
}
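
/*
 * Illustrative sketch (kept as a comment, not compiled): reading one
 * complete record is done in two steps while holding the bufferlock,
 * just as dasd_eer_read further down does it. "size", "data", "eerb" and
 * "flags" are hypothetical variables of this example.
 *
 *        int size;
 *
 *        spin_lock_irqsave(&bufferlock, flags);
 *        if (dasd_eer_read_buffer(eerb, (char *) &size, sizeof(size)) ==
 *            sizeof(size))
 *                dasd_eer_read_buffer(eerb, data, size);
 *        spin_unlock_irqrestore(&bufferlock, flags);
 */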

/*
 * Whenever you want to write a blob of data to the internal buffer you
 * have to start by using this function first. It will write the number
 * of bytes that will be written to the buffer. If necessary it will remove
 * old records to make room for the new one.
 * Needs to be called with bufferlock held.
 */
static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
{
        int tailcount;

        if (count + sizeof(count) > eerb->buffersize)
                return -ENOMEM;
        while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
                if (eerb->residual > 0) {
                        eerb->tail += eerb->residual;
                        if (eerb->tail >= eerb->buffersize)
                                eerb->tail -= eerb->buffersize;
                        eerb->residual = -1;
                }
                dasd_eer_read_buffer(eerb, (char *) &tailcount,
                                     sizeof(tailcount));
                eerb->tail += tailcount;
                if (eerb->tail >= eerb->buffersize)
                        eerb->tail -= eerb->buffersize;
        }
        dasd_eer_write_buffer(eerb, (char *) &count, sizeof(count));

        return 0;
}

/*
 * Release pages that are not used anymore.
 */
static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
        int i;

        for (i = 0; i < no_pages; i++)
                free_page((unsigned long) buf[i]);
}

/*
 * Allocate a new set of memory pages.
 */
static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
{
        int i;

        for (i = 0; i < no_pages; i++) {
                buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
                if (!buf[i]) {
                        dasd_eer_free_buffer_pages(buf, i);
                        return -ENOMEM;
                }
        }
        return 0;
}

/*
 * SECTION: The extended error reporting functionality
 */

/*
 * When a DASD device driver wants to report an error, it calls the
 * function dasd_eer_write and gives the respective trigger ID as
 * parameter. Currently there are four kinds of triggers:
 *
 * DASD_EER_FATALERROR:  all kinds of unrecoverable I/O problems
 * DASD_EER_PPRCSUSPEND: PPRC was suspended
 * DASD_EER_NOPATH:      There is no path to the device left.
 * DASD_EER_STATECHANGE: The state of the device has changed.
 *
 * For the first three triggers all required information can be supplied by
 * the caller. For these triggers a record is written by the function
 * dasd_eer_write_standard_trigger.
 *
 * The DASD_EER_STATECHANGE trigger is special since a sense subsystem
 * status CCW needs to be executed to gather the necessary sense data first.
 * The dasd_eer_snss function will queue the SNSS request and the request
 * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
 * trigger.
 *
 * To avoid memory allocations at runtime, the necessary memory is allocated
 * when the extended error reporting is enabled for a device (by
 * dasd_eer_enable). There is one sense subsystem status request for each
 * eer enabled DASD device. The presence of the cqr in device->eer_cqr
 * indicates that eer is enabled for the device. The use of the SNSS request
 * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
 * that the cqr is currently in use, dasd_eer_snss cannot start a second
 * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
 * the SNSS request will check the bit and call dasd_eer_snss again.
 */

#define SNSS_DATA_SIZE 44

#define DASD_EER_BUSID_SIZE 10
struct dasd_eer_header {
        __u32 total_size;
        __u32 trigger;
        __u64 tv_sec;
        __u64 tv_usec;
        char busid[DASD_EER_BUSID_SIZE];
} __attribute__ ((packed));
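
/*
 * Resulting layout of one record, both in the internal buffer and as seen
 * by a reader of the character device (the leading size integer used by
 * the ringbuffer bookkeeping is consumed by dasd_eer_read and not passed
 * to user space):
 *
 *        struct dasd_eer_header
 *        trigger dependent data: zero or more 32 byte sense data sets for
 *                                the standard triggers, or SNSS_DATA_SIZE
 *                                bytes of sense subsystem status data for
 *                                DASD_EER_STATECHANGE
 *        the string "EOR" including its terminating zero byte
 *
 * header.total_size covers everything from the start of the header up to
 * and including the "EOR" marker.
 */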

/*
 * The following function can be used for those triggers that have
 * all necessary data available when the function is called.
 * If the parameter cqr is not NULL, the chain of requests will be searched
 * for valid sense data, and all valid sense data sets will be added to
 * the trigger's data.
 */
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
                                            struct dasd_ccw_req *cqr,
                                            int trigger)
{
        struct dasd_ccw_req *temp_cqr;
        int data_size;
        struct timeval tv;
        struct dasd_eer_header header;
        unsigned long flags;
        struct eerbuffer *eerb;

        /* go through cqr chain and count the valid sense data sets */
        data_size = 0;
        for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
                if (temp_cqr->irb.esw.esw0.erw.cons)
                        data_size += 32;

        header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
        header.trigger = trigger;
        do_gettimeofday(&tv);
        header.tv_sec = tv.tv_sec;
        header.tv_usec = tv.tv_usec;
        strncpy(header.busid, dev_name(&device->cdev->dev),
                DASD_EER_BUSID_SIZE);

        spin_lock_irqsave(&bufferlock, flags);
        list_for_each_entry(eerb, &bufferlist, list) {
                dasd_eer_start_record(eerb, header.total_size);
                dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
                for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
                        if (temp_cqr->irb.esw.esw0.erw.cons)
                                dasd_eer_write_buffer(eerb, cqr->irb.ecw, 32);
                dasd_eer_write_buffer(eerb, "EOR", 4);
        }
        spin_unlock_irqrestore(&bufferlock, flags);
        wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * This function writes a DASD_EER_STATECHANGE trigger.
 */
static void dasd_eer_write_snss_trigger(struct dasd_device *device,
                                        struct dasd_ccw_req *cqr,
                                        int trigger)
{
        int data_size;
        int snss_rc;
        struct timeval tv;
        struct dasd_eer_header header;
        unsigned long flags;
        struct eerbuffer *eerb;

        snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
        if (snss_rc)
                data_size = 0;
        else
                data_size = SNSS_DATA_SIZE;

        header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
        header.trigger = DASD_EER_STATECHANGE;
        do_gettimeofday(&tv);
        header.tv_sec = tv.tv_sec;
        header.tv_usec = tv.tv_usec;
        strncpy(header.busid, dev_name(&device->cdev->dev),
                DASD_EER_BUSID_SIZE);

        spin_lock_irqsave(&bufferlock, flags);
        list_for_each_entry(eerb, &bufferlist, list) {
                dasd_eer_start_record(eerb, header.total_size);
                dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
                if (!snss_rc)
                        dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
                dasd_eer_write_buffer(eerb, "EOR", 4);
        }
        spin_unlock_irqrestore(&bufferlock, flags);
        wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * This function is called for all triggers. It calls the appropriate
 * function that writes the actual trigger records.
 */
void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
                    unsigned int id)
{
        if (!device->eer_cqr)
                return;
        switch (id) {
        case DASD_EER_FATALERROR:
        case DASD_EER_PPRCSUSPEND:
                dasd_eer_write_standard_trigger(device, cqr, id);
                break;
        case DASD_EER_NOPATH:
                dasd_eer_write_standard_trigger(device, NULL, id);
                break;
        case DASD_EER_STATECHANGE:
                dasd_eer_write_snss_trigger(device, cqr, id);
                break;
        default: /* unknown trigger, so we write it without any sense data */
                dasd_eer_write_standard_trigger(device, NULL, id);
                break;
        }
}
EXPORT_SYMBOL(dasd_eer_write);
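
/*
 * Illustrative sketch (kept as a comment, not compiled): a discipline that
 * hits an unrecoverable I/O problem on a request "cqr" could report it with
 * a single call; the trigger record is only written if extended error
 * reporting has been enabled for the device (device->eer_cqr is set):
 *
 *        dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
 */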

/*
 * Start a sense subsystem status request.
 * Needs to be called with the device held.
 */
void dasd_eer_snss(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;

        cqr = device->eer_cqr;
        if (!cqr) /* Device not eer enabled. */
                return;
        if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
                /* Sense subsystem status request in use. */
                set_bit(DASD_FLAG_EER_SNSS, &device->flags);
                return;
        }
        /* cdev is already locked, can't use dasd_add_request_head */
        clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
        cqr->status = DASD_CQR_QUEUED;
        list_add(&cqr->devlist, &device->ccw_queue);
        dasd_schedule_device_bh(device);
}

/*
 * Callback function for use with sense subsystem status request.
 */
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
        struct dasd_device *device = cqr->startdev;
        unsigned long flags;

        dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        if (device->eer_cqr == cqr) {
                clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
                if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
                        /* Another SNSS has been requested in the meantime. */
                        dasd_eer_snss(device);
                cqr = NULL;
        }
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        if (cqr)
                /*
                 * Extended error recovery has been switched off while
                 * the SNSS request was running. It could even have
                 * been switched off and on again in which case there
                 * is a new ccw in device->eer_cqr. Free the "old"
                 * SNSS request now.
                 */
                dasd_kfree_request(cqr, device);
}

/*
 * Enable error reporting on a given device.
 */
int dasd_eer_enable(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        unsigned long flags;

        if (device->eer_cqr)
                return 0;

        if (!device->discipline || strcmp(device->discipline->name, "ECKD"))
                return -EPERM; /* FIXME: -EMEDIUMTYPE ? */

        cqr = dasd_kmalloc_request("ECKD", 1 /* SNSS */,
                                   SNSS_DATA_SIZE, device);
        if (IS_ERR(cqr))
                return -ENOMEM;

        cqr->startdev = device;
        cqr->retries = 255;
        cqr->expires = 10 * HZ;
        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);

        cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SNSS;
        cqr->cpaddr->count = SNSS_DATA_SIZE;
        cqr->cpaddr->flags = 0;
        cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;

        cqr->buildclk = get_clock();
        cqr->status = DASD_CQR_FILLED;
        cqr->callback = dasd_eer_snss_cb;

        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        if (!device->eer_cqr) {
                device->eer_cqr = cqr;
                cqr = NULL;
        }
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        if (cqr)
                dasd_kfree_request(cqr, device);
        return 0;
}

/*
 * Disable error reporting on a given device.
 */
void dasd_eer_disable(struct dasd_device *device)
{
        struct dasd_ccw_req *cqr;
        unsigned long flags;
        int in_use;

        if (!device->eer_cqr)
                return;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        cqr = device->eer_cqr;
        device->eer_cqr = NULL;
        clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
        in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        if (cqr && !in_use)
                dasd_kfree_request(cqr, device);
}

/*
 * SECTION: the device operations
 */

/*
 * On the one side we need a lock to access our internal buffer, on the
 * other side a copy_to_user can sleep. So we need to copy the data we have
 * to transfer into a readbuffer, which is protected by the readbuffer_mutex.
 */
static char readbuffer[PAGE_SIZE];
static DEFINE_MUTEX(readbuffer_mutex);

static int dasd_eer_open(struct inode *inp, struct file *filp)
{
        struct eerbuffer *eerb;
        unsigned long flags;

        eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
        if (!eerb)
                return -ENOMEM;
        lock_kernel();
        eerb->buffer_page_count = eer_pages;
        if (eerb->buffer_page_count < 1 ||
            eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
                kfree(eerb);
                MESSAGE(KERN_WARNING, "can't open device since module "
                        "parameter eer_pages is smaller than 1 or"
                        " bigger than %d", (int)(INT_MAX / PAGE_SIZE));
                unlock_kernel();
                return -EINVAL;
        }
        eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
        eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *),
                               GFP_KERNEL);
        if (!eerb->buffer) {
                kfree(eerb);
                unlock_kernel();
                return -ENOMEM;
        }
        if (dasd_eer_allocate_buffer_pages(eerb->buffer,
                                           eerb->buffer_page_count)) {
                kfree(eerb->buffer);
                kfree(eerb);
                unlock_kernel();
                return -ENOMEM;
        }
        filp->private_data = eerb;
        spin_lock_irqsave(&bufferlock, flags);
        list_add(&eerb->list, &bufferlist);
        spin_unlock_irqrestore(&bufferlock, flags);

        unlock_kernel();
        return nonseekable_open(inp, filp);
}

static int dasd_eer_close(struct inode *inp, struct file *filp)
{
        struct eerbuffer *eerb;
        unsigned long flags;

        eerb = (struct eerbuffer *) filp->private_data;
        spin_lock_irqsave(&bufferlock, flags);
        list_del(&eerb->list);
        spin_unlock_irqrestore(&bufferlock, flags);
        dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
        kfree(eerb->buffer);
        kfree(eerb);

        return 0;
}
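
/*
 * Illustrative sketch (kept as a comment, not compiled): a user space
 * program reading from the misc device receives at most one record per
 * read call; if the supplied buffer is smaller than the record, the
 * remainder is returned by subsequent reads (see the residual handling
 * in dasd_eer_read below). "fd", "record" and "handle_trigger" are
 * hypothetical names of this example.
 *
 *        char record[4096];
 *        struct dasd_eer_header *header;
 *        ssize_t len;
 *
 *        len = read(fd, record, sizeof(record));
 *        if (len >= (ssize_t) sizeof(*header)) {
 *                header = (struct dasd_eer_header *) record;
 *                handle_trigger(header->trigger, record, header->total_size);
 *        }
 */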

static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
                             size_t count, loff_t *ppos)
{
        int tc, rc;
        int tailcount, effective_count;
        unsigned long flags;
        struct eerbuffer *eerb;

        eerb = (struct eerbuffer *) filp->private_data;
        if (mutex_lock_interruptible(&readbuffer_mutex))
                return -ERESTARTSYS;

        spin_lock_irqsave(&bufferlock, flags);

        if (eerb->residual < 0) { /* the remainder of this record */
                                  /* has been deleted             */
                eerb->residual = 0;
                spin_unlock_irqrestore(&bufferlock, flags);
                mutex_unlock(&readbuffer_mutex);
                return -EIO;
        } else if (eerb->residual > 0) {
                /* OK we still have a second half of a record to deliver */
                effective_count = min(eerb->residual, (int) count);
                eerb->residual -= effective_count;
        } else {
                tc = 0;
                while (!tc) {
                        tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
                                                  sizeof(tailcount));
                        if (!tc) {
                                /* no data available */
                                spin_unlock_irqrestore(&bufferlock, flags);
                                mutex_unlock(&readbuffer_mutex);
                                if (filp->f_flags & O_NONBLOCK)
                                        return -EAGAIN;
                                rc = wait_event_interruptible(
                                        dasd_eer_read_wait_queue,
                                        eerb->head != eerb->tail);
                                if (rc)
                                        return rc;
                                if (mutex_lock_interruptible(&readbuffer_mutex))
                                        return -ERESTARTSYS;
                                spin_lock_irqsave(&bufferlock, flags);
                        }
                }
                WARN_ON(tc != sizeof(tailcount));
                effective_count = min(tailcount, (int) count);
                eerb->residual = tailcount - effective_count;
        }

        tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
        WARN_ON(tc != effective_count);

        spin_unlock_irqrestore(&bufferlock, flags);

        if (copy_to_user(buf, readbuffer, effective_count)) {
                mutex_unlock(&readbuffer_mutex);
                return -EFAULT;
        }

        mutex_unlock(&readbuffer_mutex);
        return effective_count;
}

static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable)
{
        unsigned int mask;
        unsigned long flags;
        struct eerbuffer *eerb;

        eerb = (struct eerbuffer *) filp->private_data;
        poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
        spin_lock_irqsave(&bufferlock, flags);
        if (eerb->head != eerb->tail)
                mask = POLLIN | POLLRDNORM;
        else
                mask = 0;
        spin_unlock_irqrestore(&bufferlock, flags);
        return mask;
}

static const struct file_operations dasd_eer_fops = {
        .open = &dasd_eer_open,
        .release = &dasd_eer_close,
        .read = &dasd_eer_read,
        .poll = &dasd_eer_poll,
        .owner = THIS_MODULE,
};

static struct miscdevice *dasd_eer_dev = NULL;

int __init dasd_eer_init(void)
{
        int rc;

        dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
        if (!dasd_eer_dev)
                return -ENOMEM;

        dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
        dasd_eer_dev->name = "dasd_eer";
        dasd_eer_dev->fops = &dasd_eer_fops;

        rc = misc_register(dasd_eer_dev);
        if (rc) {
                kfree(dasd_eer_dev);
                dasd_eer_dev = NULL;
                MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
                        "register misc device");
                return rc;
        }

        return 0;
}

void dasd_eer_exit(void)
{
        if (dasd_eer_dev) {
                WARN_ON(misc_deregister(dasd_eer_dev) != 0);
                kfree(dasd_eer_dev);
                dasd_eer_dev = NULL;
        }
}