xref: /openbmc/linux/drivers/s390/block/dasd_eer.c (revision 1c2dd16a)
1 /*
2  *  Character device driver for extended error reporting.
3  *
4  *  Copyright IBM Corp. 2005
5  *  extended error reporting for DASD ECKD devices
6  *  Author(s): Stefan Weinhuber <wein@de.ibm.com>
7  */
8 
9 #define KMSG_COMPONENT "dasd-eckd"
10 
11 #include <linux/init.h>
12 #include <linux/fs.h>
13 #include <linux/kernel.h>
14 #include <linux/miscdevice.h>
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/device.h>
18 #include <linux/poll.h>
19 #include <linux/mutex.h>
20 #include <linux/err.h>
21 #include <linux/slab.h>
22 
23 #include <linux/uaccess.h>
24 #include <linux/atomic.h>
25 #include <asm/ebcdic.h>
26 
27 #include "dasd_int.h"
28 #include "dasd_eckd.h"
29 
30 #ifdef PRINTK_HEADER
31 #undef PRINTK_HEADER
32 #endif				/* PRINTK_HEADER */
33 #define PRINTK_HEADER "dasd(eer):"
34 
35 /*
36  * SECTION: the internal buffer
37  */
38 
39 /*
40  * The internal buffer is meant to store opaque blobs of data, so it does
41  * not know of higher level concepts like triggers.
42  * It consists of a number of pages that are used as a ringbuffer. Each data
43  * blob is stored in a simple record that consists of an integer, which
44  * contains the size of the following data, and the data bytes themselves.
45  *
46  * To allow for multiple independent readers we create one internal buffer
47  * each time the device is opened and destroy the buffer when the file is
48  * closed again. The number of pages used for this buffer is determined by
49  * the module parameter eer_pages.
50  *
51  * One record can be written to a buffer by using the functions
52  * - dasd_eer_start_record (one time per record to write the size to the
53  *                          buffer and reserve the space for the data)
54  * - dasd_eer_write_buffer (one or more times per record to write the data)
55  * The data can be written in several steps but you will have to compute
56  * the total size up front for the invocation of dasd_eer_start_record.
57  * If the ringbuffer is full, dasd_eer_start_record will remove the required
58  * number of old records.
59  *
60  * A record is typically read in two steps: first read the integer that
61  * specifies the size of the following data, then read the data.
62  * Both can be done by
63  * - dasd_eer_read_buffer
64  *
65  * For all mentioned functions you need to get the bufferlock first and keep
66  * it until a complete record is written or read.
67  *
68  * All information necessary to keep track of an internal buffer is kept in
69  * a struct eerbuffer. The buffer specific to a file pointer is stored in
70  * the private_data field of that file. To be able to write data to all
71  * existing buffers, each buffer is also added to the bufferlist.
72  * If the user does not want to read a complete record in one go, we have to
73  * keep track of the rest of the record. residual stores the number of bytes
74  * that are still to be delivered. If the rest of the record is invalidated between
75  * two reads then residual will be set to -1 so that the next read will fail.
76  * All entries in the eerbuffer structure are protected with the bufferlock.
77  * To avoid races between writing to a buffer on the one side and creating
78  * and destroying buffers on the other side, the bufferlock must also be used
79  * to protect the bufferlist.
80  */
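/*
 * Illustrative sketch, not part of the driver: writing one record of "len"
 * bytes from "data" into an eerbuffer "eerb" follows the scheme described
 * above, with the bufferlock held across all calls:
 *
 *	spin_lock_irqsave(&bufferlock, flags);
 *	if (!dasd_eer_start_record(eerb, len))
 *		dasd_eer_write_buffer(eerb, data, len);
 *	spin_unlock_irqrestore(&bufferlock, flags);
 *
 * dasd_eer_start_record reserves the space and stores the size integer,
 * dasd_eer_write_buffer (called one or more times) stores the data bytes.
 */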
81 
82 static int eer_pages = 5;
83 module_param(eer_pages, int, S_IRUGO|S_IWUSR);
84 
85 struct eerbuffer {
86 	struct list_head list;
87 	char **buffer;
88 	int buffersize;
89 	int buffer_page_count;
90 	int head;
91 	int tail;
92 	int residual;
93 };
94 
95 static LIST_HEAD(bufferlist);
96 static DEFINE_SPINLOCK(bufferlock);
97 static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
98 
99 /*
100  * How many free bytes are available in the buffer.
101  * Needs to be called with bufferlock held.
102  */
103 static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
104 {
105 	if (eerb->head < eerb->tail)
106 		return eerb->tail - eerb->head - 1;
107 	return eerb->buffersize - eerb->head + eerb->tail - 1;
108 }
109 
110 /*
111  * How many bytes of buffer space are used.
112  * Needs to be called with bufferlock held.
113  */
114 static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
115 {
116 
117 	if (eerb->head >= eerb->tail)
118 		return eerb->head - eerb->tail;
119 	return eerb->buffersize - eerb->tail + eerb->head;
120 }
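/*
 * Worked example, assuming 4 KiB pages: with buffersize = 2 * 4096 = 8192,
 * head = 100 and tail = 50 the buffer contains 100 - 50 = 50 bytes and has
 * 8192 - 100 + 50 - 1 = 8141 bytes free. Free and filled bytes always add
 * up to buffersize - 1, because one byte is kept unused so that
 * head == tail unambiguously means "buffer empty".
 */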
121 
122 /*
123  * The dasd_eer_write_buffer function just copies count bytes of data
124  * to the buffer. Call dasd_eer_start_record first to make sure that
125  * enough free space is available.
126  * Needs to be called with bufferlock held.
127  */
128 static void dasd_eer_write_buffer(struct eerbuffer *eerb,
129 				  char *data, int count)
130 {
131 
132 	unsigned long headindex, localhead;
133 	unsigned long rest, len;
134 	char *nextdata;
135 
136 	nextdata = data;
137 	rest = count;
138 	while (rest > 0) {
139 		headindex = eerb->head / PAGE_SIZE;
140 		localhead = eerb->head % PAGE_SIZE;
141 		len = min(rest, PAGE_SIZE - localhead);
142 		memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
143 		nextdata += len;
144 		rest -= len;
145 		eerb->head += len;
146 		if (eerb->head == eerb->buffersize)
147 			eerb->head = 0; /* wrap around */
148 		BUG_ON(eerb->head > eerb->buffersize);
149 	}
150 }
151 
152 /*
153  * Needs to be called with bufferlock held.
154  */
155 static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
156 {
157 
158 	unsigned long tailindex, localtail;
159 	unsigned long rest, len, finalcount;
160 	char *nextdata;
161 
162 	finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
163 	nextdata = data;
164 	rest = finalcount;
165 	while (rest > 0) {
166 		tailindex = eerb->tail / PAGE_SIZE;
167 		localtail = eerb->tail % PAGE_SIZE;
168 		len = min(rest, PAGE_SIZE - localtail);
169 		memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
170 		nextdata += len;
171 		rest -= len;
172 		eerb->tail += len;
173 		if (eerb->tail == eerb->buffersize)
174 			eerb->tail = 0; /* wrap around */
175 		BUG_ON(eerb->tail > eerb->buffersize);
176 	}
177 	return finalcount;
178 }
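/*
 * Illustrative sketch, not part of the driver: reading one record back is
 * the reverse of writing it, again with the bufferlock held across both
 * calls and "data" pointing to a buffer large enough for the record:
 *
 *	int size;
 *
 *	if (dasd_eer_read_buffer(eerb, (char *) &size, sizeof(size)))
 *		dasd_eer_read_buffer(eerb, data, size);
 */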
179 
180 /*
181  * Whenever you want to write a blob of data to the internal buffer you
182  * have to call this function first. It writes the size of the following
183  * data to the buffer. If necessary it will remove old records to make
184  * room for the new one.
185  * Needs to be called with bufferlock held.
186  */
187 static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
188 {
189 	int tailcount;
190 
191 	if (count + sizeof(count) > eerb->buffersize)
192 		return -ENOMEM;
193 	while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
194 		if (eerb->residual > 0) {
195 			eerb->tail += eerb->residual;
196 			if (eerb->tail >= eerb->buffersize)
197 				eerb->tail -= eerb->buffersize;
198 			eerb->residual = -1;
199 		}
200 		dasd_eer_read_buffer(eerb, (char *) &tailcount,
201 				     sizeof(tailcount));
202 		eerb->tail += tailcount;
203 		if (eerb->tail >= eerb->buffersize)
204 			eerb->tail -= eerb->buffersize;
205 	}
206 	dasd_eer_write_buffer(eerb, (char *) &count, sizeof(count));
207 
208 	return 0;
209 }
210 
211 /*
212  * Release pages that are not used anymore.
213  */
214 static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
215 {
216 	int i;
217 
218 	for (i = 0; i < no_pages; i++)
219 		free_page((unsigned long) buf[i]);
220 }
221 
222 /*
223  * Allocate a new set of memory pages.
224  */
225 static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
226 {
227 	int i;
228 
229 	for (i = 0; i < no_pages; i++) {
230 		buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
231 		if (!buf[i]) {
232 			dasd_eer_free_buffer_pages(buf, i);
233 			return -ENOMEM;
234 		}
235 	}
236 	return 0;
237 }
238 
239 /*
240  * SECTION: The extended error reporting functionality
241  */
242 
243 /*
244  * When a DASD device driver wants to report an error, it calls the
245  * function dasd_eer_write and gives the respective trigger ID as
246  * parameter. Currently there are four kinds of triggers:
247  *
248  * DASD_EER_FATALERROR:  all kinds of unrecoverable I/O problems
249  * DASD_EER_PPRCSUSPEND: PPRC was suspended
250  * DASD_EER_NOPATH:      There is no path to the device left.
251  * DASD_EER_STATECHANGE: The state of the device has changed.
252  *
253  * For the first three triggers all required information can be supplied by
254  * the caller. For these triggers a record is written by the function
255  * dasd_eer_write_standard_trigger.
256  *
257  * The DASD_EER_STATECHANGE trigger is special since a sense subsystem
258  * status ccw needs to be executed to gather the necessary sense data first.
259  * The dasd_eer_snss function will queue the SNSS request and the request
260  * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
261  * trigger.
262  *
263  * To avoid memory allocations at runtime, the necessary memory is allocated
264  * when the extended error reporting is enabled for a device (by
265  * dasd_eer_enable). There is one sense subsystem status request for each
266  * eer enabled DASD device. The presence of the cqr in device->eer_cqr
267  * indicates that eer is enabled for the device. The use of the snss request
268  * is protected by the DASD_FLAG_EER_IN_USE bit. When this flag indicates
269  * that the cqr is currently in use, dasd_eer_snss cannot start a second
270  * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
271  * the SNSS request will check the bit and call dasd_eer_snss again.
272  */
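/*
 * Illustrative sketch, not part of the driver: a caller that wants to
 * report an unrecoverable I/O error for a failed request would simply do
 *
 *	dasd_eer_write(device, cqr, DASD_EER_FATALERROR);
 *
 * dasd_eer_write returns immediately when extended error reporting is not
 * enabled for the device (device->eer_cqr is NULL), so callers do not have
 * to check that themselves.
 */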
273 
274 #define SNSS_DATA_SIZE 44
275 
276 #define DASD_EER_BUSID_SIZE 10
277 struct dasd_eer_header {
278 	__u32 total_size;
279 	__u32 trigger;
280 	__u64 tv_sec;
281 	__u64 tv_usec;
282 	char busid[DASD_EER_BUSID_SIZE];
283 } __attribute__ ((packed));
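/*
 * Resulting record layout as delivered to a reader of the character device
 * (summary derived from the write functions below):
 *
 *	struct dasd_eer_header header;	header.total_size is the length of
 *					the complete record, header included
 *	<sense or SNSS data>		zero or more 32 byte sense data sets,
 *					or SNSS_DATA_SIZE bytes of SNSS data
 *	char eor[4];			the marker "EOR" plus its terminating
 *					zero byte
 *
 * The size integer that dasd_eer_start_record places in front of each
 * record in the internal buffer is consumed by dasd_eer_read and is not
 * returned to user space.
 */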
284 
285 /*
286  * The following function can be used for those triggers that have
287  * all necessary data available when the function is called.
288  * If the parameter cqr is not NULL, the chain of requests will be searched
289  * for valid sense data, and all valid sense data sets will be added to
290  * the trigger's data.
291  */
292 static void dasd_eer_write_standard_trigger(struct dasd_device *device,
293 					    struct dasd_ccw_req *cqr,
294 					    int trigger)
295 {
296 	struct dasd_ccw_req *temp_cqr;
297 	int data_size;
298 	struct timeval tv;
299 	struct dasd_eer_header header;
300 	unsigned long flags;
301 	struct eerbuffer *eerb;
302 	char *sense;
303 
304 	/* go through cqr chain and count the valid sense data sets */
305 	data_size = 0;
306 	for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
307 		if (dasd_get_sense(&temp_cqr->irb))
308 			data_size += 32;
309 
310 	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
311 	header.trigger = trigger;
312 	do_gettimeofday(&tv);
313 	header.tv_sec = tv.tv_sec;
314 	header.tv_usec = tv.tv_usec;
315 	strncpy(header.busid, dev_name(&device->cdev->dev),
316 		DASD_EER_BUSID_SIZE);
317 
318 	spin_lock_irqsave(&bufferlock, flags);
319 	list_for_each_entry(eerb, &bufferlist, list) {
320 		dasd_eer_start_record(eerb, header.total_size);
321 		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
322 		for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
323 			sense = dasd_get_sense(&temp_cqr->irb);
324 			if (sense)
325 				dasd_eer_write_buffer(eerb, sense, 32);
326 		}
327 		dasd_eer_write_buffer(eerb, "EOR", 4);
328 	}
329 	spin_unlock_irqrestore(&bufferlock, flags);
330 	wake_up_interruptible(&dasd_eer_read_wait_queue);
331 }
332 
333 /*
334  * This function writes a DASD_EER_STATECHANGE trigger.
335  */
336 static void dasd_eer_write_snss_trigger(struct dasd_device *device,
337 					struct dasd_ccw_req *cqr,
338 					int trigger)
339 {
340 	int data_size;
341 	int snss_rc;
342 	struct timeval tv;
343 	struct dasd_eer_header header;
344 	unsigned long flags;
345 	struct eerbuffer *eerb;
346 
347 	snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
348 	if (snss_rc)
349 		data_size = 0;
350 	else
351 		data_size = SNSS_DATA_SIZE;
352 
353 	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
354 	header.trigger = DASD_EER_STATECHANGE;
355 	do_gettimeofday(&tv);
356 	header.tv_sec = tv.tv_sec;
357 	header.tv_usec = tv.tv_usec;
358 	strncpy(header.busid, dev_name(&device->cdev->dev),
359 		DASD_EER_BUSID_SIZE);
360 
361 	spin_lock_irqsave(&bufferlock, flags);
362 	list_for_each_entry(eerb, &bufferlist, list) {
363 		dasd_eer_start_record(eerb, header.total_size);
364 		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
365 		if (!snss_rc)
366 			dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
367 		dasd_eer_write_buffer(eerb, "EOR", 4);
368 	}
369 	spin_unlock_irqrestore(&bufferlock, flags);
370 	wake_up_interruptible(&dasd_eer_read_wait_queue);
371 }
372 
373 /*
374  * This function is called for all triggers. It calls the appropriate
375  * function that writes the actual trigger records.
376  */
377 void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
378 		    unsigned int id)
379 {
380 	if (!device->eer_cqr)
381 		return;
382 	switch (id) {
383 	case DASD_EER_FATALERROR:
384 	case DASD_EER_PPRCSUSPEND:
385 		dasd_eer_write_standard_trigger(device, cqr, id);
386 		break;
387 	case DASD_EER_NOPATH:
388 		dasd_eer_write_standard_trigger(device, NULL, id);
389 		break;
390 	case DASD_EER_STATECHANGE:
391 		dasd_eer_write_snss_trigger(device, cqr, id);
392 		break;
393 	default: /* unknown trigger, so we write it without any sense data */
394 		dasd_eer_write_standard_trigger(device, NULL, id);
395 		break;
396 	}
397 }
398 EXPORT_SYMBOL(dasd_eer_write);
399 
400 /*
401  * Start a sense subsystem status request.
402  * Needs to be called with the device held.
403  */
404 void dasd_eer_snss(struct dasd_device *device)
405 {
406 	struct dasd_ccw_req *cqr;
407 
408 	cqr = device->eer_cqr;
409 	if (!cqr)	/* Device not eer enabled. */
410 		return;
411 	if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
412 		/* Sense subsystem status request in use. */
413 		set_bit(DASD_FLAG_EER_SNSS, &device->flags);
414 		return;
415 	}
416 	/* cdev is already locked, can't use dasd_add_request_head */
417 	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
418 	cqr->status = DASD_CQR_QUEUED;
419 	list_add(&cqr->devlist, &device->ccw_queue);
420 	dasd_schedule_device_bh(device);
421 }
422 
423 /*
424  * Callback function for use with sense subsystem status request.
425  */
426 static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
427 {
428 	struct dasd_device *device = cqr->startdev;
429 	unsigned long flags;
430 
431 	dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
432 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
433 	if (device->eer_cqr == cqr) {
434 		clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
435 		if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
436 			/* Another SNSS has been requested in the meantime. */
437 			dasd_eer_snss(device);
438 		cqr = NULL;
439 	}
440 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
441 	if (cqr)
442 		/*
443 		 * Extended error reporting has been switched off while
444 		 * the SNSS request was running. It could even have
445 		 * been switched off and on again in which case there
446 		 * is a new ccw in device->eer_cqr. Free the "old"
447 		 * snss request now.
448 		 */
449 		dasd_kfree_request(cqr, device);
450 }
451 
452 /*
453  * Enable error reporting on a given device.
454  */
455 int dasd_eer_enable(struct dasd_device *device)
456 {
457 	struct dasd_ccw_req *cqr = NULL;
458 	unsigned long flags;
459 	struct ccw1 *ccw;
460 	int rc = 0;
461 
462 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
463 	if (device->eer_cqr)
464 		goto out;
465 	else if (!device->discipline ||
466 		 strcmp(device->discipline->name, "ECKD"))
467 		rc = -EMEDIUMTYPE;
468 	else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
469 		rc = -EBUSY;
470 
471 	if (rc)
472 		goto out;
473 
474 	cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
475 				   SNSS_DATA_SIZE, device);
476 	if (IS_ERR(cqr)) {
477 		rc = -ENOMEM;
478 		cqr = NULL;
479 		goto out;
480 	}
481 
482 	cqr->startdev = device;
483 	cqr->retries = 255;
484 	cqr->expires = 10 * HZ;
485 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
486 	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
487 
488 	ccw = cqr->cpaddr;
489 	ccw->cmd_code = DASD_ECKD_CCW_SNSS;
490 	ccw->count = SNSS_DATA_SIZE;
491 	ccw->flags = 0;
492 	ccw->cda = (__u32)(addr_t) cqr->data;
493 
494 	cqr->buildclk = get_tod_clock();
495 	cqr->status = DASD_CQR_FILLED;
496 	cqr->callback = dasd_eer_snss_cb;
497 
498 	if (!device->eer_cqr) {
499 		device->eer_cqr = cqr;
500 		cqr = NULL;
501 	}
502 
503 out:
504 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
505 
506 	if (cqr)
507 		dasd_kfree_request(cqr, device);
508 
509 	return rc;
510 }
511 
512 /*
513  * Disable error reporting on a given device.
514  */
515 void dasd_eer_disable(struct dasd_device *device)
516 {
517 	struct dasd_ccw_req *cqr;
518 	unsigned long flags;
519 	int in_use;
520 
521 	if (!device->eer_cqr)
522 		return;
523 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
524 	cqr = device->eer_cqr;
525 	device->eer_cqr = NULL;
526 	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
527 	in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
528 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
529 	if (cqr && !in_use)
530 		dasd_kfree_request(cqr, device);
531 }
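/*
 * Usage note (illustrative, the attribute itself is defined elsewhere in
 * the DASD driver): dasd_eer_enable and dasd_eer_disable are not called
 * from this file; they are typically reached through the per device sysfs
 * attribute "eer_enabled", e.g.
 *
 *	echo 1 > /sys/bus/ccw/devices/<busid>/eer_enabled
 *
 * where <busid> stands for the bus id of the DASD device.
 */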
532 
533 /*
534  * SECTION: the device operations
535  */
536 
537 /*
538  * On the one side we need a lock to access our internal buffer, on the
539  * other side copy_to_user can sleep. So we need to copy the data we have
540  * to transfer into a readbuffer, which is protected by the readbuffer_mutex.
541  */
542 static char readbuffer[PAGE_SIZE];
543 static DEFINE_MUTEX(readbuffer_mutex);
544 
545 static int dasd_eer_open(struct inode *inp, struct file *filp)
546 {
547 	struct eerbuffer *eerb;
548 	unsigned long flags;
549 
550 	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
551 	if (!eerb)
552 		return -ENOMEM;
553 	eerb->buffer_page_count = eer_pages;
554 	if (eerb->buffer_page_count < 1 ||
555 	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
556 		kfree(eerb);
557 		DBF_EVENT(DBF_WARNING, "can't open device since module "
558 			"parameter eer_pages is smaller than 1 or"
559 			" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
560 		return -EINVAL;
561 	}
562 	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
563 	eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *),
564 			       GFP_KERNEL);
565 	if (!eerb->buffer) {
566 		kfree(eerb);
567 		return -ENOMEM;
568 	}
569 	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
570 					   eerb->buffer_page_count)) {
571 		kfree(eerb->buffer);
572 		kfree(eerb);
573 		return -ENOMEM;
574 	}
575 	filp->private_data = eerb;
576 	spin_lock_irqsave(&bufferlock, flags);
577 	list_add(&eerb->list, &bufferlist);
578 	spin_unlock_irqrestore(&bufferlock, flags);
579 
580 	return nonseekable_open(inp, filp);
581 }
582 
583 static int dasd_eer_close(struct inode *inp, struct file *filp)
584 {
585 	struct eerbuffer *eerb;
586 	unsigned long flags;
587 
588 	eerb = (struct eerbuffer *) filp->private_data;
589 	spin_lock_irqsave(&bufferlock, flags);
590 	list_del(&eerb->list);
591 	spin_unlock_irqrestore(&bufferlock, flags);
592 	dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
593 	kfree(eerb->buffer);
594 	kfree(eerb);
595 
596 	return 0;
597 }
598 
599 static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
600 			     size_t count, loff_t *ppos)
601 {
602 	int tc, rc;
603 	int tailcount, effective_count;
604 	unsigned long flags;
605 	struct eerbuffer *eerb;
606 
607 	eerb = (struct eerbuffer *) filp->private_data;
608 	if (mutex_lock_interruptible(&readbuffer_mutex))
609 		return -ERESTARTSYS;
610 
611 	spin_lock_irqsave(&bufferlock, flags);
612 
613 	if (eerb->residual < 0) { /* the remainder of this record */
614 		                  /* has been deleted             */
615 		eerb->residual = 0;
616 		spin_unlock_irqrestore(&bufferlock, flags);
617 		mutex_unlock(&readbuffer_mutex);
618 		return -EIO;
619 	} else if (eerb->residual > 0) {
620 		/* OK we still have a second half of a record to deliver */
621 		effective_count = min(eerb->residual, (int) count);
622 		eerb->residual -= effective_count;
623 	} else {
624 		tc = 0;
625 		while (!tc) {
626 			tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
627 						  sizeof(tailcount));
628 			if (!tc) {
629 				/* no data available */
630 				spin_unlock_irqrestore(&bufferlock, flags);
631 				mutex_unlock(&readbuffer_mutex);
632 				if (filp->f_flags & O_NONBLOCK)
633 					return -EAGAIN;
634 				rc = wait_event_interruptible(
635 					dasd_eer_read_wait_queue,
636 					eerb->head != eerb->tail);
637 				if (rc)
638 					return rc;
639 				if (mutex_lock_interruptible(&readbuffer_mutex))
640 					return -ERESTARTSYS;
641 				spin_lock_irqsave(&bufferlock, flags);
642 			}
643 		}
644 		WARN_ON(tc != sizeof(tailcount));
645 		effective_count = min(tailcount, (int) count);
646 		eerb->residual = tailcount - effective_count;
647 	}
648 
649 	tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
650 	WARN_ON(tc != effective_count);
651 
652 	spin_unlock_irqrestore(&bufferlock, flags);
653 
654 	if (copy_to_user(buf, readbuffer, effective_count)) {
655 		mutex_unlock(&readbuffer_mutex);
656 		return -EFAULT;
657 	}
658 
659 	mutex_unlock(&readbuffer_mutex);
660 	return effective_count;
661 }
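/*
 * Illustrative user space sketch, not part of the driver: a reader of the
 * misc device first reads the fixed size header and then uses
 * header.total_size to fetch the rest of the record (error handling
 * omitted, and assuming the packed struct dasd_eer_header layout is shared
 * with user space):
 *
 *	struct dasd_eer_header header;
 *	char rest[4096];
 *
 *	read(fd, &header, sizeof(header));
 *	read(fd, rest, header.total_size - sizeof(header));
 *
 * A single read() never crosses a record boundary; if the supplied buffer
 * is smaller than the remainder of the record, the rest is delivered by
 * subsequent reads.
 */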
662 
663 static unsigned int dasd_eer_poll(struct file *filp, poll_table *ptable)
664 {
665 	unsigned int mask;
666 	unsigned long flags;
667 	struct eerbuffer *eerb;
668 
669 	eerb = (struct eerbuffer *) filp->private_data;
670 	poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
671 	spin_lock_irqsave(&bufferlock, flags);
672 	if (eerb->head != eerb->tail)
673 		mask = POLLIN | POLLRDNORM ;
674 	else
675 		mask = 0;
676 	spin_unlock_irqrestore(&bufferlock, flags);
677 	return mask;
678 }
679 
680 static const struct file_operations dasd_eer_fops = {
681 	.open		= &dasd_eer_open,
682 	.release	= &dasd_eer_close,
683 	.read		= &dasd_eer_read,
684 	.poll		= &dasd_eer_poll,
685 	.owner		= THIS_MODULE,
686 	.llseek		= noop_llseek,
687 };
688 
689 static struct miscdevice *dasd_eer_dev = NULL;
690 
691 int __init dasd_eer_init(void)
692 {
693 	int rc;
694 
695 	dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
696 	if (!dasd_eer_dev)
697 		return -ENOMEM;
698 
699 	dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
700 	dasd_eer_dev->name  = "dasd_eer";
701 	dasd_eer_dev->fops  = &dasd_eer_fops;
702 
703 	rc = misc_register(dasd_eer_dev);
704 	if (rc) {
705 		kfree(dasd_eer_dev);
706 		dasd_eer_dev = NULL;
707 		DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
708 		       "register misc device");
709 		return rc;
710 	}
711 
712 	return 0;
713 }
714 
715 void dasd_eer_exit(void)
716 {
717 	if (dasd_eer_dev) {
718 		misc_deregister(dasd_eer_dev);
719 		kfree(dasd_eer_dev);
720 		dasd_eer_dev = NULL;
721 	}
722 }
723