xref: /openbmc/linux/drivers/s390/block/dasd_eer.c (revision 12c3a548)
1 /*
2  *	character device driver for extended error reporting
3  *
4  *
5  *	Copyright (C) 2005 IBM Corporation
6  *	extended error reporting for DASD ECKD devices
7  *	Author(s): Stefan Weinhuber <wein@de.ibm.com>
8  *
9  */
10 
11 #include <linux/init.h>
12 #include <linux/fs.h>
13 #include <linux/kernel.h>
14 #include <linux/miscdevice.h>
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/device.h>
18 #include <linux/workqueue.h>
19 #include <linux/poll.h>
20 #include <linux/notifier.h>
21 
22 #include <asm/uaccess.h>
23 #include <asm/semaphore.h>
24 #include <asm/atomic.h>
25 #include <asm/ebcdic.h>
26 
27 #include "dasd_int.h"
28 #include "dasd_eckd.h"
29 
30 
31 MODULE_LICENSE("GPL");
32 
33 MODULE_AUTHOR("Stefan Weinhuber <wein@de.ibm.com>");
34 MODULE_DESCRIPTION("DASD extended error reporting module");
35 
36 
37 #ifdef PRINTK_HEADER
38 #undef PRINTK_HEADER
39 #endif				/* PRINTK_HEADER */
40 #define PRINTK_HEADER "dasd(eer):"
41 
42 
43 
44 
45 
46 /*****************************************************************************/
47 /*      the internal buffer                                                  */
48 /*****************************************************************************/
49 
50 /*
51  * The internal buffer is meant to store opaque blobs of data, so it doesn't
52  * know of higher level concepts like triggers.
53  * It consists of a number of pages that are used as a ringbuffer. Each data
54  * blob is stored in a simple record that consists of an integer, which
55  * contains the size of the following data, and the data bytes themselves.
56  *
57  * To allow for multiple independent readers we create one internal buffer
58  * each time the device is opened and destroy the buffer when the file is
59  * closed again.
60  *
61  * One record can be written to a buffer by using the functions
62  * - dasd_eer_start_record (one time per record to write the size to the buffer
63  *                          and reserve the space for the data)
64  * - dasd_eer_write_buffer (one or more times per record to write the data)
65  * The data can be written in several steps but you will have to compute
66  * the total size up front for the invocation of dasd_eer_start_record.
67  * If the ringbuffer is full, dasd_eer_start_record will remove the required
68  * number of old records.
69  *
70  * A record is typically read in two steps, first read the integer that
71  * specifies the size of the following data, then read the data.
72  * Both can be done by
73  * - dasd_eer_read_buffer
74  *
75  * For all mentioned functions you need to get the bufferlock first and keep it
76  * until a complete record is written or read.
77  */
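
/*
 * A minimal usage sketch (illustrative only, "data" and "count" are
 * placeholders): writing one record and reading it back with the functions
 * named above, with bufferlock held for each complete operation.
 *
 *	if (!dasd_eer_start_record(eerb, count))
 *		dasd_eer_write_buffer(eerb, count, data);
 *
 *	int size;
 *	if (dasd_eer_read_buffer(eerb, sizeof(size), (char *) &size))
 *		dasd_eer_read_buffer(eerb, size, data);
 */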
78 
79 
80 /*
81  * All the information necessary to keep track of an internal buffer is kept
82  * in a struct eerbuffer. The buffer specific to a file pointer is stored in
83  * the private_data field of that file. To be able to write data to all
84  * existing buffers, each buffer is also added to the bufferlist.
85  * If the user doesn't want to read a complete record in one go, we have to
86  * keep track of the rest of the record. residual stores the number of bytes
87  * that remain to be delivered. If the rest of the record is invalidated between
88  * two reads then residual will be set to -1 so that the next read will fail.
89  * All entries in the eerbuffer structure are protected with the bufferlock.
90  * To avoid races between writing to a buffer on the one side and creating
91  * and destroying buffers on the other side, the bufferlock must also be used
92  * to protect the bufferlist.
93  */
94 
95 struct eerbuffer {
96 	struct list_head list;
97 	char **buffer;
98 	int buffersize;
99 	int buffer_page_count;
100 	int head;
101 	int tail;
102 	int residual;
103 };
104 
105 LIST_HEAD(bufferlist);
106 
107 static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED;
108 
109 DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
110 
111 /*
112  * How many free bytes are available on the buffer.
113  * needs to be called with bufferlock held
114  */
115 static int
116 dasd_eer_get_free_bytes(struct eerbuffer *eerb)
117 {
118 	if (eerb->head < eerb->tail)
119 		return eerb->tail - eerb->head - 1;
120 	else
121 		return eerb->buffersize - eerb->head + eerb->tail - 1;
122 }
123 
124 /*
125  * How many bytes of buffer space are used.
126  * needs to be called with bufferlock held
127  */
128 static int
129 dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
130 {
131 
132 	if (eerb->head >= eerb->tail)
133 		return eerb->head - eerb->tail;
134 	else
135 		return eerb->buffersize - eerb->tail + eerb->head;
136 }
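
/*
 * Worked example (added for illustration, not from the original code):
 * head == tail means the buffer is empty, and one byte always stays unused
 * so that a completely full buffer can be distinguished from an empty one;
 * therefore free + filled == buffersize - 1 at all times. With
 * buffersize = 4096, head = 100 and tail = 40 this gives
 * filled = 100 - 40 = 60 and free = 4096 - 100 + 40 - 1 = 4035.
 */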
137 
138 /*
139  * The dasd_eer_write_buffer function just copies count bytes of data
140  * to the buffer. Make sure to call dasd_eer_start_record first, to
141  * make sure that enough free space is available.
142  * needs to be called with bufferlock held
143  */
144 static void
145 dasd_eer_write_buffer(struct eerbuffer *eerb, int count, char *data)
146 {
147 
148 	unsigned long headindex, localhead;
149 	unsigned long rest, len;
150 	char *nextdata;
151 
152 	nextdata = data;
153 	rest = count;
154 	while (rest > 0) {
155 		headindex = eerb->head / PAGE_SIZE;
156 		localhead = eerb->head % PAGE_SIZE;
157 		len = min(rest, (PAGE_SIZE - localhead));
158 		memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
159 		nextdata += len;
160 		rest -= len;
161 		eerb->head += len;
162 		if (eerb->head == eerb->buffersize)
163 			eerb->head = 0; /* wrap around */
164 		if (eerb->head > eerb->buffersize) {
165 			MESSAGE(KERN_ERR, "%s", "runaway buffer head.");
166 			BUG();
167 		}
168 	}
169 }
170 
171 /*
172  * needs to be called with bufferlock held
173  */
174 static int
175 dasd_eer_read_buffer(struct eerbuffer *eerb, int count, char *data)
176 {
177 
178 	unsigned long tailindex, localtail;
179 	unsigned long rest, len, finalcount;
180 	char *nextdata;
181 
182 	finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
183 	nextdata = data;
184 	rest = finalcount;
185 	while (rest > 0) {
186 		tailindex = eerb->tail / PAGE_SIZE;
187 		localtail = eerb->tail % PAGE_SIZE;
188 		len = min(rest, (PAGE_SIZE - localtail));
189 		memcpy(nextdata, eerb->buffer[tailindex]+localtail, len);
190 		nextdata += len;
191 		rest -= len;
192 		eerb->tail += len;
193 		if (eerb->tail == eerb->buffersize)
194 			eerb->tail = 0; /* wrap around */
195 		if (eerb->tail > eerb->buffersize) {
196 			MESSAGE(KERN_ERR, "%s", "runaway buffer tail.");
197 			BUG();
198 		}
199 	}
200 	return finalcount;
201 }
202 
203 /*
204  * Whenever you want to write a blob of data to the internal buffer you
205  * have to start by using this function first. It will write the number
206  * of bytes that will be written to the buffer. If necessary it will remove
207  * old records to make room for the new one.
208  * needs to be called with bufferlock held
209  */
210 static int
211 dasd_eer_start_record(struct eerbuffer *eerb, int count)
212 {
213 	int tailcount;
214 	if (count + sizeof(count) > eerb->buffersize)
215 		return -ENOMEM;
216 	while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
217 		if (eerb->residual > 0) {
218 			eerb->tail += eerb->residual;
219 			if (eerb->tail >= eerb->buffersize)
220 				eerb->tail -= eerb->buffersize;
221 			eerb->residual = -1;
222 		}
223 		dasd_eer_read_buffer(eerb, sizeof(tailcount),
224 				     (char*)(&tailcount));
225 		eerb->tail += tailcount;
226 		if (eerb->tail >= eerb->buffersize)
227 			eerb->tail -= eerb->buffersize;
228 	}
229 	dasd_eer_write_buffer(eerb, sizeof(count), (char*)(&count));
230 
231 	return 0;
232 }
233 
234 /*
235  * release pages that are not used anymore
236  */
237 static void
238 dasd_eer_free_buffer_pages(char **buf, int no_pages)
239 {
240 	int i;
241 
242 	for (i = 0; i < no_pages; ++i) {
243 		free_page((unsigned long)buf[i]);
244 	}
245 }
246 
247 /*
248  * allocate a new set of memory pages
249  */
250 static int
251 dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
252 {
253 	int i;
254 
255 	for (i = 0; i < no_pages; ++i) {
256 		buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
257 		if (!buf[i]) {
258 			dasd_eer_free_buffer_pages(buf, i);
259 			return -ENOMEM;
260 		}
261 	}
262 	return 0;
263 }
264 
265 /*
266  * empty the buffer by resetting head and tail
267  * In case there is a half read data blob in the buffer, we set residual
268  * to -1 to indicate that the remainder of the blob is lost.
269  */
270 static void
271 dasd_eer_purge_buffer(struct eerbuffer *eerb)
272 {
273 	unsigned long flags;
274 
275 	spin_lock_irqsave(&bufferlock, flags);
276 	if (eerb->residual > 0)
277 		eerb->residual = -1;
278 	eerb->tail = 0;
279 	eerb->head = 0;
280 	spin_unlock_irqrestore(&bufferlock, flags);
281 }
282 
283 /*
284  * set the size of the buffer, newsize is the new number of pages to be used
285  * we don't try to copy any data back an forth, so any resize will also purge
286  * the buffer
287  */
288 static int
289 dasd_eer_resize_buffer(struct eerbuffer *eerb, int newsize)
290 {
291 	int i, oldcount, reuse;
292 	char **new;
293 	char **old;
294 	unsigned long flags;
295 
296 	if (newsize < 1)
297 		return -EINVAL;
298 	if (eerb->buffer_page_count == newsize) {
299 		/* documented behaviour is that any successful invocation
300 		 * will purge all records */
301 		dasd_eer_purge_buffer(eerb);
302 		return 0;
303 	}
304 	new = kmalloc(newsize * sizeof(char *), GFP_KERNEL);
305 	if (!new)
306 		return -ENOMEM;
307 
308 	reuse = min(eerb->buffer_page_count, newsize);
309 	for (i = 0; i < reuse; ++i) {
310 		new[i] = eerb->buffer[i];
311 	}
312 	if (eerb->buffer_page_count < newsize) {
313 		if (dasd_eer_allocate_buffer_pages(
314 			    &new[eerb->buffer_page_count],
315 			    newsize - eerb->buffer_page_count)) {
316 			kfree(new);
317 			return -ENOMEM;
318 		}
319 	}
320 
321 	spin_lock_irqsave(&bufferlock, flags);
322 	old = eerb->buffer;
323 	eerb->buffer = new;
324 	if (eerb->residual > 0)
325 		eerb->residual = -1;
326 	eerb->tail = 0;
327 	eerb->head = 0;
328 	oldcount = eerb->buffer_page_count;
329 	eerb->buffer_page_count = newsize;
330 	spin_unlock_irqrestore(&bufferlock, flags);
331 
332 	if (oldcount > newsize) {
333 		for (i = newsize; i < oldcount; ++i) {
334 			free_page((unsigned long)old[i]);
335 		}
336 	}
337 	kfree(old);
338 
339 	return 0;
340 }
341 
342 
343 /*****************************************************************************/
344 /*      The extended error reporting functionality                           */
345 /*****************************************************************************/
346 
347 /*
348  * When a DASD device driver wants to report an error, it calls the
349  * function dasd_eer_write_trigger (via a notifier mechanism) and gives the
350  * respective trigger ID as parameter.
351  * Currently there are four kinds of triggers:
352  *
353  * DASD_EER_FATALERROR:  all kinds of unrecoverable I/O problems
354  * DASD_EER_PPRCSUSPEND: PPRC was suspended
355  * DASD_EER_NOPATH:      There is no path to the device left.
356  * DASD_EER_STATECHANGE: The state of the device has changed.
357  *
358  * For the first three triggers all required information can be supplied by
359  * the caller. For these triggers a record is written by the function
360  * dasd_eer_write_standard_trigger.
361  *
362  * When dasd_eer_write_trigger is called to write a DASD_EER_STATECHANGE
363  * trigger, we have to gather the necessary sense data first. We cannot queue
364  * the necessary SNSS (sense subsystem status) request immediately, since we
365  * are likely to run in a deadlock situation. Instead, we schedule a
366  * work_struct that calls the function dasd_eer_sense_subsystem_status to
367  * create and start an SNSS request asynchronously.
368  *
369  * To avoid memory allocations at runtime, the necessary memory is allocated
370  * when the extended error reporting is enabled for a device (by
371  * dasd_eer_probe). There is one private eer data structure for each eer
372  * enabled DASD device. It contains memory for the work_struct, one SNSS cqr
373  * and a flags field that is used to coordinate the use of the cqr. The call
374  * to write a state change trigger can come in at any time, so we have one flag
375  * CQR_IN_USE that protects the cqr itself. When this flag indicates that the
376  * cqr is currently in use, dasd_eer_sense_subsystem_status cannot start a
377  * second request but sets the SNSS_REQUESTED flag instead.
378  *
379  * When the request is finished, the callback function dasd_eer_SNSS_cb
380  * is called. This function will invoke the function
381  * dasd_eer_write_SNSS_trigger to finally write the trigger. It will also
382  * check the SNSS_REQUESTED flag and if it is set it will call
383  * dasd_eer_sense_subsystem_status again.
384  *
385  * To avoid race conditions when checking and setting these flags, they must
386  * be protected by the snsslock.
387  */
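
/*
 * Illustrative sketch only: how a trigger reaches this module. In the
 * running kernel the dasd core raises it through the notifier chain that
 * dasd_eer_nb (defined below) is registered on; the direct call and the
 * "device"/"cqr" variables here are placeholders for illustration.
 *
 *	struct dasd_eer_trigger t = {
 *		.id     = DASD_EER_FATALERROR,
 *		.device = device,
 *		.cqr    = cqr,
 *	};
 *	dasd_eer_notify(&dasd_eer_nb, DASD_EER_TRIGGER, &t);
 */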
388 
389 struct dasd_eer_private {
390 	struct dasd_ccw_req *cqr;
391 	unsigned long flags;
392 	struct work_struct worker;
393 };
394 
395 static void dasd_eer_destroy(struct dasd_device *device,
396 			     struct dasd_eer_private *eer);
397 static int
398 dasd_eer_write_trigger(struct dasd_eer_trigger *trigger);
399 static void dasd_eer_sense_subsystem_status(void *data);
400 static int dasd_eer_notify(struct notifier_block *self,
401 			   unsigned long action, void *data);
402 
403 struct workqueue_struct *dasd_eer_workqueue;
404 
405 #define SNSS_DATA_SIZE 44
406 static spinlock_t snsslock = SPIN_LOCK_UNLOCKED;
407 
408 #define DASD_EER_BUSID_SIZE 10
409 struct dasd_eer_header {
410 	__u32 total_size;
411 	__u32 trigger;
412 	__u64 tv_sec;
413 	__u64 tv_usec;
414 	char busid[DASD_EER_BUSID_SIZE];
415 } __attribute__ ((packed));
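
/*
 * Resulting record layout as delivered to a reader (derived from the write
 * functions below): one struct dasd_eer_header, followed by the trigger
 * specific data (zero or more 32 byte sense data sets for the standard
 * triggers, or SNSS_DATA_SIZE bytes for a state change trigger), terminated
 * by the four byte string "EOR"; header.total_size covers all of it.
 */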
416 
417 static struct notifier_block dasd_eer_nb = {
418 	.notifier_call = dasd_eer_notify,
419 };
420 
421 /*
422  * flags for use with dasd_eer_private
423  */
424 #define CQR_IN_USE     0
425 #define SNSS_REQUESTED 1
426 
427 /*
428  * This function checks if extended error reporting is available for a given
429  * dasd_device. If yes, then it creates and returns a struct dasd_eer_private,
430  * otherwise it returns an ERR_PTR(-EPERM).
431  */
432 struct dasd_eer_private *
433 dasd_eer_probe(struct dasd_device *device)
434 {
435 	struct dasd_eer_private *private;
436 
437 	if (!(device && device->discipline
438 	      && !strcmp(device->discipline->name, "ECKD"))) {
439 		return ERR_PTR(-EPERM);
440 	}
441 	/* allocate the private data structure */
442 	private = (struct dasd_eer_private *)kmalloc(
443 		sizeof(struct dasd_eer_private), GFP_KERNEL);
444 	if (!private) {
445 		return ERR_PTR(-ENOMEM);
446 	}
447 	INIT_WORK(&private->worker, dasd_eer_sense_subsystem_status,
448 		  (void *)device);
449 	private->cqr = dasd_kmalloc_request("ECKD",
450 					    1 /* SNSS */ ,
451 					    SNSS_DATA_SIZE ,
452 					    device);
453 	if (!private->cqr) {
454 		kfree(private);
455 		return ERR_PTR(-ENOMEM);
456 	}
457 	private->flags = 0;
458 	return private;
459 }
460 
461 /*
462  * If our private SNSS request is queued, remove it from the
463  * dasd ccw queue so we can free the request's memory.
464  */
465 static void
466 dasd_eer_dequeue_SNSS_request(struct dasd_device *device,
467 			      struct dasd_eer_private *eer)
468 {
469 	struct list_head *lst, *nxt;
470 	struct dasd_ccw_req *cqr, *erpcqr;
471 	dasd_erp_fn_t erp_fn;
472 
473 	spin_lock_irq(get_ccwdev_lock(device->cdev));
474 	list_for_each_safe(lst, nxt, &device->ccw_queue) {
475 		cqr = list_entry(lst, struct dasd_ccw_req, list);
476 		/* we are looking for two kinds of requests */
477 		/* first kind: our SNSS request: */
478 		if (cqr == eer->cqr) {
479 			if (cqr->status == DASD_CQR_IN_IO)
480 				device->discipline->term_IO(cqr);
481 			list_del(&cqr->list);
482 			break;
483 		}
484 		/* second kind: ERP requests for our SNSS request */
485 		if (cqr->refers) {
486 			/* If this erp request chain ends in our cqr, then
487 			 * call the erp_postaction to clean it up */
488 			erpcqr = cqr;
489 			while (erpcqr->refers) {
490 				erpcqr = erpcqr->refers;
491 			}
492 			if (erpcqr == eer->cqr) {
493 				erp_fn = device->discipline->erp_postaction(
494 					 cqr);
495 				erp_fn(cqr);
496 			}
497 			continue;
498 		}
499 	}
500 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
501 }
502 
503 /*
504  * This function dismantles a struct dasd_eer_private that was created by
505  * dasd_eer_probe. Since we want to free our private data structure,
506  * we must make sure that the memory is not in use anymore.
507  * We have to flush the work queue and remove a possible SNSS request
508  * from the dasd queue.
509  */
510 static void
511 dasd_eer_destroy(struct dasd_device *device, struct dasd_eer_private *eer)
512 {
513 	flush_workqueue(dasd_eer_workqueue);
514 	dasd_eer_dequeue_SNSS_request(device, eer);
515 	dasd_kfree_request(eer->cqr, device);
516 	kfree(eer);
517 }
518 
519 /*
520  * enable the extended error reporting for a particular device
521  */
522 static int
523 dasd_eer_enable_on_device(struct dasd_device *device)
524 {
525 	void *eer;
526 	if (!device)
527 		return -ENODEV;
528 	if (device->eer)
529 		return 0;
530 	if (!try_module_get(THIS_MODULE)) {
531 		return -EINVAL;
532 	}
533 	eer = (void *)dasd_eer_probe(device);
534 	if (IS_ERR(eer)) {
535 		module_put(THIS_MODULE);
536 		return PTR_ERR(eer);
537 	}
538 	device->eer = eer;
539 	return 0;
540 }
541 
542 /*
543  * disable the extended error reporting for a particular device
544  */
545 static int
546 dasd_eer_disable_on_device(struct dasd_device *device)
547 {
548 	struct dasd_eer_private *eer;
549 	if (!device)
550 		return -ENODEV;
551 	eer = device->eer;
552 	if (!eer)
553 		return 0;
554 	device->eer = NULL;
555 	dasd_eer_destroy(device, eer);
556 	module_put(THIS_MODULE);
557 
558 	return 0;
559 }
560 
561 /*
562  * Set extended error reporting (eer)
563  * Note: This will be registered as a DASD ioctl, to be called on DASD devices.
564  */
565 static int
566 dasd_ioctl_set_eer(struct block_device *bdev, int no, long args)
567 {
568 	struct dasd_device *device;
569 	int intval;
570 
571 	if (!capable(CAP_SYS_ADMIN))
572 		return -EACCES;
573 	if (bdev != bdev->bd_contains)
574 		/* Error-reporting is not allowed for partitions */
575 		return -EINVAL;
576 	if (get_user(intval, (int __user *) args))
577 		return -EFAULT;
578 	device = bdev->bd_disk->private_data;
579 	if (device == NULL)
580 		return -ENODEV;
581 
582 	intval = (intval != 0);
583 	DEV_MESSAGE(KERN_DEBUG, device,
584 		     "set eer on device to %d", intval);
585 	if (intval)
586 		return dasd_eer_enable_on_device(device);
587 	else
588 		return dasd_eer_disable_on_device(device);
589 }
590 
591 /*
592  * Get value of extended error reporting.
593  * Note: This will be registered as a DASD ioctl, to be called on DASD devices.
594  */
595 static int
596 dasd_ioctl_get_eer(struct block_device *bdev, int no, long args)
597 {
598 	struct dasd_device *device;
599 
600 	device = bdev->bd_disk->private_data;
601 	if (device == NULL)
602 		return -ENODEV;
603 	return put_user((device->eer != NULL), (int __user *) args);
604 }
605 
606 /*
607  * The following function can be used for those triggers that have
608  * all necessary data available when the function is called.
609  * If the parameter cqr is not NULL, the chain of requests will be searched
610  * for valid sense data, and all valid sense data sets will be added to
611  * the trigger's data.
612  */
613 static int
614 dasd_eer_write_standard_trigger(int trigger, struct dasd_device *device,
615 				struct dasd_ccw_req *cqr)
616 {
617 	struct dasd_ccw_req *temp_cqr;
618 	int data_size;
619 	struct timeval tv;
620 	struct dasd_eer_header header;
621 	unsigned long flags;
622 	struct eerbuffer *eerb;
623 
624 	/* go through cqr chain and count the valid sense data sets */
625 	temp_cqr = cqr;
626 	data_size = 0;
627 	while (temp_cqr) {
628 		if (temp_cqr->irb.esw.esw0.erw.cons)
629 			data_size += 32;
630 		temp_cqr = temp_cqr->refers;
631 	}
632 
633 	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
634 	header.trigger = trigger;
635 	do_gettimeofday(&tv);
636 	header.tv_sec = tv.tv_sec;
637 	header.tv_usec = tv.tv_usec;
638 	strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);
639 
640 	spin_lock_irqsave(&bufferlock, flags);
641 	list_for_each_entry(eerb, &bufferlist, list) {
642 		dasd_eer_start_record(eerb, header.total_size);
643 		dasd_eer_write_buffer(eerb, sizeof(header), (char*)(&header));
644 		temp_cqr = cqr;
645 		while (temp_cqr) {
646 			if (temp_cqr->irb.esw.esw0.erw.cons)
647 				dasd_eer_write_buffer(eerb, 32, temp_cqr->irb.ecw);
648 			temp_cqr = temp_cqr->refers;
649 		}
650 		dasd_eer_write_buffer(eerb, 4, "EOR");
651 	}
652 	spin_unlock_irqrestore(&bufferlock, flags);
653 
654 	wake_up_interruptible(&dasd_eer_read_wait_queue);
655 
656 	return 0;
657 }
658 
659 /*
660  * This function writes a DASD_EER_STATECHANGE trigger.
661  */
662 static void
663 dasd_eer_write_SNSS_trigger(struct dasd_device *device,
664 			    struct dasd_ccw_req *cqr)
665 {
666 	int data_size;
667 	int snss_rc;
668 	struct timeval tv;
669 	struct dasd_eer_header header;
670 	unsigned long flags;
671 	struct eerbuffer *eerb;
672 
673 	snss_rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
674 	if (snss_rc)
675 		data_size = 0;
676 	else
677 		data_size = SNSS_DATA_SIZE;
678 
679 	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
680 	header.trigger = DASD_EER_STATECHANGE;
681 	do_gettimeofday(&tv);
682 	header.tv_sec = tv.tv_sec;
683 	header.tv_usec = tv.tv_usec;
684 	strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE);
685 
686 	spin_lock_irqsave(&bufferlock, flags);
687 	list_for_each_entry(eerb, &bufferlist, list) {
688 		dasd_eer_start_record(eerb, header.total_size);
689 		dasd_eer_write_buffer(eerb, sizeof(header), (char *)(&header));
690 		if (!snss_rc)
691 			dasd_eer_write_buffer(eerb, SNSS_DATA_SIZE, cqr->data);
692 		dasd_eer_write_buffer(eerb, 4, "EOR");
693 	}
694 	spin_unlock_irqrestore(&bufferlock, flags);
695 
696 	wake_up_interruptible(&dasd_eer_read_wait_queue);
697 }
698 
699 /*
700  * callback function for use with SNSS request
701  */
702 static void
703 dasd_eer_SNSS_cb(struct dasd_ccw_req *cqr, void *data)
704 {
705 	struct dasd_device *device;
706 	struct dasd_eer_private *private;
707 	unsigned long irqflags;
708 
709 	device = (struct dasd_device *)data;
710 	private = (struct dasd_eer_private *)device->eer;
711 	dasd_eer_write_SNSS_trigger(device, cqr);
712 	spin_lock_irqsave(&snsslock, irqflags);
713 	if (!test_and_clear_bit(SNSS_REQUESTED, &private->flags)) {
714 		clear_bit(CQR_IN_USE, &private->flags);
715 		spin_unlock_irqrestore(&snsslock, irqflags);
716 		return;
717 	}
718 	clear_bit(CQR_IN_USE, &private->flags);
719 	spin_unlock_irqrestore(&snsslock, irqflags);
720 	dasd_eer_sense_subsystem_status(device);
721 	return;
722 }
723 
724 /*
725  * clean a used cqr before using it again
726  */
727 static void
728 dasd_eer_clean_SNSS_request(struct dasd_ccw_req *cqr)
729 {
730 	struct ccw1 *cpaddr = cqr->cpaddr;
731 	void *data = cqr->data;
732 
733 	memset(cqr, 0, sizeof(struct dasd_ccw_req));
734 	memset(cpaddr, 0, sizeof(struct ccw1));
735 	memset(data, 0, SNSS_DATA_SIZE);
736 	cqr->cpaddr = cpaddr;
737 	cqr->data = data;
738 	strncpy((char *) &cqr->magic, "ECKD", 4);
739 	ASCEBC((char *) &cqr->magic, 4);
740 	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
741 }
742 
743 /*
744  * build and start an SNSS request
745  * This function is called from a work queue so we have to
746  * pass the dasd_device pointer as a void pointer.
747  */
748 static void
749 dasd_eer_sense_subsystem_status(void *data)
750 {
751 	struct dasd_device *device;
752 	struct dasd_eer_private *private;
753 	struct dasd_ccw_req *cqr;
754 	struct ccw1 *ccw;
755 	unsigned long irqflags;
756 
757 	device = (struct dasd_device *)data;
758 	private = (struct dasd_eer_private *)device->eer;
759 	if (!private) /* device not eer enabled any more */
760 		return;
761 	cqr = private->cqr;
762 	spin_lock_irqsave(&snsslock, irqflags);
763 	if (test_and_set_bit(CQR_IN_USE, &private->flags)) {
764 		set_bit(SNSS_REQUESTED, &private->flags);
765 		spin_unlock_irqrestore(&snsslock, irqflags);
766 		return;
767 	}
768 	spin_unlock_irqrestore(&snsslock, irqflags);
769 	dasd_eer_clean_SNSS_request(cqr);
770 	cqr->device = device;
771 	cqr->retries = 255;
772 	cqr->expires = 10 * HZ;
773 
774 	ccw = cqr->cpaddr;
775 	ccw->cmd_code = DASD_ECKD_CCW_SNSS;
776 	ccw->count = SNSS_DATA_SIZE;
777 	ccw->flags = 0;
778 	ccw->cda = (__u32)(addr_t)cqr->data;
779 
780 	cqr->buildclk = get_clock();
781 	cqr->status = DASD_CQR_FILLED;
782 	cqr->callback = dasd_eer_SNSS_cb;
783 	cqr->callback_data = (void *)device;
784 	dasd_add_request_head(cqr);
785 
786 	return;
787 }
788 
789 /*
790  * This function is called for all triggers. It calls the appropriate
791  * function that writes the actual trigger records.
792  */
793 static int
794 dasd_eer_write_trigger(struct dasd_eer_trigger *trigger)
795 {
796 	int rc;
797 	struct dasd_eer_private *private = trigger->device->eer;
798 
799 	switch (trigger->id) {
800 	case DASD_EER_FATALERROR:
801 	case DASD_EER_PPRCSUSPEND:
802 		rc = dasd_eer_write_standard_trigger(
803 			trigger->id, trigger->device, trigger->cqr);
804 		break;
805 	case DASD_EER_NOPATH:
806 		rc = dasd_eer_write_standard_trigger(
807 			trigger->id, trigger->device, NULL);
808 		break;
809 	case DASD_EER_STATECHANGE:
810 		if (queue_work(dasd_eer_workqueue, &private->worker)) {
811 			rc = 0;
812 		} else {
813 			/* If the work_struct was already queued, it can't
814 			 * be queued again. But this is OK since we don't
815 			 * need to have it queued twice.
816 			 */
817 			rc = -EBUSY;
818 		}
819 		break;
820 	default: /* unknown trigger, so we write it without any sense data */
821 		rc = dasd_eer_write_standard_trigger(
822 			trigger->id, trigger->device, NULL);
823 		break;
824 	}
825 	return rc;
826 }
827 
828 /*
829  * This function is registered with the dasd device driver and gets called
830  * for all dasd eer notifications.
831  */
832 static int dasd_eer_notify(struct notifier_block *self,
833 			    unsigned long action, void *data)
834 {
835 	switch (action) {
836 	case DASD_EER_DISABLE:
837 		dasd_eer_disable_on_device((struct dasd_device *)data);
838 		break;
839 	case DASD_EER_TRIGGER:
840 		dasd_eer_write_trigger((struct dasd_eer_trigger *)data);
841 		break;
842 	}
843 	return NOTIFY_OK;
844 }
845 
846 
847 /*****************************************************************************/
848 /*      the device operations                                                */
849 /*****************************************************************************/
850 
851 /*
852  * On the one side we need a lock to access our internal buffer, on the
853  * other side a copy_to_user can sleep. So we need to copy the data we have
854  * to transfer in a readbuffer, which is protected by the readbuffer_mutex.
855  */
856 static char readbuffer[PAGE_SIZE];
857 DECLARE_MUTEX(readbuffer_mutex);
858 
859 
860 static int
861 dasd_eer_open(struct inode *inp, struct file *filp)
862 {
863 	struct eerbuffer *eerb;
864 	unsigned long flags;
865 
866 	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
867 	if (!eerb)
868 		return -ENOMEM;
869 	eerb->buffer_page_count = 1;
870 	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
871 	eerb->buffer = kmalloc(eerb->buffer_page_count * sizeof(char *),
872 			       GFP_KERNEL);
873 	if (!eerb->buffer ||
874 	    dasd_eer_allocate_buffer_pages(eerb->buffer,
875 					   eerb->buffer_page_count)) {
876 		kfree(eerb->buffer);
877 		kfree(eerb);
878 		return -ENOMEM;
879 	}
880 
881 	filp->private_data = eerb;
882 	spin_lock_irqsave(&bufferlock, flags);
883 	list_add(&eerb->list, &bufferlist);
884 	spin_unlock_irqrestore(&bufferlock, flags);
885 
886 	return nonseekable_open(inp, filp);
887 }
888 
889 static int
890 dasd_eer_close(struct inode *inp, struct file *filp)
891 {
892 	struct eerbuffer *eerb;
893 	unsigned long flags;
894 
895 	eerb = (struct eerbuffer *)filp->private_data;
896 	spin_lock_irqsave(&bufferlock, flags);
897 	list_del(&eerb->list);
898 	spin_unlock_irqrestore(&bufferlock, flags);
899 	dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
900 	kfree(eerb->buffer);
901 	kfree(eerb);
902 
903 	return 0;
904 }
905 
906 static long
907 dasd_eer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
908 {
909 	int intval;
910 	struct eerbuffer *eerb;
911 
912 	eerb = (struct eerbuffer *)filp->private_data;
913 	switch (cmd) {
914 	case DASD_EER_PURGE:
915 		dasd_eer_purge_buffer(eerb);
916 		return 0;
917 	case DASD_EER_SETBUFSIZE:
918 		if (get_user(intval, (int __user *)arg))
919 			return -EFAULT;
920 		return dasd_eer_resize_buffer(eerb, intval);
921 	default:
922 		return -ENOIOCTLCMD;
923 	}
924 }
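
/*
 * Illustrative userspace sketch only ("fd" stands for an already opened
 * /dev/dasd_eer file descriptor): the two ioctls handled above.
 *
 *	int pages = 16;
 *	ioctl(fd, DASD_EER_SETBUFSIZE, &pages);  (resize to 16 pages, purges)
 *	ioctl(fd, DASD_EER_PURGE, 0);            (drop all buffered records)
 */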
925 
926 static ssize_t
927 dasd_eer_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
928 {
929 	int tc, rc;
930 	int tailcount, effective_count;
931 	unsigned long flags;
932 	struct eerbuffer *eerb;
933 
934 	eerb = (struct eerbuffer *)filp->private_data;
935 	if (down_interruptible(&readbuffer_mutex))
936 		return -ERESTARTSYS;
937 
938 	spin_lock_irqsave(&bufferlock, flags);
939 
940 	if (eerb->residual < 0) {
941 		/* the remainder of this record has been deleted */
942 		eerb->residual = 0;
943 		spin_unlock_irqrestore(&bufferlock, flags);
944 		up(&readbuffer_mutex);
945 		return -EIO;
946 	} else if (eerb->residual > 0) {
947 		/* OK we still have a second half of a record to deliver */
948 		effective_count = min(eerb->residual, (int)count);
949 		eerb->residual -= effective_count;
950 	} else {
951 		tc = 0;
952 		while (!tc) {
953 			tc = dasd_eer_read_buffer(eerb,
954 				sizeof(tailcount), (char*)(&tailcount));
955 			if (!tc) {
956 				/* no data available */
957 				spin_unlock_irqrestore(&bufferlock, flags);
958 				up(&readbuffer_mutex);
959 				if (filp->f_flags & O_NONBLOCK)
960 					return -EAGAIN;
961 				rc = wait_event_interruptible(
962 					dasd_eer_read_wait_queue,
963 					eerb->head != eerb->tail);
964 				if (rc) {
965 					return rc;
966 				}
967 				if (down_interruptible(&readbuffer_mutex))
968 					return -ERESTARTSYS;
969 				spin_lock_irqsave(&bufferlock, flags);
970 			}
971 		}
972 		WARN_ON(tc != sizeof(tailcount));
973 		effective_count = min(tailcount, (int)count);
974 		eerb->residual = tailcount - effective_count;
975 	}
976 
977 	tc = dasd_eer_read_buffer(eerb, effective_count, readbuffer);
978 	WARN_ON(tc != effective_count);
979 
980 	spin_unlock_irqrestore(&bufferlock, flags);
981 
982 	if (copy_to_user(buf, readbuffer, effective_count)) {
983 		up(&readbuffer_mutex);
984 		return -EFAULT;
985 	}
986 
987 	up(&readbuffer_mutex);
988 	return effective_count;
989 }
990 
991 static unsigned int
992 dasd_eer_poll (struct file *filp, poll_table *ptable)
993 {
994 	unsigned int mask;
995 	unsigned long flags;
996 	struct eerbuffer *eerb;
997 
998 	eerb = (struct eerbuffer *)filp->private_data;
999 	poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
1000 	spin_lock_irqsave(&bufferlock, flags);
1001 	if (eerb->head != eerb->tail)
1002 		mask = POLLIN | POLLRDNORM ;
1003 	else
1004 		mask = 0;
1005 	spin_unlock_irqrestore(&bufferlock, flags);
1006 	return mask;
1007 }
1008 
1009 static struct file_operations dasd_eer_fops = {
1010 	.open		= &dasd_eer_open,
1011 	.release	= &dasd_eer_close,
1012 	.unlocked_ioctl = &dasd_eer_ioctl,
1013 	.compat_ioctl	= &dasd_eer_ioctl,
1014 	.read		= &dasd_eer_read,
1015 	.poll		= &dasd_eer_poll,
1016 	.owner		= THIS_MODULE,
1017 };
1018 
1019 static struct miscdevice dasd_eer_dev = {
1020 	.minor	    = MISC_DYNAMIC_MINOR,
1021 	.name	    = "dasd_eer",
1022 	.fops	    = &dasd_eer_fops,
1023 };
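
/*
 * Illustrative userspace sketch only (the /dev/dasd_eer node name follows
 * from the miscdevice name above): every successful read() delivers at most
 * one record, which starts with a struct dasd_eer_header when the supplied
 * buffer is large enough for the whole record.
 *
 *	char buf[8192];
 *	int fd = open("/dev/dasd_eer", O_RDONLY);
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	struct dasd_eer_header *h = (struct dasd_eer_header *) buf;
 *
 * After a read of at least sizeof(*h) bytes, h->trigger, h->busid and
 * h->tv_sec/h->tv_usec describe the reported event.
 */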
1024 
1025 
1026 /*****************************************************************************/
1027 /*	Init and exit							     */
1028 /*****************************************************************************/
1029 
1030 static int
1031 __init dasd_eer_init(void)
1032 {
1033 	int rc;
1034 
1035 	dasd_eer_workqueue = create_singlethread_workqueue("dasd_eer");
1036 	if (!dasd_eer_workqueue) {
1037 		MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
1038 		       "create workqueue");
1039 		rc = -ENOMEM;
1040 		goto out;
1041 	}
1042 
1043 	rc = dasd_register_eer_notifier(&dasd_eer_nb);
1044 	if (rc) {
1045 		MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
1046 		       "register error reporting");
1047 		goto queue;
1048 	}
1049 
1050 	dasd_ioctl_no_register(THIS_MODULE, BIODASDEERSET, dasd_ioctl_set_eer);
1051 	dasd_ioctl_no_register(THIS_MODULE, BIODASDEERGET, dasd_ioctl_get_eer);
1052 
1053 	/* we don't need our own character device,
1054 	 * so we just register as a misc device */
1055 	rc = misc_register(&dasd_eer_dev);
1056 	if (rc) {
1057 		MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not "
1058 		       "register misc device");
1059 		goto unregister;
1060 	}
1061 
1062 	return 0;
1063 
1064 unregister:
1065 	dasd_unregister_eer_notifier(&dasd_eer_nb);
1066 	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET,
1067 				 dasd_ioctl_set_eer);
1068 	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET,
1069 				 dasd_ioctl_get_eer);
1070 queue:
1071 	destroy_workqueue(dasd_eer_workqueue);
1072 out:
1073 	return rc;
1074 
1075 }
1076 module_init(dasd_eer_init);
1077 
1078 static void
1079 __exit dasd_eer_exit(void)
1080 {
1081 	dasd_unregister_eer_notifier(&dasd_eer_nb);
1082 	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET,
1083 				 dasd_ioctl_set_eer);
1084 	dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET,
1085 				 dasd_ioctl_get_eer);
1086 	destroy_workqueue(dasd_eer_workqueue);
1087 
1088 	WARN_ON(misc_deregister(&dasd_eer_dev) != 0);
1089 }
1090 module_exit(dasd_eer_exit);
1091