xref: /openbmc/linux/drivers/s390/block/dasd_eer.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0
220c64468SStefan Weinhuber /*
320c64468SStefan Weinhuber  *  Character device driver for extended error reporting.
420c64468SStefan Weinhuber  *
5a53c8fabSHeiko Carstens  *  Copyright IBM Corp. 2005
620c64468SStefan Weinhuber  *  extended error reporting for DASD ECKD devices
720c64468SStefan Weinhuber  *  Author(s): Stefan Weinhuber <wein@de.ibm.com>
820c64468SStefan Weinhuber  */
920c64468SStefan Weinhuber 
10ca99dab0SStefan Haberland #define KMSG_COMPONENT "dasd-eckd"
11fc19f381SStefan Haberland 
1220c64468SStefan Weinhuber #include <linux/init.h>
1320c64468SStefan Weinhuber #include <linux/fs.h>
1420c64468SStefan Weinhuber #include <linux/kernel.h>
1520c64468SStefan Weinhuber #include <linux/miscdevice.h>
1620c64468SStefan Weinhuber #include <linux/module.h>
1720c64468SStefan Weinhuber #include <linux/moduleparam.h>
1820c64468SStefan Weinhuber #include <linux/device.h>
1920c64468SStefan Weinhuber #include <linux/poll.h>
203006d7c6SChristoph Hellwig #include <linux/mutex.h>
210983e568SJulien Brunel #include <linux/err.h>
225a0e3ad6STejun Heo #include <linux/slab.h>
2320c64468SStefan Weinhuber 
247c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
2560063497SArun Sharma #include <linux/atomic.h>
2620c64468SStefan Weinhuber #include <asm/ebcdic.h>
2720c64468SStefan Weinhuber 
2820c64468SStefan Weinhuber #include "dasd_int.h"
2920c64468SStefan Weinhuber #include "dasd_eckd.h"
3020c64468SStefan Weinhuber 
3120c64468SStefan Weinhuber #ifdef PRINTK_HEADER
3220c64468SStefan Weinhuber #undef PRINTK_HEADER
3320c64468SStefan Weinhuber #endif				/* PRINTK_HEADER */
3420c64468SStefan Weinhuber #define PRINTK_HEADER "dasd(eer):"
3520c64468SStefan Weinhuber 
3620c64468SStefan Weinhuber /*
3720c64468SStefan Weinhuber  * SECTION: the internal buffer
3820c64468SStefan Weinhuber  */
3920c64468SStefan Weinhuber 
4020c64468SStefan Weinhuber /*
4120c64468SStefan Weinhuber  * The internal buffer is meant to store opaque blobs of data, so it does
4220c64468SStefan Weinhuber  * not know of higher level concepts like triggers.
4320c64468SStefan Weinhuber  * It consists of a number of pages that are used as a ringbuffer. Each data
4420c64468SStefan Weinhuber  * blob is stored in a simple record that consists of an integer, which
4520c64468SStefan Weinhuber  * contains the size of the following data, and the data bytes themselves.
4620c64468SStefan Weinhuber  *
4720c64468SStefan Weinhuber  * To allow for multiple independent readers we create one internal buffer
4820c64468SStefan Weinhuber  * each time the device is opened and destroy the buffer when the file is
4920c64468SStefan Weinhuber  * closed again. The number of pages used for this buffer is determined by
5020c64468SStefan Weinhuber  * the module parameter eer_pages.
5120c64468SStefan Weinhuber  *
5220c64468SStefan Weinhuber  * One record can be written to a buffer by using the functions
5320c64468SStefan Weinhuber  * - dasd_eer_start_record (one time per record to write the size to the
5420c64468SStefan Weinhuber  *                          buffer and reserve the space for the data)
5520c64468SStefan Weinhuber  * - dasd_eer_write_buffer (one or more times per record to write the data)
5620c64468SStefan Weinhuber  * The data can be written in several steps but you will have to compute
5720c64468SStefan Weinhuber  * the total size up front for the invocation of dasd_eer_start_record.
5820c64468SStefan Weinhuber  * If the ringbuffer is full, dasd_eer_start_record will remove the required
5920c64468SStefan Weinhuber  * number of old records.
6020c64468SStefan Weinhuber  *
6120c64468SStefan Weinhuber  * A record is typically read in two steps, first read the integer that
6220c64468SStefan Weinhuber  * specifies the size of the following data, then read the data.
6320c64468SStefan Weinhuber  * Both can be done by
6420c64468SStefan Weinhuber  * - dasd_eer_read_buffer
6520c64468SStefan Weinhuber  *
6620c64468SStefan Weinhuber  * For all mentioned functions you need to get the bufferlock first and keep
6720c64468SStefan Weinhuber  * it until a complete record is written or read.
6820c64468SStefan Weinhuber  *
6920c64468SStefan Weinhuber  * All information necessary to keep track of an internal buffer is kept in
7020c64468SStefan Weinhuber  * a struct eerbuffer. The buffer specific to a file pointer is stored in
7120c64468SStefan Weinhuber  * the private_data field of that file. To be able to write data to all
7220c64468SStefan Weinhuber  * existing buffers, each buffer is also added to the bufferlist.
7320c64468SStefan Weinhuber  * If the user does not want to read a complete record in one go, we have to
7420c64468SStefan Weinhuber  * keep track of the rest of the record. residual stores the number of bytes
7520c64468SStefan Weinhuber  * that are still to deliver. If the rest of the record is invalidated between
7620c64468SStefan Weinhuber  * two reads then residual will be set to -1 so that the next read will fail.
7720c64468SStefan Weinhuber  * All entries in the eerbuffer structure are protected with the bufferlock.
7820c64468SStefan Weinhuber  * To avoid races between writing to a buffer on the one side and creating
7920c64468SStefan Weinhuber  * and destroying buffers on the other side, the bufferlock must also be used
8020c64468SStefan Weinhuber  * to protect the bufferlist.
8120c64468SStefan Weinhuber  */
8220c64468SStefan Weinhuber 
8320c64468SStefan Weinhuber static int eer_pages = 5;
8420c64468SStefan Weinhuber module_param(eer_pages, int, S_IRUGO|S_IWUSR);
8520c64468SStefan Weinhuber 
struct eerbuffer {
	struct list_head list;		/* entry in the global bufferlist */
	char **buffer;			/* array of pages forming the ring */
	int buffersize;			/* total ring capacity in bytes */
	int buffer_page_count;		/* number of pages in buffer[] */
	int head;			/* next write position in the ring */
	int tail;			/* next read position in the ring */
	int residual;			/* bytes still to deliver of a partly
					 * read record; -1 if that record was
					 * invalidated between two reads */
};
9520c64468SStefan Weinhuber 
9620c64468SStefan Weinhuber static LIST_HEAD(bufferlist);
9734af946aSIngo Molnar static DEFINE_SPINLOCK(bufferlock);
9820c64468SStefan Weinhuber static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
9920c64468SStefan Weinhuber 
10020c64468SStefan Weinhuber /*
10120c64468SStefan Weinhuber  * How many free bytes are available on the buffer.
10220c64468SStefan Weinhuber  * Needs to be called with bufferlock held.
10320c64468SStefan Weinhuber  */
dasd_eer_get_free_bytes(struct eerbuffer * eerb)10420c64468SStefan Weinhuber static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
10520c64468SStefan Weinhuber {
10620c64468SStefan Weinhuber 	if (eerb->head < eerb->tail)
10720c64468SStefan Weinhuber 		return eerb->tail - eerb->head - 1;
10820c64468SStefan Weinhuber 	return eerb->buffersize - eerb->head + eerb->tail -1;
10920c64468SStefan Weinhuber }
11020c64468SStefan Weinhuber 
11120c64468SStefan Weinhuber /*
11220c64468SStefan Weinhuber  * How many bytes of buffer space are used.
11320c64468SStefan Weinhuber  * Needs to be called with bufferlock held.
11420c64468SStefan Weinhuber  */
dasd_eer_get_filled_bytes(struct eerbuffer * eerb)11520c64468SStefan Weinhuber static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
11620c64468SStefan Weinhuber {
11720c64468SStefan Weinhuber 
11820c64468SStefan Weinhuber 	if (eerb->head >= eerb->tail)
11920c64468SStefan Weinhuber 		return eerb->head - eerb->tail;
12020c64468SStefan Weinhuber 	return eerb->buffersize - eerb->tail + eerb->head;
12120c64468SStefan Weinhuber }
12220c64468SStefan Weinhuber 
12320c64468SStefan Weinhuber /*
12420c64468SStefan Weinhuber  * The dasd_eer_write_buffer function just copies count bytes of data
12520c64468SStefan Weinhuber  * to the buffer. Make sure to call dasd_eer_start_record first, to
12620c64468SStefan Weinhuber  * make sure that enough free space is available.
12720c64468SStefan Weinhuber  * Needs to be called with bufferlock held.
12820c64468SStefan Weinhuber  */
dasd_eer_write_buffer(struct eerbuffer * eerb,char * data,int count)12920c64468SStefan Weinhuber static void dasd_eer_write_buffer(struct eerbuffer *eerb,
13020c64468SStefan Weinhuber 				  char *data, int count)
13120c64468SStefan Weinhuber {
13220c64468SStefan Weinhuber 
13320c64468SStefan Weinhuber 	unsigned long headindex,localhead;
13420c64468SStefan Weinhuber 	unsigned long rest, len;
13520c64468SStefan Weinhuber 	char *nextdata;
13620c64468SStefan Weinhuber 
13720c64468SStefan Weinhuber 	nextdata = data;
13820c64468SStefan Weinhuber 	rest = count;
13920c64468SStefan Weinhuber 	while (rest > 0) {
14020c64468SStefan Weinhuber  		headindex = eerb->head / PAGE_SIZE;
14120c64468SStefan Weinhuber  		localhead = eerb->head % PAGE_SIZE;
14220c64468SStefan Weinhuber 		len = min(rest, PAGE_SIZE - localhead);
14320c64468SStefan Weinhuber 		memcpy(eerb->buffer[headindex]+localhead, nextdata, len);
14420c64468SStefan Weinhuber 		nextdata += len;
14520c64468SStefan Weinhuber 		rest -= len;
14620c64468SStefan Weinhuber 		eerb->head += len;
14720c64468SStefan Weinhuber 		if (eerb->head == eerb->buffersize)
14820c64468SStefan Weinhuber 			eerb->head = 0; /* wrap around */
14920c64468SStefan Weinhuber 		BUG_ON(eerb->head > eerb->buffersize);
15020c64468SStefan Weinhuber 	}
15120c64468SStefan Weinhuber }
15220c64468SStefan Weinhuber 
15320c64468SStefan Weinhuber /*
15420c64468SStefan Weinhuber  * Needs to be called with bufferlock held.
15520c64468SStefan Weinhuber  */
dasd_eer_read_buffer(struct eerbuffer * eerb,char * data,int count)15620c64468SStefan Weinhuber static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
15720c64468SStefan Weinhuber {
15820c64468SStefan Weinhuber 
15920c64468SStefan Weinhuber 	unsigned long tailindex,localtail;
16020c64468SStefan Weinhuber 	unsigned long rest, len, finalcount;
16120c64468SStefan Weinhuber 	char *nextdata;
16220c64468SStefan Weinhuber 
16320c64468SStefan Weinhuber 	finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
16420c64468SStefan Weinhuber 	nextdata = data;
16520c64468SStefan Weinhuber 	rest = finalcount;
16620c64468SStefan Weinhuber 	while (rest > 0) {
16720c64468SStefan Weinhuber  		tailindex = eerb->tail / PAGE_SIZE;
16820c64468SStefan Weinhuber  		localtail = eerb->tail % PAGE_SIZE;
16920c64468SStefan Weinhuber 		len = min(rest, PAGE_SIZE - localtail);
17020c64468SStefan Weinhuber 		memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
17120c64468SStefan Weinhuber 		nextdata += len;
17220c64468SStefan Weinhuber 		rest -= len;
17320c64468SStefan Weinhuber 		eerb->tail += len;
17420c64468SStefan Weinhuber 		if (eerb->tail == eerb->buffersize)
17520c64468SStefan Weinhuber 			eerb->tail = 0; /* wrap around */
17620c64468SStefan Weinhuber 		BUG_ON(eerb->tail > eerb->buffersize);
17720c64468SStefan Weinhuber 	}
17820c64468SStefan Weinhuber 	return finalcount;
17920c64468SStefan Weinhuber }
18020c64468SStefan Weinhuber 
18120c64468SStefan Weinhuber /*
18220c64468SStefan Weinhuber  * Whenever you want to write a blob of data to the internal buffer you
18320c64468SStefan Weinhuber  * have to start by using this function first. It will write the number
18420c64468SStefan Weinhuber  * of bytes that will be written to the buffer. If necessary it will remove
18520c64468SStefan Weinhuber  * old records to make room for the new one.
18620c64468SStefan Weinhuber  * Needs to be called with bufferlock held.
18720c64468SStefan Weinhuber  */
dasd_eer_start_record(struct eerbuffer * eerb,int count)18820c64468SStefan Weinhuber static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
18920c64468SStefan Weinhuber {
19020c64468SStefan Weinhuber 	int tailcount;
19120c64468SStefan Weinhuber 
19220c64468SStefan Weinhuber 	if (count + sizeof(count) > eerb->buffersize)
19320c64468SStefan Weinhuber 		return -ENOMEM;
19420c64468SStefan Weinhuber 	while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
19520c64468SStefan Weinhuber 		if (eerb->residual > 0) {
19620c64468SStefan Weinhuber 			eerb->tail += eerb->residual;
19720c64468SStefan Weinhuber 			if (eerb->tail >= eerb->buffersize)
19820c64468SStefan Weinhuber 				eerb->tail -= eerb->buffersize;
19920c64468SStefan Weinhuber 			eerb->residual = -1;
20020c64468SStefan Weinhuber 		}
20120c64468SStefan Weinhuber 		dasd_eer_read_buffer(eerb, (char *) &tailcount,
20220c64468SStefan Weinhuber 				     sizeof(tailcount));
20320c64468SStefan Weinhuber 		eerb->tail += tailcount;
20420c64468SStefan Weinhuber 		if (eerb->tail >= eerb->buffersize)
20520c64468SStefan Weinhuber 			eerb->tail -= eerb->buffersize;
20620c64468SStefan Weinhuber 	}
20720c64468SStefan Weinhuber 	dasd_eer_write_buffer(eerb, (char*) &count, sizeof(count));
20820c64468SStefan Weinhuber 
20920c64468SStefan Weinhuber 	return 0;
21020c64468SStefan Weinhuber };
21120c64468SStefan Weinhuber 
21220c64468SStefan Weinhuber /*
21320c64468SStefan Weinhuber  * Release pages that are not used anymore.
21420c64468SStefan Weinhuber  */
static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
	/* release the pages in reverse order of allocation */
	while (no_pages-- > 0)
		free_page((unsigned long) buf[no_pages]);
}
22220c64468SStefan Weinhuber 
22320c64468SStefan Weinhuber /*
22420c64468SStefan Weinhuber  * Allocate a new set of memory pages.
22520c64468SStefan Weinhuber  */
dasd_eer_allocate_buffer_pages(char ** buf,int no_pages)22620c64468SStefan Weinhuber static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
22720c64468SStefan Weinhuber {
22820c64468SStefan Weinhuber 	int i;
22920c64468SStefan Weinhuber 
23020c64468SStefan Weinhuber 	for (i = 0; i < no_pages; i++) {
23120c64468SStefan Weinhuber 		buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
23220c64468SStefan Weinhuber 		if (!buf[i]) {
23320c64468SStefan Weinhuber 			dasd_eer_free_buffer_pages(buf, i);
23420c64468SStefan Weinhuber 			return -ENOMEM;
23520c64468SStefan Weinhuber 		}
23620c64468SStefan Weinhuber 	}
23720c64468SStefan Weinhuber 	return 0;
23820c64468SStefan Weinhuber }
23920c64468SStefan Weinhuber 
24020c64468SStefan Weinhuber /*
24120c64468SStefan Weinhuber  * SECTION: The extended error reporting functionality
24220c64468SStefan Weinhuber  */
24320c64468SStefan Weinhuber 
24420c64468SStefan Weinhuber /*
24520c64468SStefan Weinhuber  * When a DASD device driver wants to report an error, it calls the
24620c64468SStefan Weinhuber  * function dasd_eer_write and gives the respective trigger ID as
24720c64468SStefan Weinhuber  * parameter. Currently there are four kinds of triggers:
24820c64468SStefan Weinhuber  *
24920c64468SStefan Weinhuber  * DASD_EER_FATALERROR:  all kinds of unrecoverable I/O problems
25020c64468SStefan Weinhuber  * DASD_EER_PPRCSUSPEND: PPRC was suspended
25120c64468SStefan Weinhuber  * DASD_EER_NOPATH:      There is no path to the device left.
25220c64468SStefan Weinhuber  * DASD_EER_STATECHANGE: The state of the device has changed.
25320c64468SStefan Weinhuber  *
25420c64468SStefan Weinhuber  * For the first three triggers all required information can be supplied by
25520c64468SStefan Weinhuber  * the caller. For these triggers a record is written by the function
25620c64468SStefan Weinhuber  * dasd_eer_write_standard_trigger.
25720c64468SStefan Weinhuber  *
25820c64468SStefan Weinhuber  * The DASD_EER_STATECHANGE trigger is special since a sense subsystem
25920c64468SStefan Weinhuber  * status ccw needs to be executed to gather the necessary sense data first.
26020c64468SStefan Weinhuber  * The dasd_eer_snss function will queue the SNSS request and the request
26120c64468SStefan Weinhuber  * callback will then call dasd_eer_write with the DASD_EER_STATCHANGE
26220c64468SStefan Weinhuber  * callback will then call dasd_eer_write with the DASD_EER_STATECHANGE
26320c64468SStefan Weinhuber  *
26420c64468SStefan Weinhuber  * To avoid memory allocations at runtime, the necessary memory is allocated
26520c64468SStefan Weinhuber  * when the extended error reporting is enabled for a device (by
26620c64468SStefan Weinhuber  * dasd_eer_probe). There is one sense subsystem status request for each
26720c64468SStefan Weinhuber  * eer enabled DASD device. The presence of the cqr in device->eer_cqr
26820c64468SStefan Weinhuber  * indicates that eer is enable for the device. The use of the snss request
26920c64468SStefan Weinhuber  * indicates that eer is enabled for the device. The use of the snss request
27020c64468SStefan Weinhuber  * that the cqr is currently in use, dasd_eer_snss cannot start a second
27120c64468SStefan Weinhuber  * request but sets the DASD_FLAG_EER_SNSS flag instead. The callback of
27220c64468SStefan Weinhuber  * the SNSS request will check the bit and call dasd_eer_snss again.
27320c64468SStefan Weinhuber  */
27420c64468SStefan Weinhuber 
27520c64468SStefan Weinhuber #define SNSS_DATA_SIZE 44
27620c64468SStefan Weinhuber 
27720c64468SStefan Weinhuber #define DASD_EER_BUSID_SIZE 10
struct dasd_eer_header {
	__u32 total_size;	/* record size incl. header, sense data, "EOR" */
	__u32 trigger;		/* DASD_EER_* trigger id */
	__u64 tv_sec;		/* trigger timestamp, seconds */
	__u64 tv_usec;		/* trigger timestamp, microseconds */
	char busid[DASD_EER_BUSID_SIZE];	/* bus id of the device */
} __attribute__ ((packed));
28520c64468SStefan Weinhuber 
28620c64468SStefan Weinhuber /*
28720c64468SStefan Weinhuber  * The following function can be used for those triggers that have
28820c64468SStefan Weinhuber  * all necessary data available when the function is called.
28920c64468SStefan Weinhuber  * If the parameter cqr is not NULL, the chain of requests will be searched
29020c64468SStefan Weinhuber  * for valid sense data, and all valid sense data sets will be added to
29120c64468SStefan Weinhuber  * the triggers data.
29220c64468SStefan Weinhuber  */
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
					    struct dasd_ccw_req *cqr,
					    int trigger)
{
	struct dasd_ccw_req *temp_cqr;
	int data_size;
	struct timespec64 ts;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;
	char *sense;

	/* go through cqr chain and count the valid sense data sets */
	data_size = 0;
	for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
		if (dasd_get_sense(&temp_cqr->irb))
			data_size += 32;	/* each sense data set is 32 bytes */

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = trigger;
	ktime_get_real_ts64(&ts);
	header.tv_sec = ts.tv_sec;
	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	strscpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

	/* append one complete record to every currently open buffer */
	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		/* write all valid sense data sets of the request chain */
		for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
			sense = dasd_get_sense(&temp_cqr->irb);
			if (sense)
				dasd_eer_write_buffer(eerb, sense, 32);
		}
		dasd_eer_write_buffer(eerb, "EOR", 4); /* incl. trailing '\0' */
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	/* a new record is available, wake up blocked readers */
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}
33320c64468SStefan Weinhuber 
33420c64468SStefan Weinhuber /*
33520c64468SStefan Weinhuber  * This function writes a DASD_EER_STATECHANGE trigger.
33620c64468SStefan Weinhuber  */
static void dasd_eer_write_snss_trigger(struct dasd_device *device,
					struct dasd_ccw_req *cqr,
					int trigger)
{
	int data_size;
	int snss_rc;
	struct timespec64 ts;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;

	/* only a successfully completed SNSS request carries data */
	snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	if (snss_rc)
		data_size = 0;
	else
		data_size = SNSS_DATA_SIZE;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = DASD_EER_STATECHANGE;
	ktime_get_real_ts64(&ts);
	header.tv_sec = ts.tv_sec;
	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	strscpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

	/* append one complete record to every currently open buffer */
	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header , sizeof(header));
		if (!snss_rc)
			dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
		dasd_eer_write_buffer(eerb, "EOR", 4); /* incl. trailing '\0' */
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	/* a new record is available, wake up blocked readers */
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}
37320c64468SStefan Weinhuber 
37420c64468SStefan Weinhuber /*
37520c64468SStefan Weinhuber  * This function is called for all triggers. It calls the appropriate
37620c64468SStefan Weinhuber  * function that writes the actual trigger records.
37720c64468SStefan Weinhuber  */
dasd_eer_write(struct dasd_device * device,struct dasd_ccw_req * cqr,unsigned int id)37820c64468SStefan Weinhuber void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
37920c64468SStefan Weinhuber 		    unsigned int id)
38020c64468SStefan Weinhuber {
38120c64468SStefan Weinhuber 	if (!device->eer_cqr)
38220c64468SStefan Weinhuber 		return;
38320c64468SStefan Weinhuber 	switch (id) {
38420c64468SStefan Weinhuber 	case DASD_EER_FATALERROR:
38520c64468SStefan Weinhuber 	case DASD_EER_PPRCSUSPEND:
38620c64468SStefan Weinhuber 		dasd_eer_write_standard_trigger(device, cqr, id);
38720c64468SStefan Weinhuber 		break;
38820c64468SStefan Weinhuber 	case DASD_EER_NOPATH:
3899e12e54cSJan Höppner 	case DASD_EER_NOSPC:
390*1cee2975SStefan Haberland 	case DASD_EER_AUTOQUIESCE:
39120c64468SStefan Weinhuber 		dasd_eer_write_standard_trigger(device, NULL, id);
39220c64468SStefan Weinhuber 		break;
39320c64468SStefan Weinhuber 	case DASD_EER_STATECHANGE:
39420c64468SStefan Weinhuber 		dasd_eer_write_snss_trigger(device, cqr, id);
39520c64468SStefan Weinhuber 		break;
39620c64468SStefan Weinhuber 	default: /* unknown trigger, so we write it without any sense data */
39720c64468SStefan Weinhuber 		dasd_eer_write_standard_trigger(device, NULL, id);
39820c64468SStefan Weinhuber 		break;
39920c64468SStefan Weinhuber 	}
40020c64468SStefan Weinhuber }
40120c64468SStefan Weinhuber EXPORT_SYMBOL(dasd_eer_write);
40220c64468SStefan Weinhuber 
40320c64468SStefan Weinhuber /*
40420c64468SStefan Weinhuber  * Start a sense subsystem status request.
40520c64468SStefan Weinhuber  * Needs to be called with the device held.
40620c64468SStefan Weinhuber  */
void dasd_eer_snss(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	cqr = device->eer_cqr;
	if (!cqr)	/* Device not eer enabled. */
		return;
	if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
		/*
		 * Sense subsystem status request in use. Remember that
		 * another SNSS is wanted; the callback of the running
		 * request checks this flag and calls us again.
		 */
		set_bit(DASD_FLAG_EER_SNSS, &device->flags);
		return;
	}
	/* cdev is already locked, can't use dasd_add_request_head */
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	dasd_schedule_device_bh(device);
}
42520c64468SStefan Weinhuber 
42620c64468SStefan Weinhuber /*
42720c64468SStefan Weinhuber  * Callback function for use with sense subsystem status request.
42820c64468SStefan Weinhuber  */
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;

	/* write the state change record with the gathered sense data */
	dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr == cqr) {
		/* request still owned by the device; mark it free again */
		clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
		if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
			/* Another SNSS has been requested in the meantime. */
			dasd_eer_snss(device);
		cqr = NULL;	/* do not free, it is still in use */
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr)
		/*
		 * Extended error recovery has been switched off while
		 * the SNSS request was running. It could even have
		 * been switched off and on again in which case there
		 * is a new ccw in device->eer_cqr. Free the "old"
		 * snss request now.
		 */
		dasd_sfree_request(cqr, device);
}
45420c64468SStefan Weinhuber 
45520c64468SStefan Weinhuber /*
45620c64468SStefan Weinhuber  * Enable error reporting on a given device.
45720c64468SStefan Weinhuber  */
int dasd_eer_enable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr = NULL;
	unsigned long flags;
	struct ccw1 *ccw;
	int rc = 0;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr)
		/* already enabled, nothing to do */
		goto out;
	else if (!device->discipline ||
		 strcmp(device->discipline->name, "ECKD"))
		/* eer is only supported for ECKD devices */
		rc = -EMEDIUMTYPE;
	else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
		rc = -EBUSY;

	if (rc)
		goto out;

	/* one CCW plus SNSS_DATA_SIZE bytes of data for the SNSS request */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
				   SNSS_DATA_SIZE, device, NULL);
	if (IS_ERR(cqr)) {
		rc = -ENOMEM;
		cqr = NULL;
		goto out;
	}

	cqr->startdev = device;
	cqr->retries = 255;
	cqr->expires = 10 * HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);

	/* build the single sense-subsystem-status channel command */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNSS;
	ccw->count = SNSS_DATA_SIZE;
	ccw->flags = 0;
	ccw->cda = (__u32)virt_to_phys(cqr->data);

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->callback = dasd_eer_snss_cb;

	if (!device->eer_cqr) {
		/* hand ownership of the request over to the device */
		device->eer_cqr = cqr;
		cqr = NULL;
	}

out:
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* free the request if it was not installed in device->eer_cqr */
	if (cqr)
		dasd_sfree_request(cqr, device);

	return rc;
}
51420c64468SStefan Weinhuber 
51520c64468SStefan Weinhuber /*
51620c64468SStefan Weinhuber  * Disable error reporting on a given device.
51720c64468SStefan Weinhuber  */
void dasd_eer_disable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int in_use;

	if (!device->eer_cqr)
		return;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* detach the SNSS request from the device under the lock */
	cqr = device->eer_cqr;
	device->eer_cqr = NULL;
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/*
	 * If the request is currently running, its callback will free it
	 * (it sees device->eer_cqr != cqr); only free it here when idle.
	 */
	if (cqr && !in_use)
		dasd_sfree_request(cqr, device);
}
53520c64468SStefan Weinhuber 
53620c64468SStefan Weinhuber /*
53720c64468SStefan Weinhuber  * SECTION: the device operations
53820c64468SStefan Weinhuber  */
53920c64468SStefan Weinhuber 
54020c64468SStefan Weinhuber /*
54120c64468SStefan Weinhuber  * On the one side we need a lock to access our internal buffer, on the
54220c64468SStefan Weinhuber  * other side a copy_to_user can sleep. So we need to copy the data we have
54320c64468SStefan Weinhuber  * to transfer in a readbuffer, which is protected by the readbuffer_mutex.
54420c64468SStefan Weinhuber  */
/* Staging buffer for copy_to_user(); serialized by readbuffer_mutex. */
static char readbuffer[PAGE_SIZE];
static DEFINE_MUTEX(readbuffer_mutex);
54720c64468SStefan Weinhuber 
dasd_eer_open(struct inode * inp,struct file * filp)54820c64468SStefan Weinhuber static int dasd_eer_open(struct inode *inp, struct file *filp)
54920c64468SStefan Weinhuber {
55020c64468SStefan Weinhuber 	struct eerbuffer *eerb;
55120c64468SStefan Weinhuber 	unsigned long flags;
55220c64468SStefan Weinhuber 
55320c64468SStefan Weinhuber 	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
554f45a43d8SStefan Weinhuber 	if (!eerb)
555f45a43d8SStefan Weinhuber 		return -ENOMEM;
55620c64468SStefan Weinhuber 	eerb->buffer_page_count = eer_pages;
55720c64468SStefan Weinhuber 	if (eerb->buffer_page_count < 1 ||
55820c64468SStefan Weinhuber 	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
55920c64468SStefan Weinhuber 		kfree(eerb);
560fc19f381SStefan Haberland 		DBF_EVENT(DBF_WARNING, "can't open device since module "
561025dfdafSFrederik Schwarzer 			"parameter eer_pages is smaller than 1 or"
562025dfdafSFrederik Schwarzer 			" bigger than %d", (int)(INT_MAX / PAGE_SIZE));
56320c64468SStefan Weinhuber 		return -EINVAL;
56420c64468SStefan Weinhuber 	}
56520c64468SStefan Weinhuber 	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
5666da2ec56SKees Cook 	eerb->buffer = kmalloc_array(eerb->buffer_page_count, sizeof(char *),
56720c64468SStefan Weinhuber 				     GFP_KERNEL);
56820c64468SStefan Weinhuber         if (!eerb->buffer) {
56920c64468SStefan Weinhuber 		kfree(eerb);
57020c64468SStefan Weinhuber                 return -ENOMEM;
57120c64468SStefan Weinhuber 	}
57220c64468SStefan Weinhuber 	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
57320c64468SStefan Weinhuber 					   eerb->buffer_page_count)) {
57420c64468SStefan Weinhuber 		kfree(eerb->buffer);
57520c64468SStefan Weinhuber 		kfree(eerb);
57620c64468SStefan Weinhuber 		return -ENOMEM;
57720c64468SStefan Weinhuber 	}
57820c64468SStefan Weinhuber 	filp->private_data = eerb;
57920c64468SStefan Weinhuber 	spin_lock_irqsave(&bufferlock, flags);
58020c64468SStefan Weinhuber 	list_add(&eerb->list, &bufferlist);
58120c64468SStefan Weinhuber 	spin_unlock_irqrestore(&bufferlock, flags);
58220c64468SStefan Weinhuber 
58320c64468SStefan Weinhuber 	return nonseekable_open(inp,filp);
58420c64468SStefan Weinhuber }
58520c64468SStefan Weinhuber 
dasd_eer_close(struct inode * inp,struct file * filp)58620c64468SStefan Weinhuber static int dasd_eer_close(struct inode *inp, struct file *filp)
58720c64468SStefan Weinhuber {
58820c64468SStefan Weinhuber 	struct eerbuffer *eerb;
58920c64468SStefan Weinhuber 	unsigned long flags;
59020c64468SStefan Weinhuber 
59120c64468SStefan Weinhuber 	eerb = (struct eerbuffer *) filp->private_data;
59220c64468SStefan Weinhuber 	spin_lock_irqsave(&bufferlock, flags);
59320c64468SStefan Weinhuber 	list_del(&eerb->list);
59420c64468SStefan Weinhuber 	spin_unlock_irqrestore(&bufferlock, flags);
59520c64468SStefan Weinhuber 	dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
59620c64468SStefan Weinhuber 	kfree(eerb->buffer);
59720c64468SStefan Weinhuber 	kfree(eerb);
59820c64468SStefan Weinhuber 
59920c64468SStefan Weinhuber 	return 0;
60020c64468SStefan Weinhuber }
60120c64468SStefan Weinhuber 
/*
 * Read one record (or the remainder of a partially read record) from the
 * per-reader ring buffer into user space.
 *
 * Locking: readbuffer_mutex serializes use of the shared readbuffer[]
 * staging page and is held across copy_to_user() (which may sleep);
 * bufferlock (a spinlock) protects the ring buffer itself and is dropped
 * before any sleeping operation.  Both locks are released and reacquired
 * around the blocking wait below, so the buffer state must be re-read
 * after each reacquisition.
 *
 * Returns the number of bytes copied, 0/-EAGAIN/-EIO/-EFAULT/-ERESTARTSYS
 * as appropriate.
 */
static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int tc,rc;
	int tailcount,effective_count;
        unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	if (mutex_lock_interruptible(&readbuffer_mutex))
		return -ERESTARTSYS;

	spin_lock_irqsave(&bufferlock, flags);

	if (eerb->residual < 0) { /* the remainder of this record */
		                  /* has been deleted             */
		eerb->residual = 0;
		spin_unlock_irqrestore(&bufferlock, flags);
		mutex_unlock(&readbuffer_mutex);
		return -EIO;
	} else if (eerb->residual > 0) {
		/* OK we still have a second half of a record to deliver */
		effective_count = min(eerb->residual, (int) count);
		eerb->residual -= effective_count;
	} else {
		/* Start of a new record: its length is stored in front of
		 * the data.  Loop until a length header becomes available. */
		tc = 0;
		while (!tc) {
			tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
						  sizeof(tailcount));
			if (!tc) {
				/* no data available */
				spin_unlock_irqrestore(&bufferlock, flags);
				mutex_unlock(&readbuffer_mutex);
				if (filp->f_flags & O_NONBLOCK)
					return -EAGAIN;
				rc = wait_event_interruptible(
					dasd_eer_read_wait_queue,
					eerb->head != eerb->tail);
				if (rc)
					return rc;
				/* Reacquire in the same order as on entry:
				 * mutex first, then the spinlock. */
				if (mutex_lock_interruptible(&readbuffer_mutex))
					return -ERESTARTSYS;
				spin_lock_irqsave(&bufferlock, flags);
			}
		}
		WARN_ON(tc != sizeof(tailcount));
		/* Deliver at most 'count' bytes now; the rest of the record
		 * is remembered in 'residual' for the next read() call. */
		effective_count = min(tailcount,(int)count);
		eerb->residual = tailcount - effective_count;
	}

	/* Copy into the mutex-protected staging buffer while the spinlock
	 * is held, then drop the spinlock before the sleeping user copy. */
	tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
	WARN_ON(tc != effective_count);

	spin_unlock_irqrestore(&bufferlock, flags);

	if (copy_to_user(buf, readbuffer, effective_count)) {
		mutex_unlock(&readbuffer_mutex);
		return -EFAULT;
	}

	mutex_unlock(&readbuffer_mutex);
	return effective_count;
}
66520c64468SStefan Weinhuber 
dasd_eer_poll(struct file * filp,poll_table * ptable)666afc9a42bSAl Viro static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable)
66720c64468SStefan Weinhuber {
668afc9a42bSAl Viro 	__poll_t mask;
66920c64468SStefan Weinhuber 	unsigned long flags;
67020c64468SStefan Weinhuber 	struct eerbuffer *eerb;
67120c64468SStefan Weinhuber 
67220c64468SStefan Weinhuber 	eerb = (struct eerbuffer *) filp->private_data;
67320c64468SStefan Weinhuber 	poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
67420c64468SStefan Weinhuber 	spin_lock_irqsave(&bufferlock, flags);
67520c64468SStefan Weinhuber 	if (eerb->head != eerb->tail)
676a9a08845SLinus Torvalds 		mask = EPOLLIN | EPOLLRDNORM ;
67720c64468SStefan Weinhuber 	else
67820c64468SStefan Weinhuber 		mask = 0;
67920c64468SStefan Weinhuber 	spin_unlock_irqrestore(&bufferlock, flags);
68020c64468SStefan Weinhuber 	return mask;
68120c64468SStefan Weinhuber }
68220c64468SStefan Weinhuber 
683d54b1fdbSArjan van de Ven static const struct file_operations dasd_eer_fops = {
68420c64468SStefan Weinhuber 	.open		= &dasd_eer_open,
68520c64468SStefan Weinhuber 	.release	= &dasd_eer_close,
68620c64468SStefan Weinhuber 	.read		= &dasd_eer_read,
68720c64468SStefan Weinhuber 	.poll		= &dasd_eer_poll,
68820c64468SStefan Weinhuber 	.owner		= THIS_MODULE,
6896038f373SArnd Bergmann 	.llseek		= noop_llseek,
69020c64468SStefan Weinhuber };
69120c64468SStefan Weinhuber 
/*
 * Misc device allocated/registered in dasd_eer_init(); NULL while
 * unregistered.  Statics are zero-initialized, so no explicit "= NULL"
 * (kernel coding style / checkpatch: do not initialise statics to NULL).
 */
static struct miscdevice *dasd_eer_dev;
69320c64468SStefan Weinhuber 
dasd_eer_init(void)69420c64468SStefan Weinhuber int __init dasd_eer_init(void)
69520c64468SStefan Weinhuber {
69620c64468SStefan Weinhuber 	int rc;
69720c64468SStefan Weinhuber 
698e3c699b3SStefan Weinhuber 	dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
699e3c699b3SStefan Weinhuber 	if (!dasd_eer_dev)
700e3c699b3SStefan Weinhuber 		return -ENOMEM;
701e3c699b3SStefan Weinhuber 
702e3c699b3SStefan Weinhuber 	dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
703e3c699b3SStefan Weinhuber 	dasd_eer_dev->name  = "dasd_eer";
704e3c699b3SStefan Weinhuber 	dasd_eer_dev->fops  = &dasd_eer_fops;
705e3c699b3SStefan Weinhuber 
706e3c699b3SStefan Weinhuber 	rc = misc_register(dasd_eer_dev);
70720c64468SStefan Weinhuber 	if (rc) {
708e3c699b3SStefan Weinhuber 		kfree(dasd_eer_dev);
709e3c699b3SStefan Weinhuber 		dasd_eer_dev = NULL;
710fc19f381SStefan Haberland 		DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
71120c64468SStefan Weinhuber 		       "register misc device");
71220c64468SStefan Weinhuber 		return rc;
71320c64468SStefan Weinhuber 	}
71420c64468SStefan Weinhuber 
71520c64468SStefan Weinhuber 	return 0;
71620c64468SStefan Weinhuber }
71720c64468SStefan Weinhuber 
dasd_eer_exit(void)7181375fc1fSHeiko Carstens void dasd_eer_exit(void)
71920c64468SStefan Weinhuber {
720e3c699b3SStefan Weinhuber 	if (dasd_eer_dev) {
721547415d5SAkinobu Mita 		misc_deregister(dasd_eer_dev);
722e3c699b3SStefan Weinhuber 		kfree(dasd_eer_dev);
723e3c699b3SStefan Weinhuber 		dasd_eer_dev = NULL;
724e3c699b3SStefan Weinhuber 	}
72520c64468SStefan Weinhuber }
726