xref: /openbmc/linux/drivers/s390/char/vmlogrdr.c (revision 1ab142d4)
1 /*
2  * drivers/s390/char/vmlogrdr.c
3  *	character device driver for reading z/VM system service records
4  *
5  *
6  *	Copyright IBM Corp. 2004, 2009
7  *	character device driver for reading z/VM system service records,
8  *	Version 1.0
9  *	Author(s): Xenia Tkatschow <xenia@us.ibm.com>
10  *		   Stefan Weinhuber <wein@de.ibm.com>
11  *
12  */
13 
14 #define KMSG_COMPONENT "vmlogrdr"
15 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16 
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/slab.h>
20 #include <linux/errno.h>
21 #include <linux/types.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock.h>
24 #include <linux/atomic.h>
25 #include <asm/uaccess.h>
26 #include <asm/cpcmd.h>
27 #include <asm/debug.h>
28 #include <asm/ebcdic.h>
29 #include <net/iucv/iucv.h>
30 #include <linux/kmod.h>
31 #include <linux/cdev.h>
32 #include <linux/device.h>
33 #include <linux/string.h>
34 
35 MODULE_AUTHOR
36 	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
37 	 "                            Stefan Weinhuber (wein@de.ibm.com)");
38 MODULE_DESCRIPTION ("Character device driver for reading z/VM "
39 		    "system service records.");
40 MODULE_LICENSE("GPL");
41 
42 
43 /*
44  * The size of the buffer for iucv data transfer is one page,
45  * but in addition to the data we read from iucv we also
46  * place an integer and some characters into that buffer,
47  * so the maximum size for record data is a little less than
48  * one page.
49  */
50 #define NET_BUFFER_SIZE	(PAGE_SIZE - sizeof(int) - sizeof(FENCE))
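/*
 * Resulting buffer layout for a complete record, as assembled by
 * vmlogrdr_receive_data() below (on s390 sizeof(int) == 4 and
 * sizeof(FENCE) == 4, i.e. "EOR" plus its terminating NUL):
 *
 *   | int: data length + sizeof(FENCE) | record data from iucv | "EOR\0" |
 */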
51 
52 /*
53  * The elements that are concurrently accessed by bottom halves are
54  * connection_established, iucv_path_severed, local_interrupt_buffer
55  * and receive_ready. The first three can be protected by
56  * priv_lock.  receive_ready is atomic, so it can be incremented and
57  * decremented without holding a lock.
58  * The variable dev_in_use needs to be protected by the lock, since
59  * it is a flag used by open to make sure that the device is opened
60  * by only one user at a time.
61  */
62 struct vmlogrdr_priv_t {
63 	char system_service[8];
64 	char internal_name[8];
65 	char recording_name[8];
66 	struct iucv_path *path;
67 	int connection_established;
68 	int iucv_path_severed;
69 	struct iucv_message local_interrupt_buffer;
70 	atomic_t receive_ready;
71 	int minor_num;
72 	char * buffer;
73 	char * current_position;
74 	int remaining;
75 	ulong residual_length;
76 	int buffer_free;
77 	int dev_in_use; /* 1: already opened, 0: not opened*/
78 	spinlock_t priv_lock;
79 	struct device  *device;
80 	struct device  *class_device;
81 	int autorecording;
82 	int autopurge;
83 };
84 
85 
86 /*
87  * File operation structure for vmlogrdr devices
88  */
89 static int vmlogrdr_open(struct inode *, struct file *);
90 static int vmlogrdr_release(struct inode *, struct file *);
91 static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
92 			      size_t count, loff_t * ppos);
93 
94 static const struct file_operations vmlogrdr_fops = {
95 	.owner   = THIS_MODULE,
96 	.open    = vmlogrdr_open,
97 	.release = vmlogrdr_release,
98 	.read    = vmlogrdr_read,
99 	.llseek  = no_llseek,
100 };
101 
102 
103 static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
104 static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
105 static void vmlogrdr_iucv_message_pending(struct iucv_path *,
106 					  struct iucv_message *);
107 
108 
109 static struct iucv_handler vmlogrdr_iucv_handler = {
110 	.path_complete	 = vmlogrdr_iucv_path_complete,
111 	.path_severed	 = vmlogrdr_iucv_path_severed,
112 	.message_pending = vmlogrdr_iucv_message_pending,
113 };
114 
115 
116 static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
117 static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
118 
119 /*
120  * pointer to system service private structure
121  * minor number 0 --> logrec
122  * minor number 1 --> account
123  * minor number 2 --> symptom
124  */
125 
126 static struct vmlogrdr_priv_t sys_ser[] = {
127 	{ .system_service = "*LOGREC ",
128 	  .internal_name  = "logrec",
129 	  .recording_name = "EREP",
130 	  .minor_num      = 0,
131 	  .buffer_free    = 1,
132 	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
133 	  .autorecording  = 1,
134 	  .autopurge      = 1,
135 	},
136 	{ .system_service = "*ACCOUNT",
137 	  .internal_name  = "account",
138 	  .recording_name = "ACCOUNT",
139 	  .minor_num      = 1,
140 	  .buffer_free    = 1,
141 	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
142 	  .autorecording  = 1,
143 	  .autopurge      = 1,
144 	},
145 	{ .system_service = "*SYMPTOM",
146 	  .internal_name  = "symptom",
147 	  .recording_name = "SYMPTOM",
148 	  .minor_num      = 2,
149 	  .buffer_free    = 1,
150 	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
151 	  .autorecording  = 1,
152 	  .autopurge      = 1,
153 	}
154 };
155 
156 #define MAXMINOR  ARRAY_SIZE(sys_ser)
157 
158 static char FENCE[] = "EOR";
159 static int vmlogrdr_major;
160 static struct cdev *vmlogrdr_cdev;
161 static int recording_class_AB;
162 
163 
164 static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
165 {
166 	struct vmlogrdr_priv_t * logptr = path->private;
167 
168 	spin_lock(&logptr->priv_lock);
169 	logptr->connection_established = 1;
170 	spin_unlock(&logptr->priv_lock);
171 	wake_up(&conn_wait_queue);
172 }
173 
174 
175 static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
176 {
177 	struct vmlogrdr_priv_t * logptr = path->private;
178 	u8 reason = (u8) ipuser[8];
179 
180 	pr_err("vmlogrdr: connection severed with reason %i\n", reason);
181 
182 	iucv_path_sever(path, NULL);
183 	kfree(path);
184 	logptr->path = NULL;
185 
186 	spin_lock(&logptr->priv_lock);
187 	logptr->connection_established = 0;
188 	logptr->iucv_path_severed = 1;
189 	spin_unlock(&logptr->priv_lock);
190 
191 	wake_up(&conn_wait_queue);
192 	/* just in case we're sleeping waiting for a record */
193 	wake_up_interruptible(&read_wait_queue);
194 }
195 
196 
197 static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
198 					  struct iucv_message *msg)
199 {
200 	struct vmlogrdr_priv_t * logptr = path->private;
201 
202 	/*
203 	 * This function is the bottom half so it should be quick.
204 	 * Copy the pending message descriptor into our local interrupt
205 	 * buffer and increment the receive_ready count.
206 	 */
207 	spin_lock(&logptr->priv_lock);
208 	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
209 	atomic_inc(&logptr->receive_ready);
210 	spin_unlock(&logptr->priv_lock);
211 	wake_up_interruptible(&read_wait_queue);
212 }
213 
214 
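/*
 * Ask CP whether this guest may use the QID operand of the RECORDING
 * command, i.e. whether it has privilege class A or B.  Returns 1 if
 * the response to QUERY COMMAND RECORDING lists class A or B (or
 * "ANY"), 0 otherwise.
 */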
215 static int vmlogrdr_get_recording_class_AB(void)
216 {
217 	static const char cp_command[] = "QUERY COMMAND RECORDING ";
218 	char cp_response[80];
219 	char *tail;
220 	int len,i;
221 
222 	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
223 	len = strnlen(cp_response,sizeof(cp_response));
224 	/* now parse the response */
225 	tail=strnchr(cp_response,len,'=');
226 	if (!tail)
227 		return 0;
228 	tail++;
229 	if (!strncmp("ANY",tail,3))
230 		return 1;
231 	if (!strncmp("NONE",tail,4))
232 		return 0;
233 	/*
234 	 * expect comma separated list of classes here, if one of them
235 	 * is A or B return 1 otherwise 0
236 	 */
237 	for (i = tail - cp_response; i < len; i++)
238 		if (cp_response[i] == 'A' || cp_response[i] == 'B')
239 			return 1;
240 	return 0;
241 }
242 
243 
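/*
 * Switch recording for the given system service on (action == 1) or
 * off (action == 0) via the CP command RECORDING, purging queued
 * records in a separate step if requested.  Switching recording on
 * with purge for *LOGREC on a class A or B guest, for example, issues
 * CP commands of the form (illustrative):
 *
 *	RECORDING EREP PURGE  QID *
 *	RECORDING EREP ON  QID *
 *
 * Returns 0 if CP answered with 'Command complete', -EIO otherwise.
 */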
244 static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
245 			      int action, int purge)
246 {
247 
248 	char cp_command[80];
249 	char cp_response[160];
250 	char *onoff, *qid_string;
251 	int rc;
252 
253 	onoff = ((action == 1) ? "ON" : "OFF");
254 	qid_string = ((recording_class_AB == 1) ? " QID * " : "");
255 
256 	/*
257 	 * The recording command needs to be called with the QID option
258 	 * for guests that have privilege class A or B.
259 	 * Purging has to be done as a separate step, because recording
260 	 * can't be switched on as long as records are on the queue.
261 	 * Doing both at the same time doesn't work.
262 	 */
263 	if (purge && (action == 1)) {
264 		memset(cp_command, 0x00, sizeof(cp_command));
265 		memset(cp_response, 0x00, sizeof(cp_response));
266 		snprintf(cp_command, sizeof(cp_command),
267 			 "RECORDING %s PURGE %s",
268 			 logptr->recording_name,
269 			 qid_string);
270 		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
271 	}
272 
273 	memset(cp_command, 0x00, sizeof(cp_command));
274 	memset(cp_response, 0x00, sizeof(cp_response));
275 	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
276 		logptr->recording_name,
277 		onoff,
278 		qid_string);
279 	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
280 	/* The recording command will usually answer with 'Command complete'
281 	 * on success, but when the specific service was never connected
282 	 * before, there might be an additional informational message
283 	 * 'HCPCRC8072I Recording entry not found' before the
284 	 * 'Command complete', so we use strstr rather than strncmp.
285 	 */
286 	if (strstr(cp_response,"Command complete"))
287 		rc = 0;
288 	else
289 		rc = -EIO;
290 	/*
291 	 * If we turn recording off, we have to purge any remaining records
292 	 * afterwards, as a large number of queued records may impact z/VM
293 	 * performance.
294 	 */
295 	if (purge && (action == 0)) {
296 		memset(cp_command, 0x00, sizeof(cp_command));
297 		memset(cp_response, 0x00, sizeof(cp_response));
298 		snprintf(cp_command, sizeof(cp_command),
299 			 "RECORDING %s PURGE %s",
300 			 logptr->recording_name,
301 			 qid_string);
302 		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
303 	}
304 
305 	return rc;
306 }
307 
308 
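/*
 * Open one of the vmlogrdr device nodes: mark the device as in use,
 * optionally switch recording on, and connect to the corresponding
 * z/VM system service (*LOGREC, *ACCOUNT or *SYMPTOM) via IUCV.  The
 * open blocks until the connection is either established or severed.
 */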
309 static int vmlogrdr_open (struct inode *inode, struct file *filp)
310 {
311 	int dev_num = 0;
312 	struct vmlogrdr_priv_t * logptr = NULL;
313 	int connect_rc = 0;
314 	int ret;
315 
316 	dev_num = iminor(inode);
317 	if (dev_num >= MAXMINOR)	/* valid minors are 0 .. MAXMINOR - 1 */
318 		return -ENODEV;
319 	logptr = &sys_ser[dev_num];
320 
321 	/*
322 	 * only allow blocking reads; non-blocking opens are refused
323 	 */
324 	if (filp->f_flags & O_NONBLOCK)
325 		return -ENOSYS;
326 
327 	/* Be sure this device hasn't already been opened */
328 	spin_lock_bh(&logptr->priv_lock);
329 	if (logptr->dev_in_use)	{
330 		spin_unlock_bh(&logptr->priv_lock);
331 		return -EBUSY;
332 	}
333 	logptr->dev_in_use = 1;
334 	logptr->connection_established = 0;
335 	logptr->iucv_path_severed = 0;
336 	atomic_set(&logptr->receive_ready, 0);
337 	logptr->buffer_free = 1;
338 	spin_unlock_bh(&logptr->priv_lock);
339 
340 	/* set the file options */
341 	filp->private_data = logptr;
342 	filp->f_op = &vmlogrdr_fops;
343 
344 	/* start recording for this service */
345 	if (logptr->autorecording) {
346 		ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
347 		if (ret)
348 			pr_warning("vmlogrdr: failed to start "
349 				   "recording automatically\n");
350 	}
351 
352 	/* create connection to the system service */
353 	logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
354 	if (!logptr->path)
355 		goto out_dev;
356 	connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
357 				       logptr->system_service, NULL, NULL,
358 				       logptr);
359 	if (connect_rc) {
360 		pr_err("vmlogrdr: iucv connection to %s "
361 		       "failed with rc %i\n",
362 		       logptr->system_service, connect_rc);
363 		goto out_path;
364 	}
365 
366 	/* We've issued the connect and now we must wait for a
367 	 * ConnectionComplete or ConnectionSevered interrupt
368 	 * before we can continue to process.
369 	 */
370 	wait_event(conn_wait_queue, (logptr->connection_established)
371 		   || (logptr->iucv_path_severed));
372 	if (logptr->iucv_path_severed)
373 		goto out_record;
374 	nonseekable_open(inode, filp);
375 	return 0;
376 
377 out_record:
378 	if (logptr->autorecording)
379 		vmlogrdr_recording(logptr,0,logptr->autopurge);
380 out_path:
381 	kfree(logptr->path);	/* kfree(NULL) is ok. */
382 	logptr->path = NULL;
383 out_dev:
384 	logptr->dev_in_use = 0;
385 	return -EIO;
386 }
387 
388 
389 static int vmlogrdr_release (struct inode *inode, struct file *filp)
390 {
391 	int ret;
392 
393 	struct vmlogrdr_priv_t * logptr = filp->private_data;
394 
395 	iucv_path_sever(logptr->path, NULL);
396 	kfree(logptr->path);
397 	logptr->path = NULL;
398 	if (logptr->autorecording) {
399 		ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
400 		if (ret)
401 			pr_warning("vmlogrdr: failed to stop "
402 				   "recording automatically\n");
403 	}
404 	logptr->dev_in_use = 0;
405 
406 	return 0;
407 }
408 
409 
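/*
 * Fetch one pending message (or the remainder of an oversized record)
 * from the IUCV path into priv->buffer.  For a new record the first
 * sizeof(int) bytes of the buffer receive the total length of the
 * record data plus FENCE; once the whole record has arrived, the FENCE
 * string is appended so a reader can detect the end of the record.
 * Returns 0 if data was placed into the buffer, non-zero otherwise.
 */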
410 static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
411 {
412 	int rc, *temp;
413 	/* we need to keep track of two data sizes here:
414 	 * The number of bytes we need to receive from iucv and
415 	 * the total number of bytes we actually write into the buffer.
416 	 */
417 	int user_data_count, iucv_data_count;
418 	char * buffer;
419 
420 	if (atomic_read(&priv->receive_ready)) {
421 		spin_lock_bh(&priv->priv_lock);
422 		if (priv->residual_length){
423 			/* receive second half of a record */
424 			iucv_data_count = priv->residual_length;
425 			user_data_count = 0;
426 			buffer = priv->buffer;
427 		} else {
428 			/* receive a new record:
429 			 * We need to return the total length of the record
430 			 * + size of FENCE in the first 4 bytes of the buffer.
431 			 */
432 			iucv_data_count = priv->local_interrupt_buffer.length;
433 			user_data_count = sizeof(int);
434 			temp = (int*)priv->buffer;
435 			*temp = iucv_data_count + sizeof(FENCE);
436 			buffer = priv->buffer + sizeof(int);
437 		}
438 		/*
439 		 * If the record is bigger than our buffer, we receive only
440 		 * a part of it. We can get the rest later.
441 		 */
442 		if (iucv_data_count > NET_BUFFER_SIZE)
443 			iucv_data_count = NET_BUFFER_SIZE;
444 		rc = iucv_message_receive(priv->path,
445 					  &priv->local_interrupt_buffer,
446 					  0, buffer, iucv_data_count,
447 					  &priv->residual_length);
448 		spin_unlock_bh(&priv->priv_lock);
449 		/* An rc of 5 indicates that the record was bigger than
450 		 * the buffer, which is OK for us. A 9 indicates that the
451 		 * record was purged before we could receive it.
452 		 */
453 		if (rc == 5)
454 			rc = 0;
455 		if (rc == 9)
456 			atomic_set(&priv->receive_ready, 0);
457 	} else {
458 		rc = 1;
459 	}
460 	if (!rc) {
461 		priv->buffer_free = 0;
462 		user_data_count += iucv_data_count;
463 		priv->current_position = priv->buffer;
464 		if (priv->residual_length == 0){
465 			/* the whole record has been captured,
466 			 * now add the fence */
467 			atomic_dec(&priv->receive_ready);
468 			buffer = priv->buffer + user_data_count;
469 			memcpy(buffer, FENCE, sizeof(FENCE));
470 			user_data_count += sizeof(FENCE);
471 		}
472 		priv->remaining = user_data_count;
473 	}
474 
475 	return rc;
476 }
477 
478 
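/*
 * Read from one of the vmlogrdr device nodes.  Seen from user space,
 * every record starts with a 4-byte integer holding the length of the
 * record data plus FENCE, followed by the record data, terminated by
 * the FENCE string "EOR".  A single read never returns more than the
 * currently buffered chunk, so it never crosses into the next record;
 * records larger than the driver's buffer arrive in several chunks.
 *
 * Minimal user-space sketch (illustrative only; the device node name
 * depends on how udev names the class device, e.g. /dev/logrec):
 *
 *	int fd = open("/dev/logrec", O_RDONLY);
 *	int len;
 *	char buf[8192];			-- large enough for one chunk
 *	read(fd, &len, sizeof(len));	-- length of data plus FENCE
 *	read(fd, buf, len);		-- record data followed by "EOR"
 */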
479 static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
480 			     size_t count, loff_t * ppos)
481 {
482 	int rc;
483 	struct vmlogrdr_priv_t * priv = filp->private_data;
484 
485 	while (priv->buffer_free) {
486 		rc = vmlogrdr_receive_data(priv);
487 		if (rc) {
488 			rc = wait_event_interruptible(read_wait_queue,
489 					atomic_read(&priv->receive_ready));
490 			if (rc)
491 				return rc;
492 		}
493 	}
494 	/* copy only up to end of record */
495 	if (count > priv->remaining)
496 		count = priv->remaining;
497 
498 	if (copy_to_user(data, priv->current_position, count))
499 		return -EFAULT;
500 
501 	*ppos += count;
502 	priv->current_position += count;
503 	priv->remaining -= count;
504 
505 	/* if all data has been transferred, set buffer free */
506 	if (priv->remaining == 0)
507 		priv->buffer_free = 1;
508 
509 	return count;
510 }
511 
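/*
 * sysfs interface: every device exposes the writable flags "autopurge"
 * and "autorecording" (0/1) and the write-only triggers "purge" and
 * "recording"; in addition the driver exposes a read-only
 * "recording_status" attribute that returns the response of the CP
 * command QUERY RECORDING.
 */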
512 static ssize_t vmlogrdr_autopurge_store(struct device * dev,
513 					struct device_attribute *attr,
514 					const char * buf, size_t count)
515 {
516 	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
517 	ssize_t ret = count;
518 
519 	switch (buf[0]) {
520 	case '0':
521 		priv->autopurge=0;
522 		break;
523 	case '1':
524 		priv->autopurge=1;
525 		break;
526 	default:
527 		ret = -EINVAL;
528 	}
529 	return ret;
530 }
531 
532 
533 static ssize_t vmlogrdr_autopurge_show(struct device *dev,
534 				       struct device_attribute *attr,
535 				       char *buf)
536 {
537 	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
538 	return sprintf(buf, "%u\n", priv->autopurge);
539 }
540 
541 
542 static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
543 		   vmlogrdr_autopurge_store);
544 
545 
546 static ssize_t vmlogrdr_purge_store(struct device * dev,
547 				    struct device_attribute *attr,
548 				    const char * buf, size_t count)
549 {
550 
551 	char cp_command[80];
552 	char cp_response[80];
553 	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
554 
555 	if (buf[0] != '1')
556 		return -EINVAL;
557 
558 	memset(cp_command, 0x00, sizeof(cp_command));
559 	memset(cp_response, 0x00, sizeof(cp_response));
560 
561 	/*
562 	 * The recording command needs to be called with the QID option
563 	 * for guests that have privilege class A or B.
564 	 * Other guests will not recognize the command and we have to
565 	 * issue the same command without the QID parameter.
566 	 */
567 
568 	if (recording_class_AB)
569 		snprintf(cp_command, sizeof(cp_command),
570 			 "RECORDING %s PURGE QID * ",
571 			 priv->recording_name);
572 	else
573 		snprintf(cp_command, sizeof(cp_command),
574 			 "RECORDING %s PURGE ",
575 			 priv->recording_name);
576 
577 	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
578 
579 	return count;
580 }
581 
582 
583 static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
584 
585 
586 static ssize_t vmlogrdr_autorecording_store(struct device *dev,
587 					    struct device_attribute *attr,
588 					    const char *buf, size_t count)
589 {
590 	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
591 	ssize_t ret = count;
592 
593 	switch (buf[0]) {
594 	case '0':
595 		priv->autorecording=0;
596 		break;
597 	case '1':
598 		priv->autorecording=1;
599 		break;
600 	default:
601 		ret = -EINVAL;
602 	}
603 	return ret;
604 }
605 
606 
607 static ssize_t vmlogrdr_autorecording_show(struct device *dev,
608 					   struct device_attribute *attr,
609 					   char *buf)
610 {
611 	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
612 	return sprintf(buf, "%u\n", priv->autorecording);
613 }
614 
615 
616 static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
617 		   vmlogrdr_autorecording_store);
618 
619 
620 static ssize_t vmlogrdr_recording_store(struct device * dev,
621 					struct device_attribute *attr,
622 					const char * buf, size_t count)
623 {
624 	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
625 	ssize_t ret;
626 
627 	switch (buf[0]) {
628 	case '0':
629 		ret = vmlogrdr_recording(priv,0,0);
630 		break;
631 	case '1':
632 		ret = vmlogrdr_recording(priv,1,0);
633 		break;
634 	default:
635 		ret = -EINVAL;
636 	}
637 	if (ret)
638 		return ret;
639 	else
640 		return count;
641 
642 }
643 
644 
645 static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
646 
647 
648 static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
649 					      char *buf)
650 {
651 
652 	static const char cp_command[] = "QUERY RECORDING ";
653 	int len;
654 
655 	cpcmd(cp_command, buf, 4096, NULL);
656 	len = strlen(buf);
657 	return len;
658 }
659 
660 
661 static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
662 		   NULL);
663 
664 static struct attribute *vmlogrdr_attrs[] = {
665 	&dev_attr_autopurge.attr,
666 	&dev_attr_purge.attr,
667 	&dev_attr_autorecording.attr,
668 	&dev_attr_recording.attr,
669 	NULL,
670 };
671 
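/*
 * Power management: refuse to suspend as long as one of the devices is
 * open.
 */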
672 static int vmlogrdr_pm_prepare(struct device *dev)
673 {
674 	int rc;
675 	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
676 
677 	rc = 0;
678 	if (priv) {
679 		spin_lock_bh(&priv->priv_lock);
680 		if (priv->dev_in_use)
681 			rc = -EBUSY;
682 		spin_unlock_bh(&priv->priv_lock);
683 	}
684 	if (rc)
685 		pr_err("vmlogrdr: device %s is busy, refusing to suspend\n",
686 		       dev_name(dev));
687 	return rc;
688 }
689 
690 
691 static const struct dev_pm_ops vmlogrdr_pm_ops = {
692 	.prepare = vmlogrdr_pm_prepare,
693 };
694 
695 static struct attribute_group vmlogrdr_attr_group = {
696 	.attrs = vmlogrdr_attrs,
697 };
698 
699 static struct class *vmlogrdr_class;
700 static struct device_driver vmlogrdr_driver = {
701 	.name = "vmlogrdr",
702 	.bus  = &iucv_bus,
703 	.pm = &vmlogrdr_pm_ops,
704 };
705 
706 
707 static int vmlogrdr_register_driver(void)
708 {
709 	int ret;
710 
711 	/* Register with iucv driver */
712 	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
713 	if (ret)
714 		goto out;
715 
716 	ret = driver_register(&vmlogrdr_driver);
717 	if (ret)
718 		goto out_iucv;
719 
720 	ret = driver_create_file(&vmlogrdr_driver,
721 				 &driver_attr_recording_status);
722 	if (ret)
723 		goto out_driver;
724 
725 	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
726 	if (IS_ERR(vmlogrdr_class)) {
727 		ret = PTR_ERR(vmlogrdr_class);
728 		vmlogrdr_class = NULL;
729 		goto out_attr;
730 	}
731 	return 0;
732 
733 out_attr:
734 	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
735 out_driver:
736 	driver_unregister(&vmlogrdr_driver);
737 out_iucv:
738 	iucv_unregister(&vmlogrdr_iucv_handler, 1);
739 out:
740 	return ret;
741 }
742 
743 
744 static void vmlogrdr_unregister_driver(void)
745 {
746 	class_destroy(vmlogrdr_class);
747 	vmlogrdr_class = NULL;
748 	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
749 	driver_unregister(&vmlogrdr_driver);
750 	iucv_unregister(&vmlogrdr_iucv_handler, 1);
751 }
752 
753 
754 static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
755 {
756 	struct device *dev;
757 	int ret;
758 
759 	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
760 	if (dev) {
761 		dev_set_name(dev, "%s", priv->internal_name);
762 		dev->bus = &iucv_bus;
763 		dev->parent = iucv_root;
764 		dev->driver = &vmlogrdr_driver;
765 		dev_set_drvdata(dev, priv);
766 		/*
767 		 * The release function could be called after the
768 		 * module has been unloaded. Its _only_ task is to
769 		 * free the struct. Therefore, we specify kfree()
770 		 * directly here. (Probably a little bit obfuscating
771 		 * but legitimate ...).
772 		 */
773 		dev->release = (void (*)(struct device *))kfree;
774 	} else
775 		return -ENOMEM;
776 	ret = device_register(dev);
777 	if (ret) {
778 		put_device(dev);
779 		return ret;
780 	}
781 
782 	ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
783 	if (ret) {
784 		device_unregister(dev);
785 		return ret;
786 	}
787 	priv->class_device = device_create(vmlogrdr_class, dev,
788 					   MKDEV(vmlogrdr_major,
789 						 priv->minor_num),
790 					   priv, "%s", dev_name(dev));
791 	if (IS_ERR(priv->class_device)) {
792 		ret = PTR_ERR(priv->class_device);
793 		priv->class_device=NULL;
794 		sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
795 		device_unregister(dev);
796 		return ret;
797 	}
798 	priv->device = dev;
799 	return 0;
800 }
801 
802 
803 static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
804 {
805 	device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
806 	if (priv->device != NULL) {
807 		sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
808 		device_unregister(priv->device);
809 		priv->device=NULL;
810 	}
811 	return 0;
812 }
813 
814 
815 static int vmlogrdr_register_cdev(dev_t dev)
816 {
817 	int rc = 0;
818 	vmlogrdr_cdev = cdev_alloc();
819 	if (!vmlogrdr_cdev) {
820 		return -ENOMEM;
821 	}
822 	vmlogrdr_cdev->owner = THIS_MODULE;
823 	vmlogrdr_cdev->ops = &vmlogrdr_fops;
824 	vmlogrdr_cdev->dev = dev;
825 	rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
826 	if (!rc)
827 		return 0;
828 
829 	// cleanup: cdev is not fully registered, no cdev_del here!
830 	/* cleanup: cdev is not fully registered, no cdev_del here! */
831 	vmlogrdr_cdev=NULL;
832 	return rc;
833 }
834 
835 
836 static void vmlogrdr_cleanup(void)
837 {
838 	int i;
839 
840 	if (vmlogrdr_cdev) {
841 		cdev_del(vmlogrdr_cdev);
842 		vmlogrdr_cdev=NULL;
843 	}
844 	for (i=0; i < MAXMINOR; ++i ) {
845 		vmlogrdr_unregister_device(&sys_ser[i]);
846 		free_page((unsigned long)sys_ser[i].buffer);
847 	}
848 	vmlogrdr_unregister_driver();
849 	if (vmlogrdr_major) {
850 		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
851 		vmlogrdr_major=0;
852 	}
853 }
854 
855 
856 static int __init vmlogrdr_init(void)
857 {
858 	int rc;
859 	int i;
860 	dev_t dev;
861 
862 	if (! MACHINE_IS_VM) {
863 		pr_err("not running under VM, driver not loaded.\n");
864 		return -ENODEV;
865 	}
866 
867 	recording_class_AB = vmlogrdr_get_recording_class_AB();
868 
869 	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
870 	if (rc)
871 		return rc;
872 	vmlogrdr_major = MAJOR(dev);
873 
874 	rc=vmlogrdr_register_driver();
875 	if (rc)
876 		goto cleanup;
877 
878 	for (i=0; i < MAXMINOR; ++i ) {
879 		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
880 		if (!sys_ser[i].buffer) {
881 			rc = -ENOMEM;
882 			break;
883 		}
884 		sys_ser[i].current_position = sys_ser[i].buffer;
885 		rc=vmlogrdr_register_device(&sys_ser[i]);
886 		if (rc)
887 			break;
888 	}
889 	if (rc)
890 		goto cleanup;
891 
892 	rc = vmlogrdr_register_cdev(dev);
893 	if (rc)
894 		goto cleanup;
895 	return 0;
896 
897 cleanup:
898 	vmlogrdr_cleanup();
899 	return rc;
900 }
901 
902 
903 static void __exit vmlogrdr_exit(void)
904 {
905 	vmlogrdr_cleanup();
906 	return;
907 }
908 
909 
910 module_init(vmlogrdr_init);
911 module_exit(vmlogrdr_exit);
912