/*
 * drivers/s390/char/vmlogrdr.c
 *	character device driver for reading z/VM system service records
 *
 *
 *	Copyright IBM Corp. 2004, 2009
 *	character device driver for reading z/VM system service records,
 *	Version 1.0
 *	Author(s): Xenia Tkatschow <xenia@us.ibm.com>
 *		   Stefan Weinhuber <wein@de.ibm.com>
 *
 */

#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/smp_lock.h>
#include <linux/string.h>

MODULE_AUTHOR
	("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
	 "                            Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
		    "system service records.");
MODULE_LICENSE("GPL");


/*
 * The size of the buffer for iucv data transfer is one page,
 * but in addition to the data we read from iucv we also
 * place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
 * one page.
 */
#define NET_BUFFER_SIZE	(PAGE_SIZE - sizeof(int) - sizeof(FENCE))

/*
 * The elements that are concurrently accessed by bottom halves are
 * connection_established, iucv_path_severed, local_interrupt_buffer
 * and receive_ready. The first three can be protected by
 * priv_lock.  receive_ready is atomic, so it can be incremented and
 * decremented without holding a lock.
 * The variable dev_in_use needs to be protected by the lock, since
 * it's a flag used by open to make sure that the device is opened only
 * by one user at the same time.
 */
struct vmlogrdr_priv_t {
	char system_service[8];
	char internal_name[8];
	char recording_name[8];
	struct iucv_path *path;
	int connection_established;
	int iucv_path_severed;
	struct iucv_message local_interrupt_buffer;
	atomic_t receive_ready;
	int minor_num;
	char * buffer;
	char * current_position;
	int remaining;
	ulong residual_length;
	int buffer_free;
	int dev_in_use; /* 1: already opened, 0: not opened*/
	spinlock_t priv_lock;
	struct device  *device;
	struct device  *class_device;
	int autorecording;
	int autopurge;
};


/*
 * File operation structure for vmlogrdr devices
 */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
			      size_t count, loff_t * ppos);

static const struct file_operations vmlogrdr_fops = {
	.owner   = THIS_MODULE,
	.open    = vmlogrdr_open,
	.release = vmlogrdr_release,
	.read    = vmlogrdr_read,
};


static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
					  struct iucv_message *);


static struct iucv_handler vmlogrdr_iucv_handler = {
	.path_complete	 = vmlogrdr_iucv_path_complete,
	.path_severed	 = vmlogrdr_iucv_path_severed,
	.message_pending = vmlogrdr_iucv_message_pending,
};


static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);

/*
 * pointer to system service private structure
 * minor number 0 --> logrec
 * minor number 1 --> account
 * minor number 2 --> symptom
 */

static struct vmlogrdr_priv_t sys_ser[] = {
	{ .system_service = "*LOGREC ",
	  .internal_name  = "logrec",
	  .recording_name = "EREP",
	  .minor_num      = 0,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*ACCOUNT",
	  .internal_name  = "account",
	  .recording_name = "ACCOUNT",
	  .minor_num      = 1,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	},
	{ .system_service = "*SYMPTOM",
	  .internal_name  = "symptom",
	  .recording_name = "SYMPTOM",
	  .minor_num      = 2,
	  .buffer_free    = 1,
	  .priv_lock	  = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
	  .autorecording  = 1,
	  .autopurge      = 1,
	}
};

#define MAXMINOR  (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))

static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
static struct cdev  *vmlogrdr_cdev = NULL;
static int recording_class_AB;


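/*
 * IUCV callbacks. They run in bottom half context, so they only
 * update the connection state under priv_lock and wake up waiters;
 * the actual data transfer is done later in process context through
 * iucv_message_receive().
 */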
static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
	struct vmlogrdr_priv_t * logptr = path->private;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 1;
	spin_unlock(&logptr->priv_lock);
	wake_up(&conn_wait_queue);
}


static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
	struct vmlogrdr_priv_t * logptr = path->private;
	u8 reason = (u8) ipuser[8];

	pr_err("vmlogrdr: connection severed with reason %i\n", reason);

	iucv_path_sever(path, NULL);
	kfree(path);
	logptr->path = NULL;

	spin_lock(&logptr->priv_lock);
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 1;
	spin_unlock(&logptr->priv_lock);

	wake_up(&conn_wait_queue);
	/* just in case we're sleeping waiting for a record */
	wake_up_interruptible(&read_wait_queue);
}


static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
					  struct iucv_message *msg)
{
	struct vmlogrdr_priv_t * logptr = path->private;

	/*
	 * This function is the bottom half so it should be quick.
	 * Copy the external interrupt data into our local eib and increment
	 * the usage count
	 */
	spin_lock(&logptr->priv_lock);
	memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
	atomic_inc(&logptr->receive_ready);
	spin_unlock(&logptr->priv_lock);
	wake_up_interruptible(&read_wait_queue);
}


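/*
 * Ask CP whether this guest holds privilege class A or B (or "ANY").
 * Only such guests may use the "QID *" option of the RECORDING
 * command; the result is cached in recording_class_AB at module load.
 */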
static int vmlogrdr_get_recording_class_AB(void)
{
	char cp_command[]="QUERY COMMAND RECORDING ";
	char cp_response[80];
	char *tail;
	int len,i;

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	len = strnlen(cp_response,sizeof(cp_response));
	/* now the parsing */
	tail=strnchr(cp_response,len,'=');
	if (!tail)
		return 0;
	tail++;
	if (!strncmp("ANY",tail,3))
		return 1;
	if (!strncmp("NONE",tail,4))
		return 0;
	/*
	 * expect comma separated list of classes here, if one of them
	 * is A or B return 1 otherwise 0
	 */
	for (i=tail-cp_response; i<len; i++)
		if ( cp_response[i]=='A' || cp_response[i]=='B' )
			return 1;
	return 0;
}


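/*
 * Switch CP recording for the service on (action == 1) or off
 * (action == 0). If purge is set, queued records are purged first,
 * since recording cannot be enabled while records are still queued.
 */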
static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
			      int action, int purge)
{

	char cp_command[80];
	char cp_response[160];
	char *onoff, *qid_string;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	onoff = ((action == 1) ? "ON" : "OFF");
	qid_string = ((recording_class_AB == 1) ? " QID * " : "");

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Purging has to be done as a separate step, because recording
	 * can't be switched on as long as records are on the queue.
	 * Doing both at the same time doesn't work.
	 */

	if (purge) {
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE %s",
			 logptr->recording_name,
			 qid_string);

		cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	}

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));
	snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
		logptr->recording_name,
		onoff,
		qid_string);

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
	/* The recording command will usually answer with 'Command complete'
	 * on success, but when the specific service was never connected
	 * before then there might be an additional informational message
	 * 'HCPCRC8072I Recording entry not found' before the
	 * 'Command complete'. So we use strstr rather than strncmp.
	 */
	if (strstr(cp_response,"Command complete"))
		return 0;
	else
		return -EIO;

}


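/*
 * Open one of the vmlogrdr devices. Only a single, blocking open per
 * device is allowed: the device is marked in use, recording is
 * started if autorecording is set, an IUCV path to the system service
 * is connected, and we wait until the connection is established (or
 * severed) before returning.
 */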
static int vmlogrdr_open (struct inode *inode, struct file *filp)
{
	int dev_num = 0;
	struct vmlogrdr_priv_t * logptr = NULL;
	int connect_rc = 0;
	int ret;

	dev_num = iminor(inode);
	if (dev_num >= MAXMINOR)
		return -ENODEV;
	logptr = &sys_ser[dev_num];

	/*
	 * only allow for blocking reads to be open
	 */
	if (filp->f_flags & O_NONBLOCK)
		return -ENOSYS;

	/* Be sure this device hasn't already been opened */
	lock_kernel();
	spin_lock_bh(&logptr->priv_lock);
	if (logptr->dev_in_use)	{
		spin_unlock_bh(&logptr->priv_lock);
		unlock_kernel();
		return -EBUSY;
	}
	logptr->dev_in_use = 1;
	logptr->connection_established = 0;
	logptr->iucv_path_severed = 0;
	atomic_set(&logptr->receive_ready, 0);
	logptr->buffer_free = 1;
	spin_unlock_bh(&logptr->priv_lock);

	/* set the file options */
	filp->private_data = logptr;
	filp->f_op = &vmlogrdr_fops;

	/* start recording for this service*/
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
		if (ret)
			pr_warning("vmlogrdr: failed to start "
				   "recording automatically\n");
	}

	/* create connection to the system service */
	logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
	if (!logptr->path)
		goto out_dev;
	connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
				       logptr->system_service, NULL, NULL,
				       logptr);
	if (connect_rc) {
		pr_err("vmlogrdr: iucv connection to %s "
		       "failed with rc %i \n",
		       logptr->system_service, connect_rc);
		goto out_path;
	}

	/* We've issued the connect and now we must wait for a
	 * ConnectionComplete or ConnectionSevered Interrupt
	 * before we can continue to process.
	 */
	wait_event(conn_wait_queue, (logptr->connection_established)
		   || (logptr->iucv_path_severed));
	if (logptr->iucv_path_severed)
		goto out_record;
	ret = nonseekable_open(inode, filp);
	unlock_kernel();
	return ret;

out_record:
	if (logptr->autorecording)
		vmlogrdr_recording(logptr,0,logptr->autopurge);
out_path:
	kfree(logptr->path);	/* kfree(NULL) is ok. */
	logptr->path = NULL;
out_dev:
	logptr->dev_in_use = 0;
	unlock_kernel();
	return -EIO;
}


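/*
 * Close the device: sever the IUCV path, stop recording if it was
 * started automatically and mark the device as free again.
 */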
static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
	int ret;

	struct vmlogrdr_priv_t * logptr = filp->private_data;

	iucv_path_sever(logptr->path, NULL);
	kfree(logptr->path);
	logptr->path = NULL;
	if (logptr->autorecording) {
		ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
		if (ret)
			pr_warning("vmlogrdr: failed to stop "
				   "recording automatically\n");
	}
	logptr->dev_in_use = 0;

	return 0;
}


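/*
 * Receive the next chunk of a record from IUCV into priv->buffer.
 * A complete record in the buffer looks like this:
 *
 *   [int total_length][record data ...]["EOR" fence]
 *
 * where total_length covers the record data plus the fence. Records
 * larger than the buffer are received in several pieces; the leading
 * length word and the trailing fence are only added for the first
 * and last piece respectively.
 */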
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
	int rc, *temp;
	/* we need to keep track of two data sizes here:
	 * The number of bytes we need to receive from iucv and
	 * the total number of bytes we actually write into the buffer.
	 */
	int user_data_count, iucv_data_count;
	char * buffer;

	if (atomic_read(&priv->receive_ready)) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->residual_length){
			/* receive second half of a record */
			iucv_data_count = priv->residual_length;
			user_data_count = 0;
			buffer = priv->buffer;
		} else {
			/* receive a new record:
			 * We need to return the total length of the record
			 * + size of FENCE in the first 4 bytes of the buffer.
			 */
			iucv_data_count = priv->local_interrupt_buffer.length;
			user_data_count = sizeof(int);
			temp = (int*)priv->buffer;
			*temp= iucv_data_count + sizeof(FENCE);
			buffer = priv->buffer + sizeof(int);
		}
		/*
		 * If the record is bigger than our buffer, we receive only
		 * a part of it. We can get the rest later.
		 */
		if (iucv_data_count > NET_BUFFER_SIZE)
			iucv_data_count = NET_BUFFER_SIZE;
		rc = iucv_message_receive(priv->path,
					  &priv->local_interrupt_buffer,
					  0, buffer, iucv_data_count,
					  &priv->residual_length);
		spin_unlock_bh(&priv->priv_lock);
		/* An rc of 5 indicates that the record was bigger than
		 * the buffer, which is OK for us. A 9 indicates that the
		 * record was purged before we could receive it.
		 */
		if (rc == 5)
			rc = 0;
		if (rc == 9)
			atomic_set(&priv->receive_ready, 0);
	} else {
		rc = 1;
	}
	if (!rc) {
		priv->buffer_free = 0;
		user_data_count += iucv_data_count;
		priv->current_position = priv->buffer;
		if (priv->residual_length == 0){
			/* the whole record has been captured,
			 * now add the fence */
			atomic_dec(&priv->receive_ready);
			buffer = priv->buffer + user_data_count;
			memcpy(buffer, FENCE, sizeof(FENCE));
			user_data_count += sizeof(FENCE);
		}
		priv->remaining = user_data_count;
	}

	return rc;
}


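/*
 * Read from the device: if the internal buffer is empty, block until
 * the next chunk has been received from IUCV, then copy at most up to
 * the end of the buffered chunk. Callers may therefore see short
 * reads at record boundaries.
 */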
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
			     size_t count, loff_t * ppos)
{
	int rc;
	struct vmlogrdr_priv_t * priv = filp->private_data;

	while (priv->buffer_free) {
		rc = vmlogrdr_receive_data(priv);
		if (rc) {
			rc = wait_event_interruptible(read_wait_queue,
					atomic_read(&priv->receive_ready));
			if (rc)
				return rc;
		}
	}
	/* copy only up to end of record */
	if (count > priv->remaining)
		count = priv->remaining;

	if (copy_to_user(data, priv->current_position, count))
		return -EFAULT;

	*ppos += count;
	priv->current_position += count;
	priv->remaining -= count;

	/* if all data has been transferred, set buffer free */
	if (priv->remaining == 0)
		priv->buffer_free = 1;

	return count;
}

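/*
 * Per-device sysfs attributes: autopurge and autorecording are simple
 * 0/1 flags, purge and recording are write-only triggers that issue
 * the corresponding CP RECORDING command.
 */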
static ssize_t vmlogrdr_autopurge_store(struct device * dev,
					struct device_attribute *attr,
					const char * buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autopurge=0;
		break;
	case '1':
		priv->autopurge=1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autopurge_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autopurge);
}


static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
		   vmlogrdr_autopurge_store);


static ssize_t vmlogrdr_purge_store(struct device * dev,
				    struct device_attribute *attr,
				    const char * buf, size_t count)
{

	char cp_command[80];
	char cp_response[80];
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	if (buf[0] != '1')
		return -EINVAL;

	memset(cp_command, 0x00, sizeof(cp_command));
	memset(cp_response, 0x00, sizeof(cp_response));

	/*
	 * The recording command needs to be called with option QID
	 * for guests that have privilege classes A or B.
	 * Other guests will not recognize the command and we have to
	 * issue the same command without the QID parameter.
	 */

	if (recording_class_AB)
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE QID * ",
			 priv->recording_name);
	else
		snprintf(cp_command, sizeof(cp_command),
			 "RECORDING %s PURGE ",
			 priv->recording_name);

	cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);

	return count;
}


static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);


static ssize_t vmlogrdr_autorecording_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret = count;

	switch (buf[0]) {
	case '0':
		priv->autorecording=0;
		break;
	case '1':
		priv->autorecording=1;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}


static ssize_t vmlogrdr_autorecording_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	return sprintf(buf, "%u\n", priv->autorecording);
}


static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
		   vmlogrdr_autorecording_store);


static ssize_t vmlogrdr_recording_store(struct device * dev,
					struct device_attribute *attr,
					const char * buf, size_t count)
{
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
	ssize_t ret;

	switch (buf[0]) {
	case '0':
		ret = vmlogrdr_recording(priv,0,0);
		break;
	case '1':
		ret = vmlogrdr_recording(priv,1,0);
		break;
	default:
		ret = -EINVAL;
	}
	if (ret)
		return ret;
	else
		return count;

}


static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);


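/*
 * Driver-level attribute: show the raw output of the CP command
 * "QUERY RECORDING" for all system services.
 */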
static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
					      char *buf)
{

	char cp_command[] = "QUERY RECORDING ";
	int len;

	cpcmd(cp_command, buf, 4096, NULL);
	len = strlen(buf);
	return len;
}


static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
		   NULL);

static struct attribute *vmlogrdr_attrs[] = {
	&dev_attr_autopurge.attr,
	&dev_attr_purge.attr,
	&dev_attr_autorecording.attr,
	&dev_attr_recording.attr,
	NULL,
};

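/*
 * Power management prepare callback: refuse to suspend while one of
 * the vmlogrdr devices is open.
 */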
static int vmlogrdr_pm_prepare(struct device *dev)
{
	int rc;
	struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

	rc = 0;
	if (priv) {
		spin_lock_bh(&priv->priv_lock);
		if (priv->dev_in_use)
			rc = -EBUSY;
		spin_unlock_bh(&priv->priv_lock);
	}
	if (rc)
		pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
		       dev_name(dev));
	return rc;
}


static struct dev_pm_ops vmlogrdr_pm_ops = {
	.prepare = vmlogrdr_pm_prepare,
};

static struct attribute_group vmlogrdr_attr_group = {
	.attrs = vmlogrdr_attrs,
};

static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
	.name = "vmlogrdr",
	.bus  = &iucv_bus,
	.pm = &vmlogrdr_pm_ops,
};


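/*
 * Register the IUCV handler, the driver on the iucv bus, the
 * recording_status driver attribute and the device class. On error
 * the already completed steps are undone in reverse order.
 */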
static int vmlogrdr_register_driver(void)
{
	int ret;

	/* Register with iucv driver */
	ret = iucv_register(&vmlogrdr_iucv_handler, 1);
	if (ret)
		goto out;

	ret = driver_register(&vmlogrdr_driver);
	if (ret)
		goto out_iucv;

	ret = driver_create_file(&vmlogrdr_driver,
				 &driver_attr_recording_status);
	if (ret)
		goto out_driver;

	vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
	if (IS_ERR(vmlogrdr_class)) {
		ret = PTR_ERR(vmlogrdr_class);
		vmlogrdr_class = NULL;
		goto out_attr;
	}
	return 0;

out_attr:
	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
out_driver:
	driver_unregister(&vmlogrdr_driver);
out_iucv:
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
	return ret;
}


static void vmlogrdr_unregister_driver(void)
{
	class_destroy(vmlogrdr_class);
	vmlogrdr_class = NULL;
	driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
	driver_unregister(&vmlogrdr_driver);
	iucv_unregister(&vmlogrdr_iucv_handler, 1);
}


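/*
 * Create the struct device for one system service on the iucv bus,
 * attach the sysfs attribute group and create the class device that
 * backs the character device node.
 */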
static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
	struct device *dev;
	int ret;

	dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (dev) {
		dev_set_name(dev, "%s", priv->internal_name);
		dev->bus = &iucv_bus;
		dev->parent = iucv_root;
		dev->driver = &vmlogrdr_driver;
		dev_set_drvdata(dev, priv);
		/*
		 * The release function could be called after the
		 * module has been unloaded. Its _only_ task is to
		 * free the struct. Therefore, we specify kfree()
		 * directly here. (Probably a little bit obfuscating
		 * but legitimate ...).
		 */
		dev->release = (void (*)(struct device *))kfree;
	} else
		return -ENOMEM;
	ret = device_register(dev);
	if (ret) {
		put_device(dev);
		return ret;
	}

	ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
	if (ret) {
		device_unregister(dev);
		return ret;
	}
	priv->class_device = device_create(vmlogrdr_class, dev,
					   MKDEV(vmlogrdr_major,
						 priv->minor_num),
					   priv, "%s", dev_name(dev));
	if (IS_ERR(priv->class_device)) {
		ret = PTR_ERR(priv->class_device);
		priv->class_device=NULL;
		sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
		device_unregister(dev);
		return ret;
	}
	priv->device = dev;
	return 0;
}


static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
	device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
	if (priv->device != NULL) {
		sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
		device_unregister(priv->device);
		priv->device=NULL;
	}
	return 0;
}


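/*
 * Allocate and add the character device that serves all MAXMINOR
 * minors with the vmlogrdr file operations.
 */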
static int vmlogrdr_register_cdev(dev_t dev)
{
	int rc = 0;
	vmlogrdr_cdev = cdev_alloc();
	if (!vmlogrdr_cdev) {
		return -ENOMEM;
	}
	vmlogrdr_cdev->owner = THIS_MODULE;
	vmlogrdr_cdev->ops = &vmlogrdr_fops;
	vmlogrdr_cdev->dev = dev;
	rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
	if (!rc)
		return 0;

	/* cleanup: cdev is not fully registered, no cdev_del here! */
	kobject_put(&vmlogrdr_cdev->kobj);
	vmlogrdr_cdev=NULL;
	return rc;
}


static void vmlogrdr_cleanup(void)
{
	int i;

	if (vmlogrdr_cdev) {
		cdev_del(vmlogrdr_cdev);
		vmlogrdr_cdev=NULL;
	}
	for (i=0; i < MAXMINOR; ++i ) {
		vmlogrdr_unregister_device(&sys_ser[i]);
		free_page((unsigned long)sys_ser[i].buffer);
	}
	vmlogrdr_unregister_driver();
	if (vmlogrdr_major) {
		unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
		vmlogrdr_major=0;
	}
}


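/*
 * Module initialization: requires z/VM, queries the privilege classes
 * once, allocates the char device region, registers the driver and
 * one device per system service, and finally adds the cdev. Any
 * failure triggers a full cleanup.
 */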
static int __init vmlogrdr_init(void)
{
	int rc;
	int i;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		pr_err("not running under VM, driver not loaded.\n");
		return -ENODEV;
	}

	recording_class_AB = vmlogrdr_get_recording_class_AB();

	rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
	if (rc)
		return rc;
	vmlogrdr_major = MAJOR(dev);

	rc=vmlogrdr_register_driver();
	if (rc)
		goto cleanup;

	for (i=0; i < MAXMINOR; ++i ) {
		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
		if (!sys_ser[i].buffer) {
			rc = -ENOMEM;
			break;
		}
		sys_ser[i].current_position = sys_ser[i].buffer;
		rc=vmlogrdr_register_device(&sys_ser[i]);
		if (rc)
			break;
	}
	if (rc)
		goto cleanup;

	rc = vmlogrdr_register_cdev(dev);
	if (rc)
		goto cleanup;
	return 0;

cleanup:
	vmlogrdr_cleanup();
	return rc;
}


static void __exit vmlogrdr_exit(void)
{
	vmlogrdr_cleanup();
	return;
}


module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);