xref: /openbmc/linux/drivers/s390/net/netiucv.c (revision b34e08d5)
1 /*
2  * IUCV network driver
3  *
4  * Copyright IBM Corp. 2001, 2009
5  *
6  * Author(s):
7  *	Original netiucv driver:
8  *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
9  *	Sysfs integration and all bugs therein:
10  *		Cornelia Huck (cornelia.huck@de.ibm.com)
11  *	PM functions:
12  *		Ursula Braun (ursula.braun@de.ibm.com)
13  *
14  * Documentation used:
15  *  the source of the original IUCV driver by:
16  *    Stefan Hegewald <hegewald@de.ibm.com>
17  *    Hartmut Penner <hpenner@de.ibm.com>
18  *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
19  *    Martin Schwidefsky (schwidefsky@de.ibm.com)
20  *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
21  *
22  * This program is free software; you can redistribute it and/or modify
23  * it under the terms of the GNU General Public License as published by
24  * the Free Software Foundation; either version 2, or (at your option)
25  * any later version.
26  *
27  * This program is distributed in the hope that it will be useful,
28  * but WITHOUT ANY WARRANTY; without even the implied warranty of
29  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
30  * GNU General Public License for more details.
31  *
32  * You should have received a copy of the GNU General Public License
33  * along with this program; if not, write to the Free Software
34  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35  *
36  */
37 
38 #define KMSG_COMPONENT "netiucv"
39 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
40 
41 #undef DEBUG
42 
43 #include <linux/module.h>
44 #include <linux/init.h>
45 #include <linux/kernel.h>
46 #include <linux/slab.h>
47 #include <linux/errno.h>
48 #include <linux/types.h>
49 #include <linux/interrupt.h>
50 #include <linux/timer.h>
51 #include <linux/bitops.h>
52 
53 #include <linux/signal.h>
54 #include <linux/string.h>
55 #include <linux/device.h>
56 
57 #include <linux/ip.h>
58 #include <linux/if_arp.h>
59 #include <linux/tcp.h>
60 #include <linux/skbuff.h>
61 #include <linux/ctype.h>
62 #include <net/dst.h>
63 
64 #include <asm/io.h>
65 #include <asm/uaccess.h>
66 #include <asm/ebcdic.h>
67 
68 #include <net/iucv/iucv.h>
69 #include "fsm.h"
70 
71 MODULE_AUTHOR
72     ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
73 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
74 
75 /**
76  * Debug Facility stuff
77  */
78 #define IUCV_DBF_SETUP_NAME "iucv_setup"
79 #define IUCV_DBF_SETUP_LEN 64
80 #define IUCV_DBF_SETUP_PAGES 2
81 #define IUCV_DBF_SETUP_NR_AREAS 1
82 #define IUCV_DBF_SETUP_LEVEL 3
83 
84 #define IUCV_DBF_DATA_NAME "iucv_data"
85 #define IUCV_DBF_DATA_LEN 128
86 #define IUCV_DBF_DATA_PAGES 2
87 #define IUCV_DBF_DATA_NR_AREAS 1
88 #define IUCV_DBF_DATA_LEVEL 2
89 
90 #define IUCV_DBF_TRACE_NAME "iucv_trace"
91 #define IUCV_DBF_TRACE_LEN 16
92 #define IUCV_DBF_TRACE_PAGES 4
93 #define IUCV_DBF_TRACE_NR_AREAS 1
94 #define IUCV_DBF_TRACE_LEVEL 3
95 
/* Log a fixed text string into the "name" debug area at "level". */
#define IUCV_DBF_TEXT(name,level,text) \
	do { \
		debug_text_event(iucv_dbf_##name,level,text); \
	} while (0)

/* Log a raw hex dump of "len" bytes at "addr" into the "name" debug area. */
#define IUCV_DBF_HEX(name,level,addr,len) \
	do { \
		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
	} while (0)

/* Per-CPU scratch buffer used by IUCV_DBF_TEXT_() to format messages. */
DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);

/*
 * Log a printf-style formatted message into the "name" debug area.
 * The message is formatted into the per-CPU scratch buffer (preemption
 * disabled via get_cpu_var) only if the area's level permits it.
 * NOTE(review): sprintf() into a 256-byte buffer is unbounded — callers
 * must keep messages short.
 */
#define IUCV_DBF_TEXT_(name, level, text...) \
	do { \
		if (debug_level_enabled(iucv_dbf_##name, level)) { \
			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
			sprintf(__buf, text); \
			debug_text_event(iucv_dbf_##name, level, __buf); \
			put_cpu_var(iucv_dbf_txt_buf); \
		} \
	} while (0)
117 
/*
 * Log a printf-style formatted message directly into the trace debug area
 * (the "name" argument is accepted for symmetry but ignored; the trace
 * area is always used, matching the original behavior).
 *
 * Fix: the previous definition expanded to two debug_sprintf_event()
 * calls, so every message was recorded twice; one call is sufficient.
 */
#define IUCV_DBF_SPRINTF(name,level,text...) \
	do { \
		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
	} while (0)
123 
124 /**
125  * some more debug stuff
126  */
127 #define PRINTK_HEADER " iucv: "       /* for debugging */
128 
/* dummy device to make sure netiucv_pm functions are called */
static struct device *netiucv_dev;

/* Power-management callbacks; definitions appear later in the file. */
static int netiucv_pm_prepare(struct device *);
static void netiucv_pm_complete(struct device *);
static int netiucv_pm_freeze(struct device *);
static int netiucv_pm_restore_thaw(struct device *);

/* PM operations: thaw and restore share one handler. */
static const struct dev_pm_ops netiucv_pm_ops = {
	.prepare = netiucv_pm_prepare,
	.complete = netiucv_pm_complete,
	.freeze = netiucv_pm_freeze,
	.thaw = netiucv_pm_restore_thaw,
	.restore = netiucv_pm_restore_thaw,
};

/* Driver registered on the IUCV bus; carries the PM ops above. */
static struct device_driver netiucv_driver = {
	.owner = THIS_MODULE,
	.name = "netiucv",
	.bus  = &iucv_bus,
	.pm = &netiucv_pm_ops,
};
151 
152 static int netiucv_callback_connreq(struct iucv_path *,
153 				    u8 ipvmid[8], u8 ipuser[16]);
154 static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
155 static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
156 static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
157 static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
158 static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
159 static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
160 
/*
 * Callbacks handed to the lowlevel IUCV layer; each wrapper translates
 * the IUCV event into a connection-FSM event (see the functions below).
 */
static struct iucv_handler netiucv_handler = {
	.path_pending	  = netiucv_callback_connreq,
	.path_complete	  = netiucv_callback_connack,
	.path_severed	  = netiucv_callback_connrej,
	.path_quiesced	  = netiucv_callback_connsusp,
	.path_resumed	  = netiucv_callback_connres,
	.message_pending  = netiucv_callback_rx,
	.message_complete = netiucv_callback_txdone
};
170 
171 /**
172  * Per connection profiling data
173  */
struct connection_profile {
	unsigned long maxmulti;		/* largest collect_len seen (bytes) */
	unsigned long maxcqueue;	/* max skbs merged into one multi-send */
	unsigned long doios_single;	/* number of single-skb sends */
	unsigned long doios_multi;	/* number of multi-skb sends */
	unsigned long txlen;		/* total bytes handed to iucv_message_send */
	unsigned long tx_time;		/* accumulated send latency */
	struct timespec send_stamp;	/* time of last send start */
	unsigned long tx_pending;	/* sends awaiting completion */
	unsigned long tx_max_pending;	/* high-water mark of tx_pending */
};
185 
186 /**
187  * Representation of one iucv connection
188  */
struct iucv_connection {
	struct list_head	  list;		/* link in iucv_connection_list */
	struct iucv_path	  *path;	/* lowlevel IUCV path, NULL if down */
	struct sk_buff            *rx_buff;	/* preallocated receive buffer */
	struct sk_buff            *tx_buff;	/* staging buffer for multi-sends */
	struct sk_buff_head       collect_queue; /* skbs waiting to be merged/sent */
	struct sk_buff_head	  commit_queue;	/* skbs sent, awaiting TXDONE */
	spinlock_t                collect_lock;	/* protects collect_queue/collect_len */
	int                       collect_len;	/* bytes queued in collect_queue */
	int                       max_buffsize;	/* max IUCV message size accepted */
	fsm_timer                 timer;	/* connect-timeout timer */
	fsm_instance              *fsm;		/* connection state machine */
	struct net_device         *netdev;	/* owning network device */
	struct connection_profile prof;		/* per-connection statistics */
	char                      userid[9];	/* peer z/VM user id (EBCDIC + NUL) */
	char			  userdata[17];	/* IUCV user data (EBCDIC + NUL) */
};
206 
207 /**
208  * Linked list of all connection structs.
209  */
210 static LIST_HEAD(iucv_connection_list);
211 static DEFINE_RWLOCK(iucv_connection_rwlock);
212 
213 /**
214  * Representation of event-data for the
215  * connection state machine.
216  */
/* Event argument passed to the connection FSM: the connection plus
 * event-specific payload (an iucv_message or iucv_path). */
struct iucv_event {
	struct iucv_connection *conn;
	void                   *data;
};

/**
 * Private part of the network device structure
 */
struct netiucv_priv {
	struct net_device_stats stats;	/* interface statistics */
	unsigned long           tbusy;	/* bit 0: transmit-busy flag */
	fsm_instance            *fsm;	/* interface state machine */
        struct iucv_connection  *conn;	/* the single connection of this device */
	struct device           *dev;	/* sysfs device */
	int			 pm_state; /* saved FSM state across suspend */
};
233 
234 /**
235  * Link level header for a packet.
236  */
/* On-the-wire link-level header: offset of the next packet within the
 * (possibly multi-packet) IUCV message buffer; 0 terminates the chain. */
struct ll_header {
	u16 next;
};

#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
#define NETIUCV_BUFSIZE_MAX	 65537
#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT      9216
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC     5000	/* connect timeout in ms */
248 
249 /**
250  * Compatibility macros for busy handling
251  * of network devices.
252  */
253 static inline void netiucv_clear_busy(struct net_device *dev)
254 {
255 	struct netiucv_priv *priv = netdev_priv(dev);
256 	clear_bit(0, &priv->tbusy);
257 	netif_wake_queue(dev);
258 }
259 
260 static inline int netiucv_test_and_set_busy(struct net_device *dev)
261 {
262 	struct netiucv_priv *priv = netdev_priv(dev);
263 	netif_stop_queue(dev);
264 	return test_and_set_bit(0, &priv->tbusy);
265 }
266 
/* Default IUCV user data ("0       0       "), ASCII encoding. */
static u8 iucvMagic_ascii[16] = {
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
};

/* Same default user data in EBCDIC; used to detect "no custom userdata". */
static u8 iucvMagic_ebcdic[16] = {
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
276 
/**
 * Convert an iucv userId to its printable
 * form (truncate at the first blank).
 *
 * @param name An iucv userId (need not be NUL-terminated)
 * @param len  Number of significant bytes in @name (at most 16)
 *
 * @returns The printable string (static data!!)
 */
static char *netiucv_printname(char *name, int len)
{
	static char tmp[17];
	char *p = tmp;

	memcpy(tmp, name, len);
	tmp[len] = '\0';
	/*
	 * Cast to unsigned char: passing a plain char with a negative
	 * value (bytes >= 0x80, e.g. raw EBCDIC data) to isspace() is
	 * undefined behavior per the C standard.
	 */
	while (*p && ((p - tmp) < len) && (!isspace((unsigned char)*p)))
		p++;
	*p = '\0';
	return tmp;
}
296 
297 static char *netiucv_printuser(struct iucv_connection *conn)
298 {
299 	static char tmp_uid[9];
300 	static char tmp_udat[17];
301 	static char buf[100];
302 
303 	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
304 		tmp_uid[8] = '\0';
305 		tmp_udat[16] = '\0';
306 		memcpy(tmp_uid, conn->userid, 8);
307 		memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
308 		memcpy(tmp_udat, conn->userdata, 16);
309 		EBCASC(tmp_udat, 16);
310 		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
311 		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
312 		return buf;
313 	} else
314 		return netiucv_printname(conn->userid, 8);
315 }
316 
/**
 * States of the interface statemachine.
 */
enum dev_states {
	DEV_STATE_STOPPED,	/* interface down */
	DEV_STATE_STARTWAIT,	/* start requested, waiting for connection up */
	DEV_STATE_STOPWAIT,	/* stop requested, waiting for connection down */
	DEV_STATE_RUNNING,	/* interface up, connection established */
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_STATES
};

/* Printable names, indexed by enum dev_states. */
static const char *dev_state_names[] = {
	"Stopped",
	"StartWait",
	"StopWait",
	"Running",
};
337 
/**
 * Events of the interface statemachine.
 */
enum dev_events {
	DEV_EVENT_START,	/* user brings the interface up */
	DEV_EVENT_STOP,		/* user takes the interface down */
	DEV_EVENT_CONUP,	/* connection FSM reports link up */
	DEV_EVENT_CONDOWN,	/* connection FSM reports link down */
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_EVENTS
};

/* Printable names, indexed by enum dev_events. */
static const char *dev_event_names[] = {
	"Start",
	"Stop",
	"Connection up",
	"Connection down",
};
358 
/**
 * Events of the connection statemachine
 */
enum conn_events {
	/**
	 * Events, representing callbacks from
	 * lowlevel iucv layer)
	 */
	CONN_EVENT_CONN_REQ,
	CONN_EVENT_CONN_ACK,
	CONN_EVENT_CONN_REJ,
	CONN_EVENT_CONN_SUS,
	CONN_EVENT_CONN_RES,
	CONN_EVENT_RX,
	CONN_EVENT_TXDONE,

	/**
	 * Events, representing errors return codes from
	 * calls to lowlevel iucv layer
	 */

	/**
	 * Event, representing timer expiry.
	 */
	CONN_EVENT_TIMER,

	/**
	 * Events, representing commands from upper levels.
	 */
	CONN_EVENT_START,
	CONN_EVENT_STOP,

	/**
	 * MUST be always the last element!!
	 */
	NR_CONN_EVENTS,
};

/* Printable names, indexed by enum conn_events. */
static const char *conn_event_names[] = {
	"Remote connection request",
	"Remote connection acknowledge",
	"Remote connection reject",
	"Connection suspended",
	"Connection resumed",
	"Data received",
	"Data sent",

	"Timer",

	"Start",
	"Stop",
};
411 
/**
 * States of the connection statemachine.
 */
enum conn_states {
	/**
	 * Connection not assigned to any device,
	 * initial state, invalid
	 */
	CONN_STATE_INVALID,

	/**
	 * Userid assigned but not operating
	 */
	CONN_STATE_STOPPED,

	/**
	 * Connection registered,
	 * no connection request sent yet,
	 * no connection request received
	 */
	CONN_STATE_STARTWAIT,

	/**
	 * Connection registered and connection request sent,
	 * no acknowledge and no connection request received yet.
	 */
	CONN_STATE_SETUPWAIT,

	/**
	 * Connection up and running idle
	 */
	CONN_STATE_IDLE,

	/**
	 * Data sent, awaiting CONN_EVENT_TXDONE
	 */
	CONN_STATE_TX,

	/**
	 * Error during registration.
	 */
	CONN_STATE_REGERR,

	/**
	 * Error during connection setup.
	 */
	CONN_STATE_CONNERR,

	/**
	 * MUST be always the last element!!
	 */
	NR_CONN_STATES,
};
465 
/*
 * Printable names, indexed by enum conn_states.
 *
 * Fix: the array used to contain a stale extra entry ("Terminating")
 * with no corresponding state, which shifted the names reported for
 * CONN_STATE_REGERR ("Terminating") and CONN_STATE_CONNERR
 * ("Registration error") by one position.
 */
static const char *conn_state_names[] = {
	"Invalid",
	"Stopped",
	"StartWait",
	"SetupWait",
	"Idle",
	"TX",
	"Registration error",
	"Connect error",
};
477 
478 
479 /**
480  * Debug Facility Stuff
481  */
482 static debug_info_t *iucv_dbf_setup = NULL;
483 static debug_info_t *iucv_dbf_data = NULL;
484 static debug_info_t *iucv_dbf_trace = NULL;
485 
486 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
487 
488 static void iucv_unregister_dbf_views(void)
489 {
490 	if (iucv_dbf_setup)
491 		debug_unregister(iucv_dbf_setup);
492 	if (iucv_dbf_data)
493 		debug_unregister(iucv_dbf_data);
494 	if (iucv_dbf_trace)
495 		debug_unregister(iucv_dbf_trace);
496 }
497 static int iucv_register_dbf_views(void)
498 {
499 	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
500 					IUCV_DBF_SETUP_PAGES,
501 					IUCV_DBF_SETUP_NR_AREAS,
502 					IUCV_DBF_SETUP_LEN);
503 	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
504 				       IUCV_DBF_DATA_PAGES,
505 				       IUCV_DBF_DATA_NR_AREAS,
506 				       IUCV_DBF_DATA_LEN);
507 	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
508 					IUCV_DBF_TRACE_PAGES,
509 					IUCV_DBF_TRACE_NR_AREAS,
510 					IUCV_DBF_TRACE_LEN);
511 
512 	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
513 	    (iucv_dbf_trace == NULL)) {
514 		iucv_unregister_dbf_views();
515 		return -ENOMEM;
516 	}
517 	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
518 	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
519 
520 	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
521 	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
522 
523 	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
524 	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
525 
526 	return 0;
527 }
528 
529 /*
530  * Callback-wrappers, called from lowlevel iucv layer.
531  */
532 
533 static void netiucv_callback_rx(struct iucv_path *path,
534 				struct iucv_message *msg)
535 {
536 	struct iucv_connection *conn = path->private;
537 	struct iucv_event ev;
538 
539 	ev.conn = conn;
540 	ev.data = msg;
541 	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
542 }
543 
544 static void netiucv_callback_txdone(struct iucv_path *path,
545 				    struct iucv_message *msg)
546 {
547 	struct iucv_connection *conn = path->private;
548 	struct iucv_event ev;
549 
550 	ev.conn = conn;
551 	ev.data = msg;
552 	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
553 }
554 
555 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
556 {
557 	struct iucv_connection *conn = path->private;
558 
559 	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
560 }
561 
/*
 * IUCV "path pending" callback: a remote guest requests a connection.
 * Scans the global connection list for an entry matching the peer's
 * userid and userdata; every match gets the path assigned and a
 * CONN_EVENT_CONN_REQ event.  Returns 0 if at least one connection
 * matched, -EINVAL otherwise (the lowlevel layer then refuses the path).
 *
 * NOTE(review): tmp_user/tmp_udat are static buffers filled without
 * locking against concurrent callbacks — presumably serialized by the
 * IUCV tasklet; confirm before relying on it.
 */
static int netiucv_callback_connreq(struct iucv_path *path,
				    u8 ipvmid[8], u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;
	static char tmp_user[9];
	static char tmp_udat[17];
	int rc;

	rc = -EINVAL;
	/* Printable copies of the peer id/userdata for the trace below. */
	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
	memcpy(tmp_udat, ipuser, 16);
	EBCASC(tmp_udat, 16);
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(conn, &iucv_connection_list, list) {
		if (strncmp(ipvmid, conn->userid, 8) ||
		    strncmp(ipuser, conn->userdata, 16))
			continue;
		/* Found a matching connection for this path. */
		conn->path = path;
		ev.conn = conn;
		ev.data = path;
		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
		rc = 0;
	}
	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
		       tmp_user, netiucv_printname(tmp_udat, 16));
	read_unlock_bh(&iucv_connection_rwlock);
	return rc;
}
592 
593 static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
594 {
595 	struct iucv_connection *conn = path->private;
596 
597 	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
598 }
599 
600 static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
601 {
602 	struct iucv_connection *conn = path->private;
603 
604 	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
605 }
606 
607 static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
608 {
609 	struct iucv_connection *conn = path->private;
610 
611 	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
612 }
613 
/**
 * NOP action for statemachines: used for state/event pairs that
 * intentionally require no processing.
 */
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}
620 
621 /*
622  * Actions of the connection statemachine
623  */
624 
/**
 * netiucv_unpack_skb
 * @conn: The connection where this skb has been received.
 * @pskb: The received skb.
 *
 * Unpack a just received skb and hand it over to upper layers.
 * An IUCV message may carry several packets, each preceded by an
 * ll_header whose "next" field is the offset of the following packet
 * (0 terminates the chain).  Each packet is copied into a fresh skb
 * and passed up the stack.
 * Helper function for conn_action_rx.
 */
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device     *dev = conn->netdev;
	struct netiucv_priv   *privptr = netdev_priv(dev);
	u16 offset = 0;

	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_NONE;
	/* NOTE(review): htons() is the conventional form here; ntohs()
	 * yields the same value (byte swap is its own inverse). */
	pskb->protocol = ntohs(ETH_P_IP);

	while (1) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		/* next == 0 marks the end of the packet chain. */
		if (!header->next)
			break;

		skb_pull(pskb, NETIUCV_HDRLEN);
		/* Convert absolute "next" offset into this packet's length. */
		header->next -= offset;
		offset += header->next;
		header->next -= NETIUCV_HDRLEN;
		if (skb_tailroom(pskb) < header->next) {
			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
				header->next, skb_tailroom(pskb));
			return;
		}
		skb_put(pskb, header->next);
		skb_reset_mac_header(pskb);
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			IUCV_DBF_TEXT(data, 2,
				"Out of memory in netiucv_unpack_skb\n");
			privptr->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		/* NOTE(review): this marks the *source* buffer, not the
		 * clone just handed upward — looks like it was meant for
		 * skb->ip_summed; confirm intended behavior. */
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		/*
		 * Since receiving is always initiated from a tasklet (in iucv.c),
		 * we must use netif_rx_ni() instead of netif_rx()
		 */
		netif_rx_ni(skb);
		/* Advance to the next packet in the chain. */
		skb_pull(pskb, header->next);
		skb_put(pskb, NETIUCV_HDRLEN);
	}
}
687 
/*
 * CONN_EVENT_RX handler: receive a pending IUCV message into the
 * connection's preallocated rx_buff and unpack it.  Messages for an
 * unlinked connection or larger than max_buffsize are rejected.
 */
static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn->netdev) {
		iucv_message_reject(conn->path, msg);
		IUCV_DBF_TEXT(data, 2,
			      "Received data for unlinked connection\n");
		return;
	}
	if (msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		privptr->stats.rx_dropped++;
		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
			       msg->length, conn->max_buffsize);
		return;
	}
	/* Reset rx_buff to an empty state before receiving into it. */
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	/* NOTE(review): 5 is presumably the minimal valid message size
	 * (ll_header plus payload) — confirm against the wire format. */
	if (rc || msg->length < 5) {
		privptr->stats.rx_errors++;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
		return;
	}
	netiucv_unpack_skb(conn, conn->rx_buff);
}
723 
/*
 * CONN_EVENT_TXDONE handler: a previous send completed.
 * Accounts the completed skb (if it was a single-skb send, flagged via
 * msg->tag), then drains the collect queue into tx_buff and, if anything
 * was collected, issues one merged multi-packet send.  Falls back to
 * CONN_STATE_IDLE when there is nothing more to send or the send fails.
 */
static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct iucv_message txmsg;
	struct netiucv_priv *privptr = NULL;
	u32 single_flag = msg->tag;	/* non-zero: completion of a single-skb send */
	u32 txbytes = 0;
	u32 txpackets = 0;
	u32 stat_maxcq = 0;
	struct sk_buff *skb;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn || !conn->netdev) {
		IUCV_DBF_TEXT(data, 2,
			      "Send confirmation for unlinked connection\n");
		return;
	}
	privptr = netdev_priv(conn->netdev);
	conn->prof.tx_pending--;
	if (single_flag) {
		/* Account and release the skb whose send just completed. */
		if ((skb = skb_dequeue(&conn->commit_queue))) {
			atomic_dec(&skb->users);
			if (privptr) {
				privptr->stats.tx_packets++;
				privptr->stats.tx_bytes +=
					(skb->len - NETIUCV_HDRLEN
						  - NETIUCV_HDRLEN);
			}
			dev_kfree_skb_any(skb);
		}
	}
	/* Reset tx_buff to an empty state for the next merged send. */
	conn->tx_buff->data = conn->tx_buff->head;
	skb_reset_tail_pointer(conn->tx_buff);
	conn->tx_buff->len = 0;
	spin_lock_irqsave(&conn->collect_lock, saveflags);
	/* Merge all queued skbs, each prefixed with an ll_header whose
	 * "next" field is the absolute offset of the following packet. */
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
		       NETIUCV_HDRLEN);
		skb_copy_from_linear_data(skb,
					  skb_put(conn->tx_buff, skb->len),
					  skb->len);
		txbytes += skb->len;
		txpackets++;
		stat_maxcq++;
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	if (conn->collect_len > conn->prof.maxmulti)
		conn->prof.maxmulti = conn->collect_len;
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	if (conn->tx_buff->len == 0) {
		fsm_newstate(fi, CONN_STATE_IDLE);
		return;
	}

	/* Terminate the packet chain with a zero "next" header. */
	header.next = 0;
	memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
	conn->prof.send_stamp = current_kernel_time();
	txmsg.class = 0;
	txmsg.tag = 0;	/* tag 0: multi-send, no commit_queue entry */
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	conn->prof.doios_multi++;
	conn->prof.txlen += conn->tx_buff->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc) {
		conn->prof.tx_pending--;
		fsm_newstate(fi, CONN_STATE_IDLE);
		if (privptr)
			privptr->stats.tx_errors += txpackets;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
	} else {
		if (privptr) {
			privptr->stats.tx_packets += txpackets;
			privptr->stats.tx_bytes += txbytes;
		}
		if (stat_maxcq > conn->prof.maxcqueue)
			conn->prof.maxcqueue = stat_maxcq;
	}
}
814 
/*
 * CONN_EVENT_CONN_REQ handler (STARTWAIT/SETUPWAIT): accept an
 * incoming path request from the peer.  On success the connection goes
 * IDLE and the interface FSM is told the link is up; on failure the
 * state is left unchanged.
 */
static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_path *path = ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	conn->path = path;
	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
	path->flags = 0;
	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
		return;
	}
	fsm_newstate(fi, CONN_STATE_IDLE);
	/* Limit the device queue to what the path can have in flight. */
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
838 
839 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
840 {
841 	struct iucv_event *ev = arg;
842 	struct iucv_path *path = ev->data;
843 
844 	IUCV_DBF_TEXT(trace, 3, __func__);
845 	iucv_path_sever(path, NULL);
846 }
847 
/*
 * CONN_EVENT_CONN_ACK handler (SETUPWAIT): the peer accepted our
 * connect.  Stop the connect-timeout timer, go IDLE and notify the
 * interface FSM that the link is up.
 */
static void conn_action_connack(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_IDLE);
	/* Limit the device queue to what the path can have in flight. */
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
860 
/*
 * CONN_EVENT_TIMER handler (SETUPWAIT): the connect attempt timed out.
 * Sever the half-open path and fall back to STARTWAIT so an incoming
 * request from the peer can still be accepted.
 */
static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, conn->userdata);
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
870 
/*
 * CONN_EVENT_CONN_REJ handler: the peer dropped the connection.
 * Sever our side of the path, report the loss, return to STARTWAIT and
 * tell the interface FSM the link is down.
 */
static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, conn->userdata);
	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
			       "connection\n", netiucv_printuser(conn));
	IUCV_DBF_TEXT(data, 2,
		      "conn_action_connsever: Remote dropped connection\n");
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
888 
889 static void conn_action_start(fsm_instance *fi, int event, void *arg)
890 {
891 	struct iucv_connection *conn = arg;
892 	struct net_device *netdev = conn->netdev;
893 	struct netiucv_priv *privptr = netdev_priv(netdev);
894 	int rc;
895 
896 	IUCV_DBF_TEXT(trace, 3, __func__);
897 
898 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
899 
900 	/*
901 	 * We must set the state before calling iucv_connect because the
902 	 * callback handler could be called at any point after the connection
903 	 * request is sent
904 	 */
905 
906 	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
907 	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
908 	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
909 		netdev->name, netiucv_printuser(conn));
910 
911 	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
912 			       NULL, conn->userdata, conn);
913 	switch (rc) {
914 	case 0:
915 		netdev->tx_queue_len = conn->path->msglim;
916 		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
917 			     CONN_EVENT_TIMER, conn);
918 		return;
919 	case 11:
920 		dev_warn(privptr->dev,
921 			"The IUCV device failed to connect to z/VM guest %s\n",
922 			netiucv_printname(conn->userid, 8));
923 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
924 		break;
925 	case 12:
926 		dev_warn(privptr->dev,
927 			"The IUCV device failed to connect to the peer on z/VM"
928 			" guest %s\n", netiucv_printname(conn->userid, 8));
929 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
930 		break;
931 	case 13:
932 		dev_err(privptr->dev,
933 			"Connecting the IUCV device would exceed the maximum"
934 			" number of IUCV connections\n");
935 		fsm_newstate(fi, CONN_STATE_CONNERR);
936 		break;
937 	case 14:
938 		dev_err(privptr->dev,
939 			"z/VM guest %s has too many IUCV connections"
940 			" to connect with the IUCV device\n",
941 			netiucv_printname(conn->userid, 8));
942 		fsm_newstate(fi, CONN_STATE_CONNERR);
943 		break;
944 	case 15:
945 		dev_err(privptr->dev,
946 			"The IUCV device cannot connect to a z/VM guest with no"
947 			" IUCV authorization\n");
948 		fsm_newstate(fi, CONN_STATE_CONNERR);
949 		break;
950 	default:
951 		dev_err(privptr->dev,
952 			"Connecting the IUCV device failed with error %d\n",
953 			rc);
954 		fsm_newstate(fi, CONN_STATE_CONNERR);
955 		break;
956 	}
957 	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
958 	kfree(conn->path);
959 	conn->path = NULL;
960 }
961 
962 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
963 {
964 	struct sk_buff *skb;
965 
966 	while ((skb = skb_dequeue(q))) {
967 		atomic_dec(&skb->users);
968 		dev_kfree_skb_any(skb);
969 	}
970 }
971 
/*
 * CONN_EVENT_STOP handler (any state): shut the connection down.
 * Stops the timer, purges both skb queues, severs and frees the path
 * (if any) and tells the interface FSM the link is down.
 */
static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_STOPPED);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
		iucv_path_sever(conn->path, conn->userdata);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
993 
994 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
995 {
996 	struct iucv_connection *conn = arg;
997 	struct net_device *netdev = conn->netdev;
998 
999 	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
1000 		netdev->name, conn->userid);
1001 }
1002 
/*
 * Connection FSM jump table: { state, event, action }.
 * Any state/event combination not listed here is ignored by the FSM.
 */
static const fsm_node conn_fsm[] = {
	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },

	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },

	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
        { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },

	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },

	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },

	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
};
1034 
1035 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1036 
1037 
1038 /*
1039  * Actions for interface - statemachine.
1040  */
1041 
1042 /**
1043  * dev_action_start
1044  * @fi: An instance of an interface statemachine.
1045  * @event: The event, just happened.
1046  * @arg: Generic pointer, casted from struct net_device * upon call.
1047  *
1048  * Startup connection by sending CONN_EVENT_START to it.
1049  */
1050 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1051 {
1052 	struct net_device   *dev = arg;
1053 	struct netiucv_priv *privptr = netdev_priv(dev);
1054 
1055 	IUCV_DBF_TEXT(trace, 3, __func__);
1056 
1057 	fsm_newstate(fi, DEV_STATE_STARTWAIT);
1058 	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1059 }
1060 
1061 /**
1062  * Shutdown connection by sending CONN_EVENT_STOP to it.
1063  *
1064  * @param fi    An instance of an interface statemachine.
1065  * @param event The event, just happened.
1066  * @param arg   Generic pointer, casted from struct net_device * upon call.
1067  */
1068 static void
1069 dev_action_stop(fsm_instance *fi, int event, void *arg)
1070 {
1071 	struct net_device   *dev = arg;
1072 	struct netiucv_priv *privptr = netdev_priv(dev);
1073 	struct iucv_event   ev;
1074 
1075 	IUCV_DBF_TEXT(trace, 3, __func__);
1076 
1077 	ev.conn = privptr->conn;
1078 
1079 	fsm_newstate(fi, DEV_STATE_STOPWAIT);
1080 	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1081 }
1082 
1083 /**
1084  * Called from connection statemachine
1085  * when a connection is up and running.
1086  *
1087  * @param fi    An instance of an interface statemachine.
1088  * @param event The event, just happened.
1089  * @param arg   Generic pointer, casted from struct net_device * upon call.
1090  */
1091 static void
1092 dev_action_connup(fsm_instance *fi, int event, void *arg)
1093 {
1094 	struct net_device   *dev = arg;
1095 	struct netiucv_priv *privptr = netdev_priv(dev);
1096 
1097 	IUCV_DBF_TEXT(trace, 3, __func__);
1098 
1099 	switch (fsm_getstate(fi)) {
1100 		case DEV_STATE_STARTWAIT:
1101 			fsm_newstate(fi, DEV_STATE_RUNNING);
1102 			dev_info(privptr->dev,
1103 				"The IUCV device has been connected"
1104 				" successfully to %s\n",
1105 				netiucv_printuser(privptr->conn));
1106 			IUCV_DBF_TEXT(setup, 3,
1107 				"connection is up and running\n");
1108 			break;
1109 		case DEV_STATE_STOPWAIT:
1110 			IUCV_DBF_TEXT(data, 2,
1111 				"dev_action_connup: in DEV_STATE_STOPWAIT\n");
1112 			break;
1113 	}
1114 }
1115 
1116 /**
1117  * Called from connection statemachine
1118  * when a connection has been shutdown.
1119  *
1120  * @param fi    An instance of an interface statemachine.
1121  * @param event The event, just happened.
1122  * @param arg   Generic pointer, casted from struct net_device * upon call.
1123  */
1124 static void
1125 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1126 {
1127 	IUCV_DBF_TEXT(trace, 3, __func__);
1128 
1129 	switch (fsm_getstate(fi)) {
1130 		case DEV_STATE_RUNNING:
1131 			fsm_newstate(fi, DEV_STATE_STARTWAIT);
1132 			break;
1133 		case DEV_STATE_STOPWAIT:
1134 			fsm_newstate(fi, DEV_STATE_STOPPED);
1135 			IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1136 			break;
1137 	}
1138 }
1139 
/*
 * Statemachine table for the interface: maps (state, event) pairs to
 * the action routines above. Unlisted combinations are ignored by the
 * generic fsm code.
 */
static const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },

	{ DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
	{ DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },

	{ DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },

	{ DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
	{ DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
};
1153 
1154 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1155 
1156 /**
1157  * Transmit a packet.
1158  * This is a helper function for netiucv_tx().
1159  *
1160  * @param conn Connection to be used for sending.
1161  * @param skb Pointer to struct sk_buff of packet to send.
1162  *            The linklevel header has already been set up
1163  *            by netiucv_tx().
1164  *
1165  * @return 0 on success, -ERRNO on failure. (Never fails.)
1166  */
1167 static int netiucv_transmit_skb(struct iucv_connection *conn,
1168 				struct sk_buff *skb)
1169 {
1170 	struct iucv_message msg;
1171 	unsigned long saveflags;
1172 	struct ll_header header;
1173 	int rc;
1174 
1175 	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1176 		int l = skb->len + NETIUCV_HDRLEN;
1177 
1178 		spin_lock_irqsave(&conn->collect_lock, saveflags);
1179 		if (conn->collect_len + l >
1180 		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
1181 			rc = -EBUSY;
1182 			IUCV_DBF_TEXT(data, 2,
1183 				      "EBUSY from netiucv_transmit_skb\n");
1184 		} else {
1185 			atomic_inc(&skb->users);
1186 			skb_queue_tail(&conn->collect_queue, skb);
1187 			conn->collect_len += l;
1188 			rc = 0;
1189 		}
1190 		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1191 	} else {
1192 		struct sk_buff *nskb = skb;
1193 		/**
1194 		 * Copy the skb to a new allocated skb in lowmem only if the
1195 		 * data is located above 2G in memory or tailroom is < 2.
1196 		 */
1197 		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1198 				    NETIUCV_HDRLEN)) >> 31;
1199 		int copied = 0;
1200 		if (hi || (skb_tailroom(skb) < 2)) {
1201 			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1202 					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1203 			if (!nskb) {
1204 				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1205 				rc = -ENOMEM;
1206 				return rc;
1207 			} else {
1208 				skb_reserve(nskb, NETIUCV_HDRLEN);
1209 				memcpy(skb_put(nskb, skb->len),
1210 				       skb->data, skb->len);
1211 			}
1212 			copied = 1;
1213 		}
1214 		/**
1215 		 * skb now is below 2G and has enough room. Add headers.
1216 		 */
1217 		header.next = nskb->len + NETIUCV_HDRLEN;
1218 		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1219 		header.next = 0;
1220 		memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header,  NETIUCV_HDRLEN);
1221 
1222 		fsm_newstate(conn->fsm, CONN_STATE_TX);
1223 		conn->prof.send_stamp = current_kernel_time();
1224 
1225 		msg.tag = 1;
1226 		msg.class = 0;
1227 		rc = iucv_message_send(conn->path, &msg, 0, 0,
1228 				       nskb->data, nskb->len);
1229 		conn->prof.doios_single++;
1230 		conn->prof.txlen += skb->len;
1231 		conn->prof.tx_pending++;
1232 		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1233 			conn->prof.tx_max_pending = conn->prof.tx_pending;
1234 		if (rc) {
1235 			struct netiucv_priv *privptr;
1236 			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1237 			conn->prof.tx_pending--;
1238 			privptr = netdev_priv(conn->netdev);
1239 			if (privptr)
1240 				privptr->stats.tx_errors++;
1241 			if (copied)
1242 				dev_kfree_skb(nskb);
1243 			else {
1244 				/**
1245 				 * Remove our headers. They get added
1246 				 * again on retransmit.
1247 				 */
1248 				skb_pull(skb, NETIUCV_HDRLEN);
1249 				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1250 			}
1251 			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1252 		} else {
1253 			if (copied)
1254 				dev_kfree_skb(skb);
1255 			atomic_inc(&nskb->users);
1256 			skb_queue_tail(&conn->commit_queue, nskb);
1257 		}
1258 	}
1259 
1260 	return rc;
1261 }
1262 
1263 /*
1264  * Interface API for upper network layers
1265  */
1266 
1267 /**
1268  * Open an interface.
1269  * Called from generic network layer when ifconfig up is run.
1270  *
1271  * @param dev Pointer to interface struct.
1272  *
1273  * @return 0 on success, -ERRNO on failure. (Never fails.)
1274  */
1275 static int netiucv_open(struct net_device *dev)
1276 {
1277 	struct netiucv_priv *priv = netdev_priv(dev);
1278 
1279 	fsm_event(priv->fsm, DEV_EVENT_START, dev);
1280 	return 0;
1281 }
1282 
1283 /**
1284  * Close an interface.
1285  * Called from generic network layer when ifconfig down is run.
1286  *
1287  * @param dev Pointer to interface struct.
1288  *
1289  * @return 0 on success, -ERRNO on failure. (Never fails.)
1290  */
1291 static int netiucv_close(struct net_device *dev)
1292 {
1293 	struct netiucv_priv *priv = netdev_priv(dev);
1294 
1295 	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1296 	return 0;
1297 }
1298 
/* PM prepare callback: nothing to prepare, only trace the call. */
static int netiucv_pm_prepare(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	return 0;
}
1304 
1305 static void netiucv_pm_complete(struct device *dev)
1306 {
1307 	IUCV_DBF_TEXT(trace, 3, __func__);
1308 	return;
1309 }
1310 
1311 /**
1312  * netiucv_pm_freeze() - Freeze PM callback
1313  * @dev:	netiucv device
1314  *
1315  * close open netiucv interfaces
1316  */
1317 static int netiucv_pm_freeze(struct device *dev)
1318 {
1319 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1320 	struct net_device *ndev = NULL;
1321 	int rc = 0;
1322 
1323 	IUCV_DBF_TEXT(trace, 3, __func__);
1324 	if (priv && priv->conn)
1325 		ndev = priv->conn->netdev;
1326 	if (!ndev)
1327 		goto out;
1328 	netif_device_detach(ndev);
1329 	priv->pm_state = fsm_getstate(priv->fsm);
1330 	rc = netiucv_close(ndev);
1331 out:
1332 	return rc;
1333 }
1334 
1335 /**
1336  * netiucv_pm_restore_thaw() - Thaw and restore PM callback
1337  * @dev:	netiucv device
1338  *
1339  * re-open netiucv interfaces closed during freeze
1340  */
1341 static int netiucv_pm_restore_thaw(struct device *dev)
1342 {
1343 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1344 	struct net_device *ndev = NULL;
1345 	int rc = 0;
1346 
1347 	IUCV_DBF_TEXT(trace, 3, __func__);
1348 	if (priv && priv->conn)
1349 		ndev = priv->conn->netdev;
1350 	if (!ndev)
1351 		goto out;
1352 	switch (priv->pm_state) {
1353 	case DEV_STATE_RUNNING:
1354 	case DEV_STATE_STARTWAIT:
1355 		rc = netiucv_open(ndev);
1356 		break;
1357 	default:
1358 		break;
1359 	}
1360 	netif_device_attach(ndev);
1361 out:
1362 	return rc;
1363 }
1364 
1365 /**
1366  * Start transmission of a packet.
1367  * Called from generic network device layer.
1368  *
1369  * @param skb Pointer to buffer containing the packet.
1370  * @param dev Pointer to interface struct.
1371  *
1372  * @return 0 if packet consumed, !0 if packet rejected.
1373  *         Note: If we return !0, then the packet is free'd by
1374  *               the generic network layer.
1375  */
1376 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1377 {
1378 	struct netiucv_priv *privptr = netdev_priv(dev);
1379 	int rc;
1380 
1381 	IUCV_DBF_TEXT(trace, 4, __func__);
1382 	/**
1383 	 * Some sanity checks ...
1384 	 */
1385 	if (skb == NULL) {
1386 		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1387 		privptr->stats.tx_dropped++;
1388 		return NETDEV_TX_OK;
1389 	}
1390 	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1391 		IUCV_DBF_TEXT(data, 2,
1392 			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1393 		dev_kfree_skb(skb);
1394 		privptr->stats.tx_dropped++;
1395 		return NETDEV_TX_OK;
1396 	}
1397 
1398 	/**
1399 	 * If connection is not running, try to restart it
1400 	 * and throw away packet.
1401 	 */
1402 	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1403 		dev_kfree_skb(skb);
1404 		privptr->stats.tx_dropped++;
1405 		privptr->stats.tx_errors++;
1406 		privptr->stats.tx_carrier_errors++;
1407 		return NETDEV_TX_OK;
1408 	}
1409 
1410 	if (netiucv_test_and_set_busy(dev)) {
1411 		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1412 		return NETDEV_TX_BUSY;
1413 	}
1414 	dev->trans_start = jiffies;
1415 	rc = netiucv_transmit_skb(privptr->conn, skb);
1416 	netiucv_clear_busy(dev);
1417 	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1418 }
1419 
1420 /**
1421  * netiucv_stats
1422  * @dev: Pointer to interface struct.
1423  *
1424  * Returns interface statistics of a device.
1425  *
1426  * Returns pointer to stats struct of this interface.
1427  */
1428 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1429 {
1430 	struct netiucv_priv *priv = netdev_priv(dev);
1431 
1432 	IUCV_DBF_TEXT(trace, 5, __func__);
1433 	return &priv->stats;
1434 }
1435 
1436 /**
1437  * netiucv_change_mtu
1438  * @dev: Pointer to interface struct.
1439  * @new_mtu: The new MTU to use for this interface.
1440  *
1441  * Sets MTU of an interface.
1442  *
1443  * Returns 0 on success, -EINVAL if MTU is out of valid range.
1444  *         (valid range is 576 .. NETIUCV_MTU_MAX).
1445  */
1446 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1447 {
1448 	IUCV_DBF_TEXT(trace, 3, __func__);
1449 	if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1450 		IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1451 		return -EINVAL;
1452 	}
1453 	dev->mtu = new_mtu;
1454 	return 0;
1455 }
1456 
1457 /*
1458  * attributes in sysfs
1459  */
1460 
1461 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1462 			 char *buf)
1463 {
1464 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1465 
1466 	IUCV_DBF_TEXT(trace, 5, __func__);
1467 	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1468 }
1469 
/*
 * Parse a sysfs-written "USERNAME[.userdata]" string into a blank-padded
 * 8-character username and a 16-character userdata field (both
 * NUL-terminated); userdata is converted to EBCDIC.
 *
 * Returns 0 on success, -EINVAL when the input is too long or contains
 * an invalid character.
 */
static int netiucv_check_user(const char *buf, size_t count, char *username,
			      char *userdata)
{
	const char *p;
	int i;

	/*
	 * Length limits: with a '.' separator the username part may be
	 * up to 8 chars and the userdata part up to 16 (plus separator
	 * and trailing newline); without one only the username fits.
	 */
	p = strchr(buf, '.');
	if ((p && ((count > 26) ||
		   ((p - buf) > 8) ||
		   (buf + count - p > 18))) ||
	    (!p && (count > 9))) {
		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
		return -EINVAL;
	}

	/* Username: alphanumerics and '$' only, forced to upper case. */
	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
		if (isalnum(*p) || *p == '$') {
			username[i] = toupper(*p);
			continue;
		}
		if (*p == '\n')
			/* trailing lf, grr */
			break;
		IUCV_DBF_TEXT_(setup, 2,
			       "conn_write: invalid character %02x\n", *p);
		return -EINVAL;
	}
	/* Blank-pad the username to its full 8 characters. */
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	if (*p == '.') {
		/* Explicit userdata given: upper-case it and blank-pad
		 * (padding only if at least one character was supplied). */
		p++;
		for (i = 0; i < 16 && *p; i++, p++) {
			if (*p == '\n')
				break;
			userdata[i] = toupper(*p);
		}
		while (i > 0 && i < 16)
			userdata[i++] = ' ';
	} else
		/* No userdata: fall back to the default iucvMagic. */
		memcpy(userdata, iucvMagic_ascii, 16);
	userdata[16] = '\0';
	ASCEBC(userdata, 16);

	return 0;
}
1517 
/*
 * sysfs: change the peer userid/userdata of this connection.
 *
 * Rejected with -EPERM while the interface is up, and with -EEXIST if
 * another netiucv device already talks to the same peer.
 * NOTE(review): the duplicate check drops the read lock before the
 * update below, so two concurrent writers could in principle race —
 * presumably acceptable for a root-only sysfs attribute; verify.
 */
static ssize_t user_write(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	char	username[9];
	char	userdata[17];
	int	rc;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	if (memcmp(username, priv->conn->userid, 9) &&
	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
		/* username changed while the interface is active. */
		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
		return -EPERM;
	}
	/* Refuse a peer another netiucv device is already connected to. */
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		   !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
				"already exists\n", netiucv_printuser(cp));
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);
	memcpy(priv->conn->userid, username, 9);
	memcpy(priv->conn->userdata, userdata, 17);
	return count;
}

static DEVICE_ATTR(user, 0644, user_show, user_write);
1556 
1557 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1558 			    char *buf)
1559 {
1560 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1561 
1562 	IUCV_DBF_TEXT(trace, 5, __func__);
1563 	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1564 }
1565 
/*
 * sysfs: set the connection's maximum buffer size.
 *
 * The value must not exceed NETIUCV_BUFSIZE_MAX, must leave room for
 * a 576-byte MTU plus two IUCV headers, and — while the interface is
 * running — must still fit the current MTU. When the interface is down
 * the MTU is adjusted to match the new buffer size.
 * NOTE(review): parsing relies on the sysfs input ending in whitespace
 * (typically '\n'); a bare number with no newline is rejected — confirm
 * this is intended before changing to kstrto*().
 */
static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	char         *e;
	int          bs1;

	IUCV_DBF_TEXT(trace, 3, __func__);
	if (count >= 39)
		return -EINVAL;

	bs1 = simple_strtoul(buf, &e, 0);

	/* Anything but whitespace after the digits is an error. */
	if (e && (!isspace(*e))) {
		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
			*e);
		return -EINVAL;
	}
	if (bs1 > NETIUCV_BUFSIZE_MAX) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too large\n",
			bs1);
		return -EINVAL;
	}
	/* A running interface needs the buffer to cover the active MTU. */
	if ((ndev->flags & IFF_RUNNING) &&
	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}
	/* Absolute minimum: smallest legal MTU plus both headers. */
	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
		IUCV_DBF_TEXT_(setup, 2,
			"buffer_write: buffer size %d too small\n",
			bs1);
		return -EINVAL;
	}

	priv->conn->max_buffsize = bs1;
	if (!(ndev->flags & IFF_RUNNING))
		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;

	return count;

}

static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1614 
1615 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1616 			     char *buf)
1617 {
1618 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1619 
1620 	IUCV_DBF_TEXT(trace, 5, __func__);
1621 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1622 }
1623 
1624 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1625 
1626 static ssize_t conn_fsm_show (struct device *dev,
1627 			      struct device_attribute *attr, char *buf)
1628 {
1629 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1630 
1631 	IUCV_DBF_TEXT(trace, 5, __func__);
1632 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1633 }
1634 
1635 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1636 
1637 static ssize_t maxmulti_show (struct device *dev,
1638 			      struct device_attribute *attr, char *buf)
1639 {
1640 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1641 
1642 	IUCV_DBF_TEXT(trace, 5, __func__);
1643 	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1644 }
1645 
1646 static ssize_t maxmulti_write (struct device *dev,
1647 			       struct device_attribute *attr,
1648 			       const char *buf, size_t count)
1649 {
1650 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1651 
1652 	IUCV_DBF_TEXT(trace, 4, __func__);
1653 	priv->conn->prof.maxmulti = 0;
1654 	return count;
1655 }
1656 
1657 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1658 
1659 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1660 			   char *buf)
1661 {
1662 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1663 
1664 	IUCV_DBF_TEXT(trace, 5, __func__);
1665 	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1666 }
1667 
1668 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1669 			    const char *buf, size_t count)
1670 {
1671 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1672 
1673 	IUCV_DBF_TEXT(trace, 4, __func__);
1674 	priv->conn->prof.maxcqueue = 0;
1675 	return count;
1676 }
1677 
1678 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1679 
1680 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1681 			   char *buf)
1682 {
1683 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1684 
1685 	IUCV_DBF_TEXT(trace, 5, __func__);
1686 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1687 }
1688 
1689 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1690 			    const char *buf, size_t count)
1691 {
1692 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1693 
1694 	IUCV_DBF_TEXT(trace, 4, __func__);
1695 	priv->conn->prof.doios_single = 0;
1696 	return count;
1697 }
1698 
1699 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1700 
1701 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1702 			   char *buf)
1703 {
1704 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1705 
1706 	IUCV_DBF_TEXT(trace, 5, __func__);
1707 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1708 }
1709 
1710 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1711 			    const char *buf, size_t count)
1712 {
1713 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1714 
1715 	IUCV_DBF_TEXT(trace, 5, __func__);
1716 	priv->conn->prof.doios_multi = 0;
1717 	return count;
1718 }
1719 
1720 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1721 
1722 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1723 			   char *buf)
1724 {
1725 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1726 
1727 	IUCV_DBF_TEXT(trace, 5, __func__);
1728 	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1729 }
1730 
1731 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1732 			    const char *buf, size_t count)
1733 {
1734 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1735 
1736 	IUCV_DBF_TEXT(trace, 4, __func__);
1737 	priv->conn->prof.txlen = 0;
1738 	return count;
1739 }
1740 
1741 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1742 
1743 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1744 			    char *buf)
1745 {
1746 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1747 
1748 	IUCV_DBF_TEXT(trace, 5, __func__);
1749 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1750 }
1751 
1752 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1753 			     const char *buf, size_t count)
1754 {
1755 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1756 
1757 	IUCV_DBF_TEXT(trace, 4, __func__);
1758 	priv->conn->prof.tx_time = 0;
1759 	return count;
1760 }
1761 
1762 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1763 
1764 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1765 			    char *buf)
1766 {
1767 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1768 
1769 	IUCV_DBF_TEXT(trace, 5, __func__);
1770 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1771 }
1772 
1773 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1774 			     const char *buf, size_t count)
1775 {
1776 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1777 
1778 	IUCV_DBF_TEXT(trace, 4, __func__);
1779 	priv->conn->prof.tx_pending = 0;
1780 	return count;
1781 }
1782 
1783 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1784 
1785 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1786 			    char *buf)
1787 {
1788 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1789 
1790 	IUCV_DBF_TEXT(trace, 5, __func__);
1791 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1792 }
1793 
1794 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1795 			     const char *buf, size_t count)
1796 {
1797 	struct netiucv_priv *priv = dev_get_drvdata(dev);
1798 
1799 	IUCV_DBF_TEXT(trace, 4, __func__);
1800 	priv->conn->prof.tx_max_pending = 0;
1801 	return count;
1802 }
1803 
1804 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1805 
/* Configuration attributes exposed directly on the device. */
static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};
1815 
/* Statistics attributes, grouped under the "stats" subdirectory. */
static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name  = "stats",
	.attrs = netiucv_stat_attrs,
};
1834 
/* All sysfs groups, installed via dev->groups in netiucv_register_device(). */
static const struct attribute_group *netiucv_attr_groups[] = {
	&netiucv_stat_attr_group,
	&netiucv_attr_group,
	NULL,
};
1840 
1841 static int netiucv_register_device(struct net_device *ndev)
1842 {
1843 	struct netiucv_priv *priv = netdev_priv(ndev);
1844 	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1845 	int ret;
1846 
1847 	IUCV_DBF_TEXT(trace, 3, __func__);
1848 
1849 	if (dev) {
1850 		dev_set_name(dev, "net%s", ndev->name);
1851 		dev->bus = &iucv_bus;
1852 		dev->parent = iucv_root;
1853 		dev->groups = netiucv_attr_groups;
1854 		/*
1855 		 * The release function could be called after the
1856 		 * module has been unloaded. It's _only_ task is to
1857 		 * free the struct. Therefore, we specify kfree()
1858 		 * directly here. (Probably a little bit obfuscating
1859 		 * but legitime ...).
1860 		 */
1861 		dev->release = (void (*)(struct device *))kfree;
1862 		dev->driver = &netiucv_driver;
1863 	} else
1864 		return -ENOMEM;
1865 
1866 	ret = device_register(dev);
1867 	if (ret) {
1868 		put_device(dev);
1869 		return ret;
1870 	}
1871 	priv->dev = dev;
1872 	dev_set_drvdata(dev, priv);
1873 	return 0;
1874 }
1875 
1876 static void netiucv_unregister_device(struct device *dev)
1877 {
1878 	IUCV_DBF_TEXT(trace, 3, __func__);
1879 	device_unregister(dev);
1880 }
1881 
1882 /**
1883  * Allocate and initialize a new connection structure.
1884  * Add it to the list of netiucv connections;
1885  */
1886 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1887 						      char *username,
1888 						      char *userdata)
1889 {
1890 	struct iucv_connection *conn;
1891 
1892 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1893 	if (!conn)
1894 		goto out;
1895 	skb_queue_head_init(&conn->collect_queue);
1896 	skb_queue_head_init(&conn->commit_queue);
1897 	spin_lock_init(&conn->collect_lock);
1898 	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1899 	conn->netdev = dev;
1900 
1901 	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1902 	if (!conn->rx_buff)
1903 		goto out_conn;
1904 	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1905 	if (!conn->tx_buff)
1906 		goto out_rx;
1907 	conn->fsm = init_fsm("netiucvconn", conn_state_names,
1908 			     conn_event_names, NR_CONN_STATES,
1909 			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1910 			     GFP_KERNEL);
1911 	if (!conn->fsm)
1912 		goto out_tx;
1913 
1914 	fsm_settimer(conn->fsm, &conn->timer);
1915 	fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1916 
1917 	if (userdata)
1918 		memcpy(conn->userdata, userdata, 17);
1919 	if (username) {
1920 		memcpy(conn->userid, username, 9);
1921 		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1922 	}
1923 
1924 	write_lock_bh(&iucv_connection_rwlock);
1925 	list_add_tail(&conn->list, &iucv_connection_list);
1926 	write_unlock_bh(&iucv_connection_rwlock);
1927 	return conn;
1928 
1929 out_tx:
1930 	kfree_skb(conn->tx_buff);
1931 out_rx:
1932 	kfree_skb(conn->rx_buff);
1933 out_conn:
1934 	kfree(conn);
1935 out:
1936 	return NULL;
1937 }
1938 
1939 /**
1940  * Release a connection structure and remove it from the
1941  * list of netiucv connections.
1942  */
1943 static void netiucv_remove_connection(struct iucv_connection *conn)
1944 {
1945 
1946 	IUCV_DBF_TEXT(trace, 3, __func__);
1947 	write_lock_bh(&iucv_connection_rwlock);
1948 	list_del_init(&conn->list);
1949 	write_unlock_bh(&iucv_connection_rwlock);
1950 	fsm_deltimer(&conn->timer);
1951 	netiucv_purge_skb_queue(&conn->collect_queue);
1952 	if (conn->path) {
1953 		iucv_path_sever(conn->path, conn->userdata);
1954 		kfree(conn->path);
1955 		conn->path = NULL;
1956 	}
1957 	netiucv_purge_skb_queue(&conn->commit_queue);
1958 	kfree_fsm(conn->fsm);
1959 	kfree_skb(conn->rx_buff);
1960 	kfree_skb(conn->tx_buff);
1961 }
1962 
1963 /**
1964  * Release everything of a net device.
1965  */
1966 static void netiucv_free_netdevice(struct net_device *dev)
1967 {
1968 	struct netiucv_priv *privptr = netdev_priv(dev);
1969 
1970 	IUCV_DBF_TEXT(trace, 3, __func__);
1971 
1972 	if (!dev)
1973 		return;
1974 
1975 	if (privptr) {
1976 		if (privptr->conn)
1977 			netiucv_remove_connection(privptr->conn);
1978 		if (privptr->fsm)
1979 			kfree_fsm(privptr->fsm);
1980 		privptr->conn = NULL; privptr->fsm = NULL;
1981 		/* privptr gets freed by free_netdev() */
1982 	}
1983 	free_netdev(dev);
1984 }
1985 
1986 /**
1987  * Initialize a net device. (Called from kernel in alloc_netdev())
1988  */
1989 static const struct net_device_ops netiucv_netdev_ops = {
1990 	.ndo_open		= netiucv_open,
1991 	.ndo_stop		= netiucv_close,
1992 	.ndo_get_stats		= netiucv_stats,
1993 	.ndo_start_xmit		= netiucv_tx,
1994 	.ndo_change_mtu	   	= netiucv_change_mtu,
1995 };
1996 
1997 static void netiucv_setup_netdevice(struct net_device *dev)
1998 {
1999 	dev->mtu	         = NETIUCV_MTU_DEFAULT;
2000 	dev->destructor          = netiucv_free_netdevice;
2001 	dev->hard_header_len     = NETIUCV_HDRLEN;
2002 	dev->addr_len            = 0;
2003 	dev->type                = ARPHRD_SLIP;
2004 	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
2005 	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
2006 	dev->netdev_ops		 = &netiucv_netdev_ops;
2007 }
2008 
2009 /**
2010  * Allocate and initialize everything of a net device.
2011  */
2012 static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
2013 {
2014 	struct netiucv_priv *privptr;
2015 	struct net_device *dev;
2016 
2017 	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
2018 			   netiucv_setup_netdevice);
2019 	if (!dev)
2020 		return NULL;
2021 	rtnl_lock();
2022 	if (dev_alloc_name(dev, dev->name) < 0)
2023 		goto out_netdev;
2024 
2025 	privptr = netdev_priv(dev);
2026 	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
2027 				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
2028 				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2029 	if (!privptr->fsm)
2030 		goto out_netdev;
2031 
2032 	privptr->conn = netiucv_new_connection(dev, username, userdata);
2033 	if (!privptr->conn) {
2034 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
2035 		goto out_fsm;
2036 	}
2037 	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2038 	return dev;
2039 
2040 out_fsm:
2041 	kfree_fsm(privptr->fsm);
2042 out_netdev:
2043 	rtnl_unlock();
2044 	free_netdev(dev);
2045 	return NULL;
2046 }
2047 
/*
 * conn_write - store function for the driver's "connection" attribute.
 *
 * Parses a peer id from @buf, refuses to create a duplicate of an
 * existing connection, and otherwise allocates and registers a new
 * IUCV net_device bound to that peer.
 *
 * Returns @count on success or a negative errno on failure.
 */
static ssize_t conn_write(struct device_driver *drv,
			  const char *buf, size_t count)
{
	char username[9];	/* compared over 9 bytes below, incl. NUL */
	char userdata[17];	/* compared over 17 bytes below, incl. NUL */
	int rc;
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	/* Reject the request if a connection to this peer already exists. */
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		    !strncmp(userdata, cp->userdata, 17)) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
				"already exists\n", netiucv_printuser(cp));
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	/*
	 * NOTE(review): netiucv_init_netdevice() appears to return with the
	 * RTNL lock held on success — every path below drops it exactly
	 * once. Confirm against its definition before touching this flow.
	 */
	dev = netiucv_init_netdevice(username, userdata);
	if (!dev) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		rtnl_unlock();
		IUCV_DBF_TEXT_(setup, 2,
			"ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	/* sysfs magic */
	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);

	rc = register_netdevice(dev);
	rtnl_unlock();
	if (rc)
		goto out_unreg;

	dev_info(priv->dev, "The IUCV interface to %s has been established "
			    "successfully\n",
		netiucv_printuser(priv->conn));

	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	netiucv_free_netdevice(dev);
	return rc;
}
2110 
/* Write-only driver attribute: write a peer id here to create a link. */
static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2112 
2113 static ssize_t remove_write (struct device_driver *drv,
2114 			     const char *buf, size_t count)
2115 {
2116 	struct iucv_connection *cp;
2117         struct net_device *ndev;
2118         struct netiucv_priv *priv;
2119         struct device *dev;
2120         char name[IFNAMSIZ];
2121 	const char *p;
2122         int i;
2123 
2124 	IUCV_DBF_TEXT(trace, 3, __func__);
2125 
2126         if (count >= IFNAMSIZ)
2127                 count = IFNAMSIZ - 1;
2128 
2129 	for (i = 0, p = buf; i < count && *p; i++, p++) {
2130 		if (*p == '\n' || *p == ' ')
2131                         /* trailing lf, grr */
2132                         break;
2133 		name[i] = *p;
2134         }
2135         name[i] = '\0';
2136 
2137 	read_lock_bh(&iucv_connection_rwlock);
2138 	list_for_each_entry(cp, &iucv_connection_list, list) {
2139 		ndev = cp->netdev;
2140 		priv = netdev_priv(ndev);
2141                 dev = priv->dev;
2142 		if (strncmp(name, ndev->name, count))
2143 			continue;
2144 		read_unlock_bh(&iucv_connection_rwlock);
2145                 if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2146 			dev_warn(dev, "The IUCV device is connected"
2147 				" to %s and cannot be removed\n",
2148 				priv->conn->userid);
2149 			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2150 			return -EPERM;
2151                 }
2152                 unregister_netdev(ndev);
2153                 netiucv_unregister_device(dev);
2154                 return count;
2155         }
2156 	read_unlock_bh(&iucv_connection_rwlock);
2157 	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2158         return -EINVAL;
2159 }
2160 
/* Write-only driver attribute: write an interface name here to remove it. */
static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2162 
/* Attributes exported on the netiucv driver object in sysfs. */
static struct attribute * netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};

/* NULL-terminated group list, hooked into netiucv_driver.groups at init. */
static const struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};
2177 
/* Emit the one-line "driver initialized" load message. */
static void netiucv_banner(void)
{
	pr_info("driver initialized\n");
}
2182 
2183 static void __exit netiucv_exit(void)
2184 {
2185 	struct iucv_connection *cp;
2186 	struct net_device *ndev;
2187 	struct netiucv_priv *priv;
2188 	struct device *dev;
2189 
2190 	IUCV_DBF_TEXT(trace, 3, __func__);
2191 	while (!list_empty(&iucv_connection_list)) {
2192 		cp = list_entry(iucv_connection_list.next,
2193 				struct iucv_connection, list);
2194 		ndev = cp->netdev;
2195 		priv = netdev_priv(ndev);
2196 		dev = priv->dev;
2197 
2198 		unregister_netdev(ndev);
2199 		netiucv_unregister_device(dev);
2200 	}
2201 
2202 	device_unregister(netiucv_dev);
2203 	driver_unregister(&netiucv_driver);
2204 	iucv_unregister(&netiucv_handler, 1);
2205 	iucv_unregister_dbf_views();
2206 
2207 	pr_info("driver unloaded\n");
2208 	return;
2209 }
2210 
/*
 * netiucv_init - module initialization.
 *
 * Registers the debug views, the IUCV handler and the device driver,
 * then creates the dummy "netiucv" device that parents all interface
 * devices. On failure, previously acquired resources are released in
 * reverse order through the goto chain.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init netiucv_init(void)
{
	int rc;

	rc = iucv_register_dbf_views();
	if (rc)
		goto out;
	rc = iucv_register(&netiucv_handler, 1);
	if (rc)
		goto out_dbf;
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_driver.groups = netiucv_drv_attr_groups;
	rc = driver_register(&netiucv_driver);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
		goto out_iucv;
	}
	/* establish dummy device */
	netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!netiucv_dev) {
		rc = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(netiucv_dev, "netiucv");
	netiucv_dev->bus = &iucv_bus;
	netiucv_dev->parent = iucv_root;
	/* release callback frees the kzalloc'ed device when the last ref drops */
	netiucv_dev->release = (void (*)(struct device *))kfree;
	netiucv_dev->driver = &netiucv_driver;
	rc = device_register(netiucv_dev);
	if (rc) {
		/* device_register() failed: drop the reference, not kfree() */
		put_device(netiucv_dev);
		goto out_driver;
	}
	netiucv_banner();
	return rc;

out_driver:
	driver_unregister(&netiucv_driver);
out_iucv:
	iucv_unregister(&netiucv_handler, 1);
out_dbf:
	iucv_unregister_dbf_views();
out:
	return rc;
}
2256 
/* Module entry/exit hooks and license declaration. */
module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");
2260