xref: /openbmc/linux/drivers/s390/net/netiucv.c (revision 545e4006)
1 /*
2  * IUCV network driver
3  *
4  * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
5  * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
6  *
7  * Sysfs integration and all bugs therein by Cornelia Huck
8  * (cornelia.huck@de.ibm.com)
9  *
10  * Documentation used:
11  *  the source of the original IUCV driver by:
12  *    Stefan Hegewald <hegewald@de.ibm.com>
13  *    Hartmut Penner <hpenner@de.ibm.com>
14  *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
15  *    Martin Schwidefsky (schwidefsky@de.ibm.com)
16  *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
17  *
18  * This program is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License as published by
20  * the Free Software Foundation; either version 2, or (at your option)
21  * any later version.
22  *
23  * This program is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
26  * GNU General Public License for more details.
27  *
28  * You should have received a copy of the GNU General Public License
29  * along with this program; if not, write to the Free Software
30  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31  *
32  */
33 
34 #undef DEBUG
35 
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/kernel.h>
39 #include <linux/slab.h>
40 #include <linux/errno.h>
41 #include <linux/types.h>
42 #include <linux/interrupt.h>
43 #include <linux/timer.h>
44 #include <linux/bitops.h>
45 
46 #include <linux/signal.h>
47 #include <linux/string.h>
48 #include <linux/device.h>
49 
50 #include <linux/ip.h>
51 #include <linux/if_arp.h>
52 #include <linux/tcp.h>
53 #include <linux/skbuff.h>
54 #include <linux/ctype.h>
55 #include <net/dst.h>
56 
57 #include <asm/io.h>
58 #include <asm/uaccess.h>
59 
60 #include <net/iucv/iucv.h>
61 #include "fsm.h"
62 
63 MODULE_AUTHOR
64     ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
65 MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
66 
67 /**
68  * Debug Facility stuff
69  */
70 #define IUCV_DBF_SETUP_NAME "iucv_setup"
71 #define IUCV_DBF_SETUP_LEN 32
72 #define IUCV_DBF_SETUP_PAGES 2
73 #define IUCV_DBF_SETUP_NR_AREAS 1
74 #define IUCV_DBF_SETUP_LEVEL 3
75 
76 #define IUCV_DBF_DATA_NAME "iucv_data"
77 #define IUCV_DBF_DATA_LEN 128
78 #define IUCV_DBF_DATA_PAGES 2
79 #define IUCV_DBF_DATA_NR_AREAS 1
80 #define IUCV_DBF_DATA_LEVEL 2
81 
82 #define IUCV_DBF_TRACE_NAME "iucv_trace"
83 #define IUCV_DBF_TRACE_LEN 16
84 #define IUCV_DBF_TRACE_PAGES 4
85 #define IUCV_DBF_TRACE_NR_AREAS 1
86 #define IUCV_DBF_TRACE_LEVEL 3
87 
88 #define IUCV_DBF_TEXT(name,level,text) \
89 	do { \
90 		debug_text_event(iucv_dbf_##name,level,text); \
91 	} while (0)
92 
93 #define IUCV_DBF_HEX(name,level,addr,len) \
94 	do { \
95 		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
96 	} while (0)
97 
98 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
99 
100 /* Sort out low debug levels early to avoid wasted sprintfs */
101 static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level)
102 {
103 	return (level <= dbf_grp->level);
104 }
105 
106 #define IUCV_DBF_TEXT_(name, level, text...) \
107 	do { \
108 		if (iucv_dbf_passes(iucv_dbf_##name, level)) { \
109 			char* iucv_dbf_txt_buf = \
110 					get_cpu_var(iucv_dbf_txt_buf); \
111 			sprintf(iucv_dbf_txt_buf, text); \
112 			debug_text_event(iucv_dbf_##name, level, \
113 						iucv_dbf_txt_buf); \
114 			put_cpu_var(iucv_dbf_txt_buf); \
115 		} \
116 	} while (0)
117 
118 #define IUCV_DBF_SPRINTF(name,level,text...) \
119 	do { \
120 		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
122 	} while (0)
123 
124 /**
125  * some more debug stuff
126  */
127 #define IUCV_HEXDUMP16(importance,header,ptr) \
128 PRINT_##importance(header "%02x %02x %02x %02x  %02x %02x %02x %02x  " \
129 		   "%02x %02x %02x %02x  %02x %02x %02x %02x\n", \
130 		   *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
131 		   *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
132 		   *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
133 		   *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
134 		   *(((char*)ptr)+12),*(((char*)ptr)+13), \
135 		   *(((char*)ptr)+14),*(((char*)ptr)+15)); \
136 PRINT_##importance(header "%02x %02x %02x %02x  %02x %02x %02x %02x  " \
137 		   "%02x %02x %02x %02x  %02x %02x %02x %02x\n", \
138 		   *(((char*)ptr)+16),*(((char*)ptr)+17), \
139 		   *(((char*)ptr)+18),*(((char*)ptr)+19), \
140 		   *(((char*)ptr)+20),*(((char*)ptr)+21), \
141 		   *(((char*)ptr)+22),*(((char*)ptr)+23), \
142 		   *(((char*)ptr)+24),*(((char*)ptr)+25), \
143 		   *(((char*)ptr)+26),*(((char*)ptr)+27), \
144 		   *(((char*)ptr)+28),*(((char*)ptr)+29), \
145 		   *(((char*)ptr)+30),*(((char*)ptr)+31));
146 
147 #define PRINTK_HEADER " iucv: "       /* for debugging */
148 
149 static struct device_driver netiucv_driver = {
150 	.owner = THIS_MODULE,
151 	.name = "netiucv",
152 	.bus  = &iucv_bus,
153 };
154 
155 static int netiucv_callback_connreq(struct iucv_path *,
156 				    u8 ipvmid[8], u8 ipuser[16]);
157 static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
158 static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
159 static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
160 static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
161 static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
162 static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
163 
164 static struct iucv_handler netiucv_handler = {
165 	.path_pending	  = netiucv_callback_connreq,
166 	.path_complete	  = netiucv_callback_connack,
167 	.path_severed	  = netiucv_callback_connrej,
168 	.path_quiesced	  = netiucv_callback_connsusp,
169 	.path_resumed	  = netiucv_callback_connres,
170 	.message_pending  = netiucv_callback_rx,
171 	.message_complete = netiucv_callback_txdone
172 };
173 
174 /**
175  * Per connection profiling data
176  */
177 struct connection_profile {
178 	unsigned long maxmulti;
179 	unsigned long maxcqueue;
180 	unsigned long doios_single;
181 	unsigned long doios_multi;
182 	unsigned long txlen;
183 	unsigned long tx_time;
184 	struct timespec send_stamp;
185 	unsigned long tx_pending;
186 	unsigned long tx_max_pending;
187 };
188 
189 /**
190  * Representation of one iucv connection
191  */
192 struct iucv_connection {
193 	struct list_head	  list;
194 	struct iucv_path	  *path;
195 	struct sk_buff            *rx_buff;
196 	struct sk_buff            *tx_buff;
197 	struct sk_buff_head       collect_queue;
198 	struct sk_buff_head	  commit_queue;
199 	spinlock_t                collect_lock;
200 	int                       collect_len;
201 	int                       max_buffsize;
202 	fsm_timer                 timer;
203 	fsm_instance              *fsm;
204 	struct net_device         *netdev;
205 	struct connection_profile prof;
206 	char                      userid[9];
207 };
208 
209 /**
210  * Linked list of all connection structs.
211  */
212 static LIST_HEAD(iucv_connection_list);
213 static DEFINE_RWLOCK(iucv_connection_rwlock);
214 
215 /**
216  * Representation of event-data for the
217  * connection state machine.
218  */
219 struct iucv_event {
220 	struct iucv_connection *conn;
221 	void                   *data;
222 };
223 
224 /**
225  * Private part of the network device structure
226  */
227 struct netiucv_priv {
228 	struct net_device_stats stats;
229 	unsigned long           tbusy;
230 	fsm_instance            *fsm;
231 	struct iucv_connection  *conn;
232 	struct device           *dev;
233 };
234 
235 /**
236  * Link level header for a packet.
237  */
238 struct ll_header {
239 	u16 next;
240 };
241 
242 #define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
243 #define NETIUCV_BUFSIZE_MAX      32768
244 #define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
245 #define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
246 #define NETIUCV_MTU_DEFAULT      9216
247 #define NETIUCV_QUEUELEN_DEFAULT 50
248 #define NETIUCV_TIMEOUT_5SEC     5000
249 
250 /**
251  * Helper functions for busy handling
252  * of network devices.
253  */
254 static inline void netiucv_clear_busy(struct net_device *dev)
255 {
256 	struct netiucv_priv *priv = netdev_priv(dev);
257 	clear_bit(0, &priv->tbusy);
258 	netif_wake_queue(dev);
259 }
260 
261 static inline int netiucv_test_and_set_busy(struct net_device *dev)
262 {
263 	struct netiucv_priv *priv = netdev_priv(dev);
264 	netif_stop_queue(dev);
265 	return test_and_set_bit(0, &priv->tbusy);
266 }
267 
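/*
 * 16-byte ipuser "magic" sent with outgoing path connects (see
 * conn_action_start) and checked against incoming connection requests in
 * netiucv_callback_connreq, so that only peer netiucv partners are
 * accepted. The bytes appear to be EBCDIC ('0' followed by blanks,
 * repeated).
 */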
268 static u8 iucvMagic[16] = {
269 	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
270 	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
271 };
272 
273 /**
274  * Convert an iucv userId to its printable
275  * form (strip whitespace at end).
276  *
277  * @param name An iucv userId
278  *
279  * @returns The printable string (static data!!)
280  */
281 static char *netiucv_printname(char *name)
282 {
283 	static char tmp[9];
284 	char *p = tmp;
285 	memcpy(tmp, name, 8);
286 	tmp[8] = '\0';
287 	while (*p && (!isspace(*p)))
288 		p++;
289 	*p = '\0';
290 	return tmp;
291 }
292 
293 /**
294  * States of the interface statemachine.
295  */
296 enum dev_states {
297 	DEV_STATE_STOPPED,
298 	DEV_STATE_STARTWAIT,
299 	DEV_STATE_STOPWAIT,
300 	DEV_STATE_RUNNING,
301 	/**
302 	 * MUST always be the last element!
303 	 */
304 	NR_DEV_STATES
305 };
306 
307 static const char *dev_state_names[] = {
308 	"Stopped",
309 	"StartWait",
310 	"StopWait",
311 	"Running",
312 };
313 
314 /**
315  * Events of the interface statemachine.
316  */
317 enum dev_events {
318 	DEV_EVENT_START,
319 	DEV_EVENT_STOP,
320 	DEV_EVENT_CONUP,
321 	DEV_EVENT_CONDOWN,
322 	/**
323 	 * MUST always be the last element!
324 	 */
325 	NR_DEV_EVENTS
326 };
327 
328 static const char *dev_event_names[] = {
329 	"Start",
330 	"Stop",
331 	"Connection up",
332 	"Connection down",
333 };
334 
335 /**
336  * Events of the connection statemachine
337  */
338 enum conn_events {
339 	/**
340 	 * Events representing callbacks from the
341 	 * lowlevel iucv layer
342 	 */
343 	CONN_EVENT_CONN_REQ,
344 	CONN_EVENT_CONN_ACK,
345 	CONN_EVENT_CONN_REJ,
346 	CONN_EVENT_CONN_SUS,
347 	CONN_EVENT_CONN_RES,
348 	CONN_EVENT_RX,
349 	CONN_EVENT_TXDONE,
350 
351 	/**
352 	 * Events representing error return codes from
353 	 * calls to the lowlevel iucv layer
354 	 */
355 
356 	/**
357 	 * Event representing timer expiry.
358 	 */
359 	CONN_EVENT_TIMER,
360 
361 	/**
362 	 * Events representing commands from upper levels.
363 	 */
364 	CONN_EVENT_START,
365 	CONN_EVENT_STOP,
366 
367 	/**
368 	 * MUST always be the last element!
369 	 */
370 	NR_CONN_EVENTS,
371 };
372 
373 static const char *conn_event_names[] = {
374 	"Remote connection request",
375 	"Remote connection acknowledge",
376 	"Remote connection reject",
377 	"Connection suspended",
378 	"Connection resumed",
379 	"Data received",
380 	"Data sent",
381 
382 	"Timer",
383 
384 	"Start",
385 	"Stop",
386 };
387 
388 /**
389  * States of the connection statemachine.
390  */
391 enum conn_states {
392 	/**
393 	 * Connection not assigned to any device,
394 	 * initial state, invalid
395 	 */
396 	CONN_STATE_INVALID,
397 
398 	/**
399 	 * Userid assigned but not operating
400 	 */
401 	CONN_STATE_STOPPED,
402 
403 	/**
404 	 * Connection registered,
405 	 * no connection request sent yet,
406 	 * no connection request received
407 	 */
408 	CONN_STATE_STARTWAIT,
409 
410 	/**
411 	 * Connection registered and connection request sent,
412 	 * no acknowledge and no connection request received yet.
413 	 */
414 	CONN_STATE_SETUPWAIT,
415 
416 	/**
417 	 * Connection up and running idle
418 	 */
419 	CONN_STATE_IDLE,
420 
421 	/**
422 	 * Data sent, awaiting CONN_EVENT_TXDONE
423 	 */
424 	CONN_STATE_TX,
425 
426 	/**
427 	 * Error during registration.
428 	 */
429 	CONN_STATE_REGERR,
430 
431 	/**
432 	 * Error during connection setup.
433 	 */
434 	CONN_STATE_CONNERR,
435 
436 	/**
437 	 * MUST always be the last element!
438 	 */
439 	NR_CONN_STATES,
440 };
441 
442 static const char *conn_state_names[] = {
443 	"Invalid",
444 	"Stopped",
445 	"StartWait",
446 	"SetupWait",
447 	"Idle",
448 	"TX",
450 	"Registration error",
451 	"Connect error",
452 };
453 
454 
455 /**
456  * Debug Facility Stuff
457  */
458 static debug_info_t *iucv_dbf_setup = NULL;
459 static debug_info_t *iucv_dbf_data = NULL;
460 static debug_info_t *iucv_dbf_trace = NULL;
461 
462 DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
463 
464 static void iucv_unregister_dbf_views(void)
465 {
466 	if (iucv_dbf_setup)
467 		debug_unregister(iucv_dbf_setup);
468 	if (iucv_dbf_data)
469 		debug_unregister(iucv_dbf_data);
470 	if (iucv_dbf_trace)
471 		debug_unregister(iucv_dbf_trace);
472 }
473 static int iucv_register_dbf_views(void)
474 {
475 	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
476 					IUCV_DBF_SETUP_PAGES,
477 					IUCV_DBF_SETUP_NR_AREAS,
478 					IUCV_DBF_SETUP_LEN);
479 	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
480 				       IUCV_DBF_DATA_PAGES,
481 				       IUCV_DBF_DATA_NR_AREAS,
482 				       IUCV_DBF_DATA_LEN);
483 	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
484 					IUCV_DBF_TRACE_PAGES,
485 					IUCV_DBF_TRACE_NR_AREAS,
486 					IUCV_DBF_TRACE_LEN);
487 
488 	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
489 	    (iucv_dbf_trace == NULL)) {
490 		iucv_unregister_dbf_views();
491 		return -ENOMEM;
492 	}
493 	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
494 	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
495 
496 	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
497 	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
498 
499 	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
500 	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
501 
502 	return 0;
503 }
504 
505 /*
506  * Callback-wrappers, called from lowlevel iucv layer.
507  */
508 
509 static void netiucv_callback_rx(struct iucv_path *path,
510 				struct iucv_message *msg)
511 {
512 	struct iucv_connection *conn = path->private;
513 	struct iucv_event ev;
514 
515 	ev.conn = conn;
516 	ev.data = msg;
517 	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
518 }
519 
520 static void netiucv_callback_txdone(struct iucv_path *path,
521 				    struct iucv_message *msg)
522 {
523 	struct iucv_connection *conn = path->private;
524 	struct iucv_event ev;
525 
526 	ev.conn = conn;
527 	ev.data = msg;
528 	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
529 }
530 
531 static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
532 {
533 	struct iucv_connection *conn = path->private;
534 
535 	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
536 }
537 
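/*
 * A peer requested a new path. The ipuser value must match iucvMagic and
 * the requesting VM user id is matched against the registered
 * connections; on a match the path is handed to that connection's state
 * machine, otherwise the request is rejected with -EINVAL.
 */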
538 static int netiucv_callback_connreq(struct iucv_path *path,
539 				    u8 ipvmid[8], u8 ipuser[16])
540 {
541 	struct iucv_connection *conn = path->private;
542 	struct iucv_event ev;
543 	int rc;
544 
545 	if (memcmp(iucvMagic, ipuser, sizeof(iucvMagic)))
546 		/* ipuser must match iucvMagic. */
547 		return -EINVAL;
548 	rc = -EINVAL;
549 	read_lock_bh(&iucv_connection_rwlock);
550 	list_for_each_entry(conn, &iucv_connection_list, list) {
551 		if (strncmp(ipvmid, conn->userid, 8))
552 			continue;
553 		/* Found a matching connection for this path. */
554 		conn->path = path;
555 		ev.conn = conn;
556 		ev.data = path;
557 		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
558 		rc = 0;
559 	}
560 	read_unlock_bh(&iucv_connection_rwlock);
561 	return rc;
562 }
563 
564 static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
565 {
566 	struct iucv_connection *conn = path->private;
567 
568 	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
569 }
570 
571 static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
572 {
573 	struct iucv_connection *conn = path->private;
574 
575 	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
576 }
577 
578 static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
579 {
580 	struct iucv_connection *conn = path->private;
581 
582 	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
583 }
584 
585 /**
586  * NOP action for statemachines
587  */
588 static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
589 {
590 }
591 
592 /*
593  * Actions of the connection statemachine
594  */
595 
596 /**
597  * netiucv_unpack_skb
598  * @conn: The connection where this skb has been received.
599  * @pskb: The received skb.
600  *
601  * Unpack a just received skb and hand it over to upper layers.
602  * Helper function for conn_action_rx.
603  */
604 static void netiucv_unpack_skb(struct iucv_connection *conn,
605 			       struct sk_buff *pskb)
606 {
607 	struct net_device     *dev = conn->netdev;
608 	struct netiucv_priv   *privptr = netdev_priv(dev);
609 	u16 offset = 0;
610 
611 	skb_put(pskb, NETIUCV_HDRLEN);
612 	pskb->dev = dev;
613 	pskb->ip_summed = CHECKSUM_NONE;
614 	pskb->protocol = ntohs(ETH_P_IP);
615 
616 	while (1) {
617 		struct sk_buff *skb;
618 		struct ll_header *header = (struct ll_header *) pskb->data;
619 
620 		if (!header->next)
621 			break;
622 
623 		skb_pull(pskb, NETIUCV_HDRLEN);
624 		header->next -= offset;
625 		offset += header->next;
626 		header->next -= NETIUCV_HDRLEN;
627 		if (skb_tailroom(pskb) < header->next) {
628 			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
629 				header->next, skb_tailroom(pskb));
630 			return;
631 		}
632 		skb_put(pskb, header->next);
633 		skb_reset_mac_header(pskb);
634 		skb = dev_alloc_skb(pskb->len);
635 		if (!skb) {
636 			IUCV_DBF_TEXT(data, 2,
637 				"Out of memory in netiucv_unpack_skb\n");
638 			privptr->stats.rx_dropped++;
639 			return;
640 		}
641 		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
642 					  pskb->len);
643 		skb_reset_mac_header(skb);
644 		skb->dev = pskb->dev;
645 		skb->protocol = pskb->protocol;
646 		pskb->ip_summed = CHECKSUM_UNNECESSARY;
647 		privptr->stats.rx_packets++;
648 		privptr->stats.rx_bytes += skb->len;
649 		/*
650 		 * Since receiving is always initiated from a tasklet (in iucv.c),
651 		 * we must use netif_rx_ni() instead of netif_rx()
652 		 */
653 		netif_rx_ni(skb);
654 		dev->last_rx = jiffies;
655 		skb_pull(pskb, header->next);
656 		skb_put(pskb, NETIUCV_HDRLEN);
657 	}
658 }
659 
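/*
 * A message is pending on the path: receive it into the preallocated
 * rx_buff (rejecting it if it exceeds max_buffsize) and hand the buffer
 * to netiucv_unpack_skb for delivery to the stack.
 */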
660 static void conn_action_rx(fsm_instance *fi, int event, void *arg)
661 {
662 	struct iucv_event *ev = arg;
663 	struct iucv_connection *conn = ev->conn;
664 	struct iucv_message *msg = ev->data;
665 	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
666 	int rc;
667 
668 	IUCV_DBF_TEXT(trace, 4, __func__);
669 
670 	if (!conn->netdev) {
671 		iucv_message_reject(conn->path, msg);
672 		IUCV_DBF_TEXT(data, 2,
673 			      "Received data for unlinked connection\n");
674 		return;
675 	}
676 	if (msg->length > conn->max_buffsize) {
677 		iucv_message_reject(conn->path, msg);
678 		privptr->stats.rx_dropped++;
679 		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
680 			       msg->length, conn->max_buffsize);
681 		return;
682 	}
683 	conn->rx_buff->data = conn->rx_buff->head;
684 	skb_reset_tail_pointer(conn->rx_buff);
685 	conn->rx_buff->len = 0;
686 	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
687 				  msg->length, NULL);
688 	if (rc || msg->length < 5) {
689 		privptr->stats.rx_errors++;
690 		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
691 		return;
692 	}
693 	netiucv_unpack_skb(conn, conn->rx_buff);
694 }
695 
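/*
 * A previously sent message has completed: drop the committed skb, update
 * the statistics, and flush everything that has accumulated on the
 * collect queue as a single multi-frame IUCV message. If nothing is
 * queued, the connection goes back to idle.
 */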
696 static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
697 {
698 	struct iucv_event *ev = arg;
699 	struct iucv_connection *conn = ev->conn;
700 	struct iucv_message *msg = ev->data;
701 	struct iucv_message txmsg;
702 	struct netiucv_priv *privptr = NULL;
703 	u32 single_flag = msg->tag;
704 	u32 txbytes = 0;
705 	u32 txpackets = 0;
706 	u32 stat_maxcq = 0;
707 	struct sk_buff *skb;
708 	unsigned long saveflags;
709 	struct ll_header header;
710 	int rc;
711 
712 	IUCV_DBF_TEXT(trace, 4, __func__);
713 
714 	if (conn && conn->netdev)
715 		privptr = netdev_priv(conn->netdev);
716 	conn->prof.tx_pending--;
717 	if (single_flag) {
718 		if ((skb = skb_dequeue(&conn->commit_queue))) {
719 			atomic_dec(&skb->users);
720 			dev_kfree_skb_any(skb);
721 			if (privptr) {
722 				privptr->stats.tx_packets++;
723 				privptr->stats.tx_bytes +=
724 					(skb->len - NETIUCV_HDRLEN
725 					 	  - NETIUCV_HDRLEN);
726 			}
727 		}
728 	}
729 	conn->tx_buff->data = conn->tx_buff->head;
730 	skb_reset_tail_pointer(conn->tx_buff);
731 	conn->tx_buff->len = 0;
732 	spin_lock_irqsave(&conn->collect_lock, saveflags);
733 	while ((skb = skb_dequeue(&conn->collect_queue))) {
734 		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
735 		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
736 		       NETIUCV_HDRLEN);
737 		skb_copy_from_linear_data(skb,
738 					  skb_put(conn->tx_buff, skb->len),
739 					  skb->len);
740 		txbytes += skb->len;
741 		txpackets++;
742 		stat_maxcq++;
743 		atomic_dec(&skb->users);
744 		dev_kfree_skb_any(skb);
745 	}
746 	if (conn->collect_len > conn->prof.maxmulti)
747 		conn->prof.maxmulti = conn->collect_len;
748 	conn->collect_len = 0;
749 	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
750 	if (conn->tx_buff->len == 0) {
751 		fsm_newstate(fi, CONN_STATE_IDLE);
752 		return;
753 	}
754 
755 	header.next = 0;
756 	memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
757 	conn->prof.send_stamp = current_kernel_time();
758 	txmsg.class = 0;
759 	txmsg.tag = 0;
760 	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
761 			       conn->tx_buff->data, conn->tx_buff->len);
762 	conn->prof.doios_multi++;
763 	conn->prof.txlen += conn->tx_buff->len;
764 	conn->prof.tx_pending++;
765 	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
766 		conn->prof.tx_max_pending = conn->prof.tx_pending;
767 	if (rc) {
768 		conn->prof.tx_pending--;
769 		fsm_newstate(fi, CONN_STATE_IDLE);
770 		if (privptr)
771 			privptr->stats.tx_errors += txpackets;
772 		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
773 	} else {
774 		if (privptr) {
775 			privptr->stats.tx_packets += txpackets;
776 			privptr->stats.tx_bytes += txbytes;
777 		}
778 		if (stat_maxcq > conn->prof.maxcqueue)
779 			conn->prof.maxcqueue = stat_maxcq;
780 	}
781 }
782 
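/*
 * Accept an incoming path for a matching connection, switch the
 * connection to idle and signal "connection up" to the interface state
 * machine.
 */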
783 static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
784 {
785 	struct iucv_event *ev = arg;
786 	struct iucv_connection *conn = ev->conn;
787 	struct iucv_path *path = ev->data;
788 	struct net_device *netdev = conn->netdev;
789 	struct netiucv_priv *privptr = netdev_priv(netdev);
790 	int rc;
791 
792 	IUCV_DBF_TEXT(trace, 3, __func__);
793 
794 	conn->path = path;
795 	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
796 	path->flags = 0;
797 	rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
798 	if (rc) {
799 		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
800 		return;
801 	}
802 	fsm_newstate(fi, CONN_STATE_IDLE);
803 	netdev->tx_queue_len = conn->path->msglim;
804 	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
805 }
806 
807 static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
808 {
809 	struct iucv_event *ev = arg;
810 	struct iucv_path *path = ev->data;
811 
812 	IUCV_DBF_TEXT(trace, 3, __func__);
813 	iucv_path_sever(path, NULL);
814 }
815 
816 static void conn_action_connack(fsm_instance *fi, int event, void *arg)
817 {
818 	struct iucv_connection *conn = arg;
819 	struct net_device *netdev = conn->netdev;
820 	struct netiucv_priv *privptr = netdev_priv(netdev);
821 
822 	IUCV_DBF_TEXT(trace, 3, __func__);
823 	fsm_deltimer(&conn->timer);
824 	fsm_newstate(fi, CONN_STATE_IDLE);
825 	netdev->tx_queue_len = conn->path->msglim;
826 	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
827 }
828 
829 static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
830 {
831 	struct iucv_connection *conn = arg;
832 
833 	IUCV_DBF_TEXT(trace, 3, __func__);
834 	fsm_deltimer(&conn->timer);
835 	iucv_path_sever(conn->path, NULL);
836 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
837 }
838 
839 static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
840 {
841 	struct iucv_connection *conn = arg;
842 	struct net_device *netdev = conn->netdev;
843 	struct netiucv_priv *privptr = netdev_priv(netdev);
844 
845 	IUCV_DBF_TEXT(trace, 3, __func__);
846 
847 	fsm_deltimer(&conn->timer);
848 	iucv_path_sever(conn->path, NULL);
849 	PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
850 	IUCV_DBF_TEXT(data, 2,
851 		      "conn_action_connsever: Remote dropped connection\n");
852 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
853 	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
854 }
855 
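/*
 * Actively establish a path to the configured peer. The numeric cases
 * below are IUCV CONNECT return codes; the message printed for each one
 * describes the condition. On failure the allocated path is freed again.
 */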
856 static void conn_action_start(fsm_instance *fi, int event, void *arg)
857 {
858 	struct iucv_connection *conn = arg;
859 	int rc;
860 
861 	IUCV_DBF_TEXT(trace, 3, __func__);
862 
863 	fsm_newstate(fi, CONN_STATE_STARTWAIT);
864 	IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
865 		    conn->netdev->name, conn->userid);
866 
867 	/*
868 	 * We must set the state before calling iucv_connect because the
869 	 * callback handler could be called at any point after the connection
870 	 * request is sent
871 	 */
872 
873 	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
874 	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
875 	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
876 			       NULL, iucvMagic, conn);
877 	switch (rc) {
878 	case 0:
879 		conn->netdev->tx_queue_len = conn->path->msglim;
880 		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
881 			     CONN_EVENT_TIMER, conn);
882 		return;
883 	case 11:
884 		PRINT_INFO("%s: User %s is currently not available.\n",
885 			   conn->netdev->name,
886 			   netiucv_printname(conn->userid));
887 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
888 		break;
889 	case 12:
890 		PRINT_INFO("%s: User %s is currently not ready.\n",
891 			   conn->netdev->name,
892 			   netiucv_printname(conn->userid));
893 		fsm_newstate(fi, CONN_STATE_STARTWAIT);
894 		break;
895 	case 13:
896 		PRINT_WARN("%s: Too many IUCV connections.\n",
897 			   conn->netdev->name);
898 		fsm_newstate(fi, CONN_STATE_CONNERR);
899 		break;
900 	case 14:
901 		PRINT_WARN("%s: User %s has too many IUCV connections.\n",
902 			   conn->netdev->name,
903 			   netiucv_printname(conn->userid));
904 		fsm_newstate(fi, CONN_STATE_CONNERR);
905 		break;
906 	case 15:
907 		PRINT_WARN("%s: No IUCV authorization in CP directory.\n",
908 			   conn->netdev->name);
909 		fsm_newstate(fi, CONN_STATE_CONNERR);
910 		break;
911 	default:
912 		PRINT_WARN("%s: iucv_connect returned error %d\n",
913 			   conn->netdev->name, rc);
914 		fsm_newstate(fi, CONN_STATE_CONNERR);
915 		break;
916 	}
917 	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
918 	kfree(conn->path);
919 	conn->path = NULL;
920 }
921 
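/*
 * Drop all skbs still sitting on a queue, releasing the extra reference
 * that was taken when they were queued.
 */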
922 static void netiucv_purge_skb_queue(struct sk_buff_head *q)
923 {
924 	struct sk_buff *skb;
925 
926 	while ((skb = skb_dequeue(q))) {
927 		atomic_dec(&skb->users);
928 		dev_kfree_skb_any(skb);
929 	}
930 }
931 
932 static void conn_action_stop(fsm_instance *fi, int event, void *arg)
933 {
934 	struct iucv_event *ev = arg;
935 	struct iucv_connection *conn = ev->conn;
936 	struct net_device *netdev = conn->netdev;
937 	struct netiucv_priv *privptr = netdev_priv(netdev);
938 
939 	IUCV_DBF_TEXT(trace, 3, __func__);
940 
941 	fsm_deltimer(&conn->timer);
942 	fsm_newstate(fi, CONN_STATE_STOPPED);
943 	netiucv_purge_skb_queue(&conn->collect_queue);
944 	if (conn->path) {
945 		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
946 		iucv_path_sever(conn->path, iucvMagic);
947 		kfree(conn->path);
948 		conn->path = NULL;
949 	}
950 	netiucv_purge_skb_queue(&conn->commit_queue);
951 	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
952 }
953 
954 static void conn_action_inval(fsm_instance *fi, int event, void *arg)
955 {
956 	struct iucv_connection *conn = arg;
957 	struct net_device *netdev = conn->netdev;
958 
959 	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
960 		netdev->name, conn->userid);
961 }
962 
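/*
 * Transition table of the connection state machine: one entry per
 * (state, event) pair with the action to run.
 */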
963 static const fsm_node conn_fsm[] = {
964 	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
965 	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
966 
967 	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
968 	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
969 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
970 	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
971 	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
972 	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
973 	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
974 
975 	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
976 	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
977 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
978 	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
979 	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
980 
981 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
982 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
983 
984 	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
985 	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
986 	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
987 
988 	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
989 	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
990 
991 	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
992 	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
993 };
994 
995 static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
996 
997 
998 /*
999  * Actions for interface - statemachine.
1000  */
1001 
1002 /**
1003  * dev_action_start
1004  * @fi: An instance of an interface statemachine.
1005  * @event: The event, just happened.
1006  * @arg: Generic pointer, cast from struct net_device * upon call.
1007  *
1008  * Start up the connection by sending CONN_EVENT_START to it.
1009  */
1010 static void dev_action_start(fsm_instance *fi, int event, void *arg)
1011 {
1012 	struct net_device   *dev = arg;
1013 	struct netiucv_priv *privptr = netdev_priv(dev);
1014 
1015 	IUCV_DBF_TEXT(trace, 3, __func__);
1016 
1017 	fsm_newstate(fi, DEV_STATE_STARTWAIT);
1018 	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1019 }
1020 
1021 /**
1022  * Shut down the connection by sending CONN_EVENT_STOP to it.
1023  *
1024  * @param fi    An instance of an interface statemachine.
1025  * @param event The event, just happened.
1026  * @param arg   Generic pointer, cast from struct net_device * upon call.
1027  */
1028 static void
1029 dev_action_stop(fsm_instance *fi, int event, void *arg)
1030 {
1031 	struct net_device   *dev = arg;
1032 	struct netiucv_priv *privptr = netdev_priv(dev);
1033 	struct iucv_event   ev;
1034 
1035 	IUCV_DBF_TEXT(trace, 3, __func__);
1036 
1037 	ev.conn = privptr->conn;
1038 
1039 	fsm_newstate(fi, DEV_STATE_STOPWAIT);
1040 	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1041 }
1042 
1043 /**
1044  * Called from connection statemachine
1045  * when a connection is up and running.
1046  *
1047  * @param fi    An instance of an interface statemachine.
1048  * @param event The event, just happened.
1049  * @param arg   Generic pointer, cast from struct net_device * upon call.
1050  */
1051 static void
1052 dev_action_connup(fsm_instance *fi, int event, void *arg)
1053 {
1054 	struct net_device   *dev = arg;
1055 	struct netiucv_priv *privptr = netdev_priv(dev);
1056 
1057 	IUCV_DBF_TEXT(trace, 3, __func__);
1058 
1059 	switch (fsm_getstate(fi)) {
1060 		case DEV_STATE_STARTWAIT:
1061 			fsm_newstate(fi, DEV_STATE_RUNNING);
1062 			PRINT_INFO("%s: connected with remote side %s\n",
1063 			       dev->name, privptr->conn->userid);
1064 			IUCV_DBF_TEXT(setup, 3,
1065 				"connection is up and running\n");
1066 			break;
1067 		case DEV_STATE_STOPWAIT:
1068 			IUCV_DBF_TEXT(data, 2,
1069 				"dev_action_connup: in DEV_STATE_STOPWAIT\n");
1070 			break;
1071 	}
1072 }
1073 
1074 /**
1075  * Called from connection statemachine
1076  * when a connection has been shutdown.
1077  *
1078  * @param fi    An instance of an interface statemachine.
1079  * @param event The event, just happened.
1080  * @param arg   Generic pointer, cast from struct net_device * upon call.
1081  */
1082 static void
1083 dev_action_conndown(fsm_instance *fi, int event, void *arg)
1084 {
1085 	IUCV_DBF_TEXT(trace, 3, __func__);
1086 
1087 	switch (fsm_getstate(fi)) {
1088 		case DEV_STATE_RUNNING:
1089 			fsm_newstate(fi, DEV_STATE_STARTWAIT);
1090 			break;
1091 		case DEV_STATE_STOPWAIT:
1092 			fsm_newstate(fi, DEV_STATE_STOPPED);
1093 			IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1094 			break;
1095 	}
1096 }
1097 
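/*
 * Transition table of the interface (device) state machine.
 */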
1098 static const fsm_node dev_fsm[] = {
1099 	{ DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
1100 
1101 	{ DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
1102 	{ DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
1103 
1104 	{ DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
1105 	{ DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
1106 
1107 	{ DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
1108 	{ DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
1109 	{ DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
1110 };
1111 
1112 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1113 
1114 /**
1115  * Transmit a packet.
1116  * This is a helper function for netiucv_tx().
1117  *
1118  * @param conn Connection to be used for sending.
1119  * @param skb Pointer to struct sk_buff of packet to send.
1120  *            The linklevel header has already been set up
1121  *            by netiucv_tx().
1122  *
1123  * @return 0 on success, -ERRNO on failure.
1124  */
1125 static int netiucv_transmit_skb(struct iucv_connection *conn,
1126 				struct sk_buff *skb)
1127 {
1128 	struct iucv_message msg;
1129 	unsigned long saveflags;
1130 	struct ll_header header;
1131 	int rc;
1132 
1133 	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1134 		int l = skb->len + NETIUCV_HDRLEN;
1135 
1136 		spin_lock_irqsave(&conn->collect_lock, saveflags);
1137 		if (conn->collect_len + l >
1138 		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
1139 			rc = -EBUSY;
1140 			IUCV_DBF_TEXT(data, 2,
1141 				      "EBUSY from netiucv_transmit_skb\n");
1142 		} else {
1143 			atomic_inc(&skb->users);
1144 			skb_queue_tail(&conn->collect_queue, skb);
1145 			conn->collect_len += l;
1146 			rc = 0;
1147 		}
1148 		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1149 	} else {
1150 		struct sk_buff *nskb = skb;
1151 		/**
1152 		 * Copy the skb to a newly allocated skb in lowmem only if the
1153 		 * data is located above 2G in memory or tailroom is < 2.
1154 		 */
1155 		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1156 				    NETIUCV_HDRLEN)) >> 31;
1157 		int copied = 0;
1158 		if (hi || (skb_tailroom(skb) < 2)) {
1159 			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1160 					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1161 			if (!nskb) {
1162 				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1163 				rc = -ENOMEM;
1164 				return rc;
1165 			} else {
1166 				skb_reserve(nskb, NETIUCV_HDRLEN);
1167 				memcpy(skb_put(nskb, skb->len),
1168 				       skb->data, skb->len);
1169 			}
1170 			copied = 1;
1171 		}
1172 		/**
1173 		 * skb now is below 2G and has enough room. Add headers.
1174 		 */
1175 		header.next = nskb->len + NETIUCV_HDRLEN;
1176 		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1177 		header.next = 0;
1178 		memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header,  NETIUCV_HDRLEN);
1179 
1180 		fsm_newstate(conn->fsm, CONN_STATE_TX);
1181 		conn->prof.send_stamp = current_kernel_time();
1182 
1183 		msg.tag = 1;
1184 		msg.class = 0;
1185 		rc = iucv_message_send(conn->path, &msg, 0, 0,
1186 				       nskb->data, nskb->len);
1187 		conn->prof.doios_single++;
1188 		conn->prof.txlen += skb->len;
1189 		conn->prof.tx_pending++;
1190 		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1191 			conn->prof.tx_max_pending = conn->prof.tx_pending;
1192 		if (rc) {
1193 			struct netiucv_priv *privptr;
1194 			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1195 			conn->prof.tx_pending--;
1196 			privptr = netdev_priv(conn->netdev);
1197 			if (privptr)
1198 				privptr->stats.tx_errors++;
1199 			if (copied)
1200 				dev_kfree_skb(nskb);
1201 			else {
1202 				/**
1203 				 * Remove our headers. They get added
1204 				 * again on retransmit.
1205 				 */
1206 				skb_pull(skb, NETIUCV_HDRLEN);
1207 				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1208 			}
1209 			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1210 		} else {
1211 			if (copied)
1212 				dev_kfree_skb(skb);
1213 			atomic_inc(&nskb->users);
1214 			skb_queue_tail(&conn->commit_queue, nskb);
1215 		}
1216 	}
1217 
1218 	return rc;
1219 }
1220 
1221 /*
1222  * Interface API for upper network layers
1223  */
1224 
1225 /**
1226  * Open an interface.
1227  * Called from generic network layer when ifconfig up is run.
1228  *
1229  * @param dev Pointer to interface struct.
1230  *
1231  * @return 0 on success, -ERRNO on failure. (Never fails.)
1232  */
1233 static int netiucv_open(struct net_device *dev)
1234 {
1235 	struct netiucv_priv *priv = netdev_priv(dev);
1236 
1237 	fsm_event(priv->fsm, DEV_EVENT_START, dev);
1238 	return 0;
1239 }
1240 
1241 /**
1242  * Close an interface.
1243  * Called from generic network layer when ifconfig down is run.
1244  *
1245  * @param dev Pointer to interface struct.
1246  *
1247  * @return 0 on success, -ERRNO on failure. (Never fails.)
1248  */
1249 static int netiucv_close(struct net_device *dev)
1250 {
1251 	struct netiucv_priv *priv = netdev_priv(dev);
1252 
1253 	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1254 	return 0;
1255 }
1256 
1257 /**
1258  * Start transmission of a packet.
1259  * Called from generic network device layer.
1260  *
1261  * @param skb Pointer to buffer containing the packet.
1262  * @param dev Pointer to interface struct.
1263  *
1264  * @return 0 if packet consumed, !0 if packet rejected.
1265  *         Note: If we return !0, then the packet is freed by
1266  *               the generic network layer.
1267  */
1268 static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1269 {
1270 	struct netiucv_priv *privptr = netdev_priv(dev);
1271 	int rc;
1272 
1273 	IUCV_DBF_TEXT(trace, 4, __func__);
1274 	/**
1275 	 * Some sanity checks ...
1276 	 */
1277 	if (skb == NULL) {
1278 		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1279 		privptr->stats.tx_dropped++;
1280 		return 0;
1281 	}
1282 	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1283 		IUCV_DBF_TEXT(data, 2,
1284 			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1285 		dev_kfree_skb(skb);
1286 		privptr->stats.tx_dropped++;
1287 		return 0;
1288 	}
1289 
1290 	/**
1291 	 * If the connection is not running, throw away the packet
1292 	 * and account it as dropped.
1293 	 */
1294 	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1295 		dev_kfree_skb(skb);
1296 		privptr->stats.tx_dropped++;
1297 		privptr->stats.tx_errors++;
1298 		privptr->stats.tx_carrier_errors++;
1299 		return 0;
1300 	}
1301 
1302 	if (netiucv_test_and_set_busy(dev)) {
1303 		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1304 		return -EBUSY;
1305 	}
1306 	dev->trans_start = jiffies;
1307 	rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
1308 	netiucv_clear_busy(dev);
1309 	return rc;
1310 }
1311 
1312 /**
1313  * netiucv_stats
1314  * @dev: Pointer to interface struct.
1315  *
1316  * Returns interface statistics of a device.
1317  *
1318  * Returns pointer to stats struct of this interface.
1319  */
1320 static struct net_device_stats *netiucv_stats (struct net_device * dev)
1321 {
1322 	struct netiucv_priv *priv = netdev_priv(dev);
1323 
1324 	IUCV_DBF_TEXT(trace, 5, __func__);
1325 	return &priv->stats;
1326 }
1327 
1328 /**
1329  * netiucv_change_mtu
1330  * @dev: Pointer to interface struct.
1331  * @new_mtu: The new MTU to use for this interface.
1332  *
1333  * Sets MTU of an interface.
1334  *
1335  * Returns 0 on success, -EINVAL if MTU is out of valid range.
1336  *         (valid range is 576 .. NETIUCV_MTU_MAX).
1337  */
1338 static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1339 {
1340 	IUCV_DBF_TEXT(trace, 3, __func__);
1341 	if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1342 		IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1343 		return -EINVAL;
1344 	}
1345 	dev->mtu = new_mtu;
1346 	return 0;
1347 }
1348 
1349 /*
1350  * attributes in sysfs
1351  */
1352 
1353 static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1354 			 char *buf)
1355 {
1356 	struct netiucv_priv *priv = dev->driver_data;
1357 
1358 	IUCV_DBF_TEXT(trace, 5, __func__);
1359 	return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
1360 }
1361 
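/*
 * Set the peer VM user id via sysfs. The input is upper-cased and
 * blank-padded to 8 characters; changing it is refused while the
 * interface is up, and a user id already used by another connection is
 * rejected.
 */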
1362 static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1363 			  const char *buf, size_t count)
1364 {
1365 	struct netiucv_priv *priv = dev->driver_data;
1366 	struct net_device *ndev = priv->conn->netdev;
1367 	char    *p;
1368 	char    *tmp;
1369 	char 	username[9];
1370 	int 	i;
1371 	struct iucv_connection *cp;
1372 
1373 	IUCV_DBF_TEXT(trace, 3, __func__);
1374 	if (count > 9) {
1375 		IUCV_DBF_TEXT_(setup, 2,
1376 			       "%d is length of username\n", (int) count);
1377 		return -EINVAL;
1378 	}
1379 
1380 	tmp = strsep((char **) &buf, "\n");
1381 	for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
1382 		if (isalnum(*p) || (*p == '$')) {
1383 			username[i]= toupper(*p);
1384 			continue;
1385 		}
1386 		if (*p == '\n') {
1387 			/* trailing lf, grr */
1388 			break;
1389 		}
1390 		IUCV_DBF_TEXT_(setup, 2,
1391 			       "username: invalid character %c\n", *p);
1392 		return -EINVAL;
1393 	}
1394 	while (i < 8)
1395 		username[i++] = ' ';
1396 	username[8] = '\0';
1397 
1398 	if (memcmp(username, priv->conn->userid, 9) &&
1399 	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1400 		/* username changed while the interface is active. */
1401 		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1402 		return -EPERM;
1403 	}
1404 	read_lock_bh(&iucv_connection_rwlock);
1405 	list_for_each_entry(cp, &iucv_connection_list, list) {
1406 		if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
1407 			read_unlock_bh(&iucv_connection_rwlock);
1408 			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
1409 				"to %s already exists\n", username);
1410 			return -EEXIST;
1411 		}
1412 	}
1413 	read_unlock_bh(&iucv_connection_rwlock);
1414 	memcpy(priv->conn->userid, username, 9);
1415 	return count;
1416 }
1417 
1418 static DEVICE_ATTR(user, 0644, user_show, user_write);
1419 
1420 static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1421 			    char *buf)
1422 {	struct netiucv_priv *priv = dev->driver_data;
1423 
1424 	IUCV_DBF_TEXT(trace, 5, __func__);
1425 	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1426 }
1427 
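/*
 * Set the maximum IUCV buffer size via sysfs. The value must not exceed
 * NETIUCV_BUFSIZE_MAX, must leave room for the current MTU while the
 * interface is running, and must at least hold a 576-byte packet plus
 * headers. If the interface is not running, the MTU is adjusted to match.
 */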
1428 static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1429 			     const char *buf, size_t count)
1430 {
1431 	struct netiucv_priv *priv = dev->driver_data;
1432 	struct net_device *ndev = priv->conn->netdev;
1433 	char         *e;
1434 	int          bs1;
1435 
1436 	IUCV_DBF_TEXT(trace, 3, __func__);
1437 	if (count >= 39)
1438 		return -EINVAL;
1439 
1440 	bs1 = simple_strtoul(buf, &e, 0);
1441 
1442 	if (e && (!isspace(*e))) {
1443 		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
1444 		return -EINVAL;
1445 	}
1446 	if (bs1 > NETIUCV_BUFSIZE_MAX) {
1447 		IUCV_DBF_TEXT_(setup, 2,
1448 			"buffer_write: buffer size %d too large\n",
1449 			bs1);
1450 		return -EINVAL;
1451 	}
1452 	if ((ndev->flags & IFF_RUNNING) &&
1453 	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1454 		IUCV_DBF_TEXT_(setup, 2,
1455 			"buffer_write: buffer size %d too small\n",
1456 			bs1);
1457 		return -EINVAL;
1458 	}
1459 	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1460 		IUCV_DBF_TEXT_(setup, 2,
1461 			"buffer_write: buffer size %d too small\n",
1462 			bs1);
1463 		return -EINVAL;
1464 	}
1465 
1466 	priv->conn->max_buffsize = bs1;
1467 	if (!(ndev->flags & IFF_RUNNING))
1468 		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1469 
1470 	return count;
1471 
1472 }
1473 
1474 static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1475 
1476 static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1477 			     char *buf)
1478 {
1479 	struct netiucv_priv *priv = dev->driver_data;
1480 
1481 	IUCV_DBF_TEXT(trace, 5, __func__);
1482 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1483 }
1484 
1485 static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1486 
1487 static ssize_t conn_fsm_show (struct device *dev,
1488 			      struct device_attribute *attr, char *buf)
1489 {
1490 	struct netiucv_priv *priv = dev->driver_data;
1491 
1492 	IUCV_DBF_TEXT(trace, 5, __func__);
1493 	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1494 }
1495 
1496 static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1497 
1498 static ssize_t maxmulti_show (struct device *dev,
1499 			      struct device_attribute *attr, char *buf)
1500 {
1501 	struct netiucv_priv *priv = dev->driver_data;
1502 
1503 	IUCV_DBF_TEXT(trace, 5, __func__);
1504 	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1505 }
1506 
1507 static ssize_t maxmulti_write (struct device *dev,
1508 			       struct device_attribute *attr,
1509 			       const char *buf, size_t count)
1510 {
1511 	struct netiucv_priv *priv = dev->driver_data;
1512 
1513 	IUCV_DBF_TEXT(trace, 4, __func__);
1514 	priv->conn->prof.maxmulti = 0;
1515 	return count;
1516 }
1517 
1518 static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1519 
1520 static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1521 			   char *buf)
1522 {
1523 	struct netiucv_priv *priv = dev->driver_data;
1524 
1525 	IUCV_DBF_TEXT(trace, 5, __func__);
1526 	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1527 }
1528 
1529 static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1530 			    const char *buf, size_t count)
1531 {
1532 	struct netiucv_priv *priv = dev->driver_data;
1533 
1534 	IUCV_DBF_TEXT(trace, 4, __func__);
1535 	priv->conn->prof.maxcqueue = 0;
1536 	return count;
1537 }
1538 
1539 static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1540 
1541 static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1542 			   char *buf)
1543 {
1544 	struct netiucv_priv *priv = dev->driver_data;
1545 
1546 	IUCV_DBF_TEXT(trace, 5, __func__);
1547 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1548 }
1549 
1550 static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1551 			    const char *buf, size_t count)
1552 {
1553 	struct netiucv_priv *priv = dev->driver_data;
1554 
1555 	IUCV_DBF_TEXT(trace, 4, __func__);
1556 	priv->conn->prof.doios_single = 0;
1557 	return count;
1558 }
1559 
1560 static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1561 
1562 static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1563 			   char *buf)
1564 {
1565 	struct netiucv_priv *priv = dev->driver_data;
1566 
1567 	IUCV_DBF_TEXT(trace, 5, __func__);
1568 	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1569 }
1570 
1571 static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1572 			    const char *buf, size_t count)
1573 {
1574 	struct netiucv_priv *priv = dev->driver_data;
1575 
1576 	IUCV_DBF_TEXT(trace, 5, __func__);
1577 	priv->conn->prof.doios_multi = 0;
1578 	return count;
1579 }
1580 
1581 static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1582 
1583 static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1584 			   char *buf)
1585 {
1586 	struct netiucv_priv *priv = dev->driver_data;
1587 
1588 	IUCV_DBF_TEXT(trace, 5, __func__);
1589 	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1590 }
1591 
1592 static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1593 			    const char *buf, size_t count)
1594 {
1595 	struct netiucv_priv *priv = dev->driver_data;
1596 
1597 	IUCV_DBF_TEXT(trace, 4, __func__);
1598 	priv->conn->prof.txlen = 0;
1599 	return count;
1600 }
1601 
1602 static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1603 
1604 static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1605 			    char *buf)
1606 {
1607 	struct netiucv_priv *priv = dev->driver_data;
1608 
1609 	IUCV_DBF_TEXT(trace, 5, __func__);
1610 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1611 }
1612 
1613 static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1614 			     const char *buf, size_t count)
1615 {
1616 	struct netiucv_priv *priv = dev->driver_data;
1617 
1618 	IUCV_DBF_TEXT(trace, 4, __func__);
1619 	priv->conn->prof.tx_time = 0;
1620 	return count;
1621 }
1622 
1623 static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1624 
1625 static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1626 			    char *buf)
1627 {
1628 	struct netiucv_priv *priv = dev->driver_data;
1629 
1630 	IUCV_DBF_TEXT(trace, 5, __func__);
1631 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1632 }
1633 
1634 static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1635 			     const char *buf, size_t count)
1636 {
1637 	struct netiucv_priv *priv = dev->driver_data;
1638 
1639 	IUCV_DBF_TEXT(trace, 4, __func__);
1640 	priv->conn->prof.tx_pending = 0;
1641 	return count;
1642 }
1643 
1644 static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1645 
1646 static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1647 			    char *buf)
1648 {
1649 	struct netiucv_priv *priv = dev->driver_data;
1650 
1651 	IUCV_DBF_TEXT(trace, 5, __func__);
1652 	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1653 }
1654 
1655 static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1656 			     const char *buf, size_t count)
1657 {
1658 	struct netiucv_priv *priv = dev->driver_data;
1659 
1660 	IUCV_DBF_TEXT(trace, 4, __func__);
1661 	priv->conn->prof.tx_max_pending = 0;
1662 	return count;
1663 }
1664 
1665 static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1666 
1667 static struct attribute *netiucv_attrs[] = {
1668 	&dev_attr_buffer.attr,
1669 	&dev_attr_user.attr,
1670 	NULL,
1671 };
1672 
1673 static struct attribute_group netiucv_attr_group = {
1674 	.attrs = netiucv_attrs,
1675 };
1676 
1677 static struct attribute *netiucv_stat_attrs[] = {
1678 	&dev_attr_device_fsm_state.attr,
1679 	&dev_attr_connection_fsm_state.attr,
1680 	&dev_attr_max_tx_buffer_used.attr,
1681 	&dev_attr_max_chained_skbs.attr,
1682 	&dev_attr_tx_single_write_ops.attr,
1683 	&dev_attr_tx_multi_write_ops.attr,
1684 	&dev_attr_netto_bytes.attr,
1685 	&dev_attr_max_tx_io_time.attr,
1686 	&dev_attr_tx_pending.attr,
1687 	&dev_attr_tx_max_pending.attr,
1688 	NULL,
1689 };
1690 
1691 static struct attribute_group netiucv_stat_attr_group = {
1692 	.name  = "stats",
1693 	.attrs = netiucv_stat_attrs,
1694 };
1695 
1696 static int netiucv_add_files(struct device *dev)
1697 {
1698 	int ret;
1699 
1700 	IUCV_DBF_TEXT(trace, 3, __func__);
1701 	ret = sysfs_create_group(&dev->kobj, &netiucv_attr_group);
1702 	if (ret)
1703 		return ret;
1704 	ret = sysfs_create_group(&dev->kobj, &netiucv_stat_attr_group);
1705 	if (ret)
1706 		sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1707 	return ret;
1708 }
1709 
1710 static void netiucv_remove_files(struct device *dev)
1711 {
1712 	IUCV_DBF_TEXT(trace, 3, __func__);
1713 	sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
1714 	sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
1715 }
1716 
1717 static int netiucv_register_device(struct net_device *ndev)
1718 {
1719 	struct netiucv_priv *priv = netdev_priv(ndev);
1720 	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1721 	int ret;
1722 
1723 
1724 	IUCV_DBF_TEXT(trace, 3, __func__);
1725 
1726 	if (dev) {
1727 		snprintf(dev->bus_id, BUS_ID_SIZE, "net%s", ndev->name);
1728 		dev->bus = &iucv_bus;
1729 		dev->parent = iucv_root;
1730 		/*
1731 		 * The release function could be called after the
1732 		 * module has been unloaded. Its _only_ task is to
1733 		 * free the struct. Therefore, we specify kfree()
1734 		 * directly here. (Probably a little bit obfuscating
1735 		 * but legitimate ...).
1736 		 */
1737 		dev->release = (void (*)(struct device *))kfree;
1738 		dev->driver = &netiucv_driver;
1739 	} else
1740 		return -ENOMEM;
1741 
1742 	ret = device_register(dev);
1743 
1744 	if (ret)
1745 		return ret;
1746 	ret = netiucv_add_files(dev);
1747 	if (ret)
1748 		goto out_unreg;
1749 	priv->dev = dev;
1750 	dev->driver_data = priv;
1751 	return 0;
1752 
1753 out_unreg:
1754 	device_unregister(dev);
1755 	return ret;
1756 }
1757 
1758 static void netiucv_unregister_device(struct device *dev)
1759 {
1760 	IUCV_DBF_TEXT(trace, 3, __func__);
1761 	netiucv_remove_files(dev);
1762 	device_unregister(dev);
1763 }
1764 
1765 /**
1766  * Allocate and initialize a new connection structure.
1767  * Add it to the list of netiucv connections.
1768  */
1769 static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1770 						      char *username)
1771 {
1772 	struct iucv_connection *conn;
1773 
1774 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1775 	if (!conn)
1776 		goto out;
1777 	skb_queue_head_init(&conn->collect_queue);
1778 	skb_queue_head_init(&conn->commit_queue);
1779 	spin_lock_init(&conn->collect_lock);
1780 	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1781 	conn->netdev = dev;
1782 
1783 	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1784 	if (!conn->rx_buff)
1785 		goto out_conn;
1786 	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1787 	if (!conn->tx_buff)
1788 		goto out_rx;
1789 	conn->fsm = init_fsm("netiucvconn", conn_state_names,
1790 			     conn_event_names, NR_CONN_STATES,
1791 			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1792 			     GFP_KERNEL);
1793 	if (!conn->fsm)
1794 		goto out_tx;
1795 
1796 	fsm_settimer(conn->fsm, &conn->timer);
1797 	fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1798 
1799 	if (username) {
1800 		memcpy(conn->userid, username, 9);
1801 		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1802 	}
1803 
1804 	write_lock_bh(&iucv_connection_rwlock);
1805 	list_add_tail(&conn->list, &iucv_connection_list);
1806 	write_unlock_bh(&iucv_connection_rwlock);
1807 	return conn;
1808 
1809 out_tx:
1810 	kfree_skb(conn->tx_buff);
1811 out_rx:
1812 	kfree_skb(conn->rx_buff);
1813 out_conn:
1814 	kfree(conn);
1815 out:
1816 	return NULL;
1817 }
1818 
1819 /**
1820  * Release a connection structure and remove it from the
1821  * list of netiucv connections.
1822  */
1823 static void netiucv_remove_connection(struct iucv_connection *conn)
1824 {
1825 	IUCV_DBF_TEXT(trace, 3, __func__);
1826 	write_lock_bh(&iucv_connection_rwlock);
1827 	list_del_init(&conn->list);
1828 	write_unlock_bh(&iucv_connection_rwlock);
1829 	fsm_deltimer(&conn->timer);
1830 	netiucv_purge_skb_queue(&conn->collect_queue);
1831 	if (conn->path) {
1832 		iucv_path_sever(conn->path, iucvMagic);
1833 		kfree(conn->path);
1834 		conn->path = NULL;
1835 	}
1836 	netiucv_purge_skb_queue(&conn->commit_queue);
1837 	kfree_fsm(conn->fsm);
1838 	kfree_skb(conn->rx_buff);
1839 	kfree_skb(conn->tx_buff);
1840 }
1841 
1842 /**
1843  * Release everything of a net device.
1844  */
1845 static void netiucv_free_netdevice(struct net_device *dev)
1846 {
1847 	struct netiucv_priv *privptr;
1848 
1849 	IUCV_DBF_TEXT(trace, 3, __func__);
1850 
1851 	if (!dev)
1852 		return;
1853 
1854 	privptr = netdev_priv(dev);
	if (privptr) {
1855 		if (privptr->conn)
1856 			netiucv_remove_connection(privptr->conn);
1857 		if (privptr->fsm)
1858 			kfree_fsm(privptr->fsm);
1859 		privptr->conn = NULL; privptr->fsm = NULL;
1860 		/* privptr gets freed by free_netdev() */
1861 	}
1862 	free_netdev(dev);
1863 }
1864 
1865 /**
1866  * Initialize a net device. (Called from kernel in alloc_netdev())
1867  */
1868 static void netiucv_setup_netdevice(struct net_device *dev)
1869 {
1870 	dev->mtu	         = NETIUCV_MTU_DEFAULT;
1871 	dev->hard_start_xmit     = netiucv_tx;
1872 	dev->open	         = netiucv_open;
1873 	dev->stop	         = netiucv_close;
1874 	dev->get_stats	         = netiucv_stats;
1875 	dev->change_mtu          = netiucv_change_mtu;
1876 	dev->destructor          = netiucv_free_netdevice;
1877 	dev->hard_header_len     = NETIUCV_HDRLEN;
1878 	dev->addr_len            = 0;
1879 	dev->type                = ARPHRD_SLIP;
1880 	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
1881 	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
1882 }
1883 
1884 /**
1885  * Allocate and initialize everything of a net device.
1886  */
1887 static struct net_device *netiucv_init_netdevice(char *username)
1888 {
1889 	struct netiucv_priv *privptr;
1890 	struct net_device *dev;
1891 
1892 	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1893 			   netiucv_setup_netdevice);
1894 	if (!dev)
1895 		return NULL;
1896 	if (dev_alloc_name(dev, dev->name) < 0)
1897 		goto out_netdev;
1898 
1899 	privptr = netdev_priv(dev);
1900 	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1901 				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1902 				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1903 	if (!privptr->fsm)
1904 		goto out_netdev;
1905 
1906 	privptr->conn = netiucv_new_connection(dev, username);
1907 	if (!privptr->conn) {
1908 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1909 		goto out_fsm;
1910 	}
1911 	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1912 	return dev;
1913 
1914 out_fsm:
1915 	kfree_fsm(privptr->fsm);
1916 out_netdev:
1917 	free_netdev(dev);
1918 	return NULL;
1919 }
1920 
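/**
 * Write handler for the driver's "connection" attribute.
 * Writing a z/VM user ID creates a new iucv%d interface connected to that
 * peer, e.g. (assuming the driver is registered as "netiucv" on the iucv
 * bus):
 *
 *	echo TESTVM > /sys/bus/iucv/drivers/netiucv/connection
 */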
1921 static ssize_t conn_write(struct device_driver *drv,
1922 			  const char *buf, size_t count)
1923 {
1924 	const char *p;
1925 	char username[9];
1926 	int i, rc;
1927 	struct net_device *dev;
1928 	struct netiucv_priv *priv;
1929 	struct iucv_connection *cp;
1930 
1931 	IUCV_DBF_TEXT(trace, 3, __func__);
1932 	if (count > 9) {
1933 		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1934 		return -EINVAL;
1935 	}
1936 
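	/*
	 * Convert the input to an upper-case, blank padded 8-character
	 * z/VM user ID; only alphanumerics and '$' are accepted.
	 */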
1937 	for (i = 0, p = buf; i < 8 && *p; i++, p++) {
1938 		if (isalnum(*p) || *p == '$') {
1939 			username[i] = toupper(*p);
1940 			continue;
1941 		}
1942 		if (*p == '\n')
1943 			/* trailing lf, grr */
1944 			break;
1945 		IUCV_DBF_TEXT_(setup, 2,
1946 			       "conn_write: invalid character %c\n", *p);
1947 		return -EINVAL;
1948 	}
1949 	while (i < 8)
1950 		username[i++] = ' ';
1951 	username[8] = '\0';
1952 
1953 	read_lock_bh(&iucv_connection_rwlock);
1954 	list_for_each_entry(cp, &iucv_connection_list, list) {
1955 		if (!strncmp(username, cp->userid, 9)) {
1956 			read_unlock_bh(&iucv_connection_rwlock);
1957 			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
1958 				"to %s already exists\n", username);
1959 			return -EEXIST;
1960 		}
1961 	}
1962 	read_unlock_bh(&iucv_connection_rwlock);
1963 
1964 	dev = netiucv_init_netdevice(username);
1965 	if (!dev) {
1966 		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
1967 		return -ENODEV;
1968 	}
1969 
1970 	rc = netiucv_register_device(dev);
1971 	if (rc) {
1972 		IUCV_DBF_TEXT_(setup, 2,
1973 			"ret %d from netiucv_register_device\n", rc);
1974 		goto out_free_ndev;
1975 	}
1976 
1977 	/* sysfs magic */
1978 	priv = netdev_priv(dev);
1979 	SET_NETDEV_DEV(dev, priv->dev);
1980 
1981 	rc = register_netdev(dev);
1982 	if (rc)
1983 		goto out_unreg;
1984 
1986 	return count;
1987 
1988 out_unreg:
1989 	netiucv_unregister_device(priv->dev);
1990 out_free_ndev:
1991 	netiucv_free_netdevice(dev);
1992 	return rc;
1993 }
1994 
1995 static DRIVER_ATTR(connection, 0200, NULL, conn_write);
1996 
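/**
 * Write handler for the driver's "remove" attribute.
 * Writing an interface name (e.g. "iucv0") unregisters that interface and
 * its connection, unless the device is still up and running.
 */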
1997 static ssize_t remove_write(struct device_driver *drv,
1998 			    const char *buf, size_t count)
1999 {
2000 	struct iucv_connection *cp;
2001 	struct net_device *ndev;
2002 	struct netiucv_priv *priv;
2003 	struct device *dev;
2004 	char name[IFNAMSIZ];
2005 	const char *p;
2006 	int i;
2007 
2008 	IUCV_DBF_TEXT(trace, 3, __func__);
2009 
2010 	if (count >= IFNAMSIZ)
2011 		count = IFNAMSIZ - 1;
2012 
2013 	for (i = 0, p = buf; i < count && *p; i++, p++) {
2014 		if (*p == '\n' || *p == ' ')
2015 			/* trailing lf, grr */
2016 			break;
2017 		name[i] = *p;
2018 	}
2019 	name[i] = '\0';
2020 
2021 	read_lock_bh(&iucv_connection_rwlock);
2022 	list_for_each_entry(cp, &iucv_connection_list, list) {
2023 		ndev = cp->netdev;
2024 		priv = netdev_priv(ndev);
2025 		dev = priv->dev;
2026 		if (strncmp(name, ndev->name, count))
2027 			continue;
2028 		read_unlock_bh(&iucv_connection_rwlock);
2029 		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2030 			PRINT_WARN("netiucv: net device %s active with peer "
2031 				   "%s\n", ndev->name, priv->conn->userid);
2032 			PRINT_WARN("netiucv: %s cannot be removed\n",
2033 				   ndev->name);
2034 			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2035 			return -EPERM;
2036 		}
2037 		unregister_netdev(ndev);
2038 		netiucv_unregister_device(dev);
2039 		return count;
2040 	}
2041 	read_unlock_bh(&iucv_connection_rwlock);
2042 	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2043 	return -EINVAL;
2044 }
2045 
2046 static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2047 
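/*
 * Driver attributes ("connection" and "remove"); netiucv_drv_attr_groups
 * is hooked up to netiucv_driver.groups in netiucv_init() before
 * driver_register() is called.
 */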
2048 static struct attribute *netiucv_drv_attrs[] = {
2049 	&driver_attr_connection.attr,
2050 	&driver_attr_remove.attr,
2051 	NULL,
2052 };
2053 
2054 static struct attribute_group netiucv_drv_attr_group = {
2055 	.attrs = netiucv_drv_attrs,
2056 };
2057 
2058 static struct attribute_group *netiucv_drv_attr_groups[] = {
2059 	&netiucv_drv_attr_group,
2060 	NULL,
2061 };
2062 
2063 static void netiucv_banner(void)
2064 {
2065 	PRINT_INFO("NETIUCV driver initialized\n");
2066 }
2067 
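/**
 * Module cleanup: tear down all remaining connections with their net
 * devices, then unregister the driver, the IUCV handler and the debug
 * facility views.
 */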
2068 static void __exit netiucv_exit(void)
2069 {
2070 	struct iucv_connection *cp;
2071 	struct net_device *ndev;
2072 	struct netiucv_priv *priv;
2073 	struct device *dev;
2074 
2075 	IUCV_DBF_TEXT(trace, 3, __func__);
2076 	while (!list_empty(&iucv_connection_list)) {
2077 		cp = list_entry(iucv_connection_list.next,
2078 				struct iucv_connection, list);
2079 		ndev = cp->netdev;
2080 		priv = netdev_priv(ndev);
2081 		dev = priv->dev;
2082 
2083 		unregister_netdev(ndev);
2084 		netiucv_unregister_device(dev);
2085 	}
2086 
2087 	driver_unregister(&netiucv_driver);
2088 	iucv_unregister(&netiucv_handler, 1);
2089 	iucv_unregister_dbf_views();
2090 
2091 	PRINT_INFO("NETIUCV driver unloaded\n");
2092 	return;
2094 
2095 static int __init netiucv_init(void)
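/**
 * Module initialization: register the debug facility views and the IUCV
 * handler, then register the device driver with its sysfs attribute groups.
 */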
2096 {
2097 	int rc;
2098 
2099 	rc = iucv_register_dbf_views();
2100 	if (rc)
2101 		goto out;
2102 	rc = iucv_register(&netiucv_handler, 1);
2103 	if (rc)
2104 		goto out_dbf;
2105 	IUCV_DBF_TEXT(trace, 3, __func__);
2106 	netiucv_driver.groups = netiucv_drv_attr_groups;
2107 	rc = driver_register(&netiucv_driver);
2108 	if (rc) {
2109 		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2110 		goto out_iucv;
2111 	}
2112 
2113 	netiucv_banner();
2114 	return rc;
2115 
2116 out_iucv:
2117 	iucv_unregister(&netiucv_handler, 1);
2118 out_dbf:
2119 	iucv_unregister_dbf_views();
2120 out:
2121 	return rc;
2122 }
2123 
2124 module_init(netiucv_init);
2125 module_exit(netiucv_exit);
2126 MODULE_LICENSE("GPL");
2127