xref: /openbmc/linux/drivers/s390/net/netiucv.c (revision 2612e3bbc0386368a850140a6c9b990cd496a5ec)
1  // SPDX-License-Identifier: GPL-2.0+
2  /*
3   * IUCV network driver
4   *
5   * Copyright IBM Corp. 2001, 2009
6   *
7   * Author(s):
8   *	Original netiucv driver:
9   *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
10   *	Sysfs integration and all bugs therein:
11   *		Cornelia Huck (cornelia.huck@de.ibm.com)
12   *	PM functions:
13   *		Ursula Braun (ursula.braun@de.ibm.com)
14   *
15   * Documentation used:
16   *  the source of the original IUCV driver by:
17   *    Stefan Hegewald <hegewald@de.ibm.com>
18   *    Hartmut Penner <hpenner@de.ibm.com>
19   *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
20   *    Martin Schwidefsky (schwidefsky@de.ibm.com)
21   *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
22   */
23  
24  #define KMSG_COMPONENT "netiucv"
25  #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
26  
27  #undef DEBUG
28  
29  #include <linux/module.h>
30  #include <linux/init.h>
31  #include <linux/kernel.h>
32  #include <linux/slab.h>
33  #include <linux/errno.h>
34  #include <linux/types.h>
35  #include <linux/interrupt.h>
36  #include <linux/timer.h>
37  #include <linux/bitops.h>
38  
39  #include <linux/signal.h>
40  #include <linux/string.h>
41  #include <linux/device.h>
42  
43  #include <linux/ip.h>
44  #include <linux/if_arp.h>
45  #include <linux/tcp.h>
46  #include <linux/skbuff.h>
47  #include <linux/ctype.h>
48  #include <net/dst.h>
49  
50  #include <linux/io.h>
51  #include <linux/uaccess.h>
52  #include <asm/ebcdic.h>
53  
54  #include <net/iucv/iucv.h>
55  #include "fsm.h"
56  
57  MODULE_AUTHOR
58      ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
59  MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
60  
61  /*
62   * Debug Facility stuff
63   */
64  #define IUCV_DBF_SETUP_NAME "iucv_setup"
65  #define IUCV_DBF_SETUP_LEN 64
66  #define IUCV_DBF_SETUP_PAGES 2
67  #define IUCV_DBF_SETUP_NR_AREAS 1
68  #define IUCV_DBF_SETUP_LEVEL 3
69  
70  #define IUCV_DBF_DATA_NAME "iucv_data"
71  #define IUCV_DBF_DATA_LEN 128
72  #define IUCV_DBF_DATA_PAGES 2
73  #define IUCV_DBF_DATA_NR_AREAS 1
74  #define IUCV_DBF_DATA_LEVEL 2
75  
76  #define IUCV_DBF_TRACE_NAME "iucv_trace"
77  #define IUCV_DBF_TRACE_LEN 16
78  #define IUCV_DBF_TRACE_PAGES 4
79  #define IUCV_DBF_TRACE_NR_AREAS 1
80  #define IUCV_DBF_TRACE_LEVEL 3
81  
82  #define IUCV_DBF_TEXT(name,level,text) \
83  	do { \
84  		debug_text_event(iucv_dbf_##name,level,text); \
85  	} while (0)
86  
87  #define IUCV_DBF_HEX(name,level,addr,len) \
88  	do { \
89  		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
90  	} while (0)
91  
92  DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
93  
94  #define IUCV_DBF_TEXT_(name, level, text...) \
95  	do { \
96  		if (debug_level_enabled(iucv_dbf_##name, level)) { \
97  			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
98  			sprintf(__buf, text); \
99  			debug_text_event(iucv_dbf_##name, level, __buf); \
100  			put_cpu_var(iucv_dbf_txt_buf); \
101  		} \
102  	} while (0)
103  
104  #define IUCV_DBF_SPRINTF(name,level,text...) \
105  	do { \
106  		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
107  		debug_sprintf_event(iucv_dbf_trace, level, text ); \
108  	} while (0)
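/*
 * Illustrative use of the macros above, mirroring the calls made later in
 * this file (level, format and arguments are examples only):
 *
 *	IUCV_DBF_TEXT(trace, 4, __func__);
 *	IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_send\n", rc);
 *	IUCV_DBF_HEX(data, 2, conn->rx_buff->data, msg->length);
 */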
109  
110  /*
111   * some more debug stuff
112   */
113  #define PRINTK_HEADER " iucv: "       /* for debugging */
114  
115  static struct device_driver netiucv_driver = {
116  	.owner = THIS_MODULE,
117  	.name = "netiucv",
118  	.bus  = &iucv_bus,
119  };
120  
121  /*
122   * Per connection profiling data
123   */
124  struct connection_profile {
125  	unsigned long maxmulti;
126  	unsigned long maxcqueue;
127  	unsigned long doios_single;
128  	unsigned long doios_multi;
129  	unsigned long txlen;
130  	unsigned long tx_time;
131  	unsigned long send_stamp;
132  	unsigned long tx_pending;
133  	unsigned long tx_max_pending;
134  };
135  
136  /*
137   * Representation of one iucv connection
138   */
139  struct iucv_connection {
140  	struct list_head	  list;
141  	struct iucv_path	  *path;
142  	struct sk_buff            *rx_buff;
143  	struct sk_buff            *tx_buff;
144  	struct sk_buff_head       collect_queue;
145  	struct sk_buff_head	  commit_queue;
146  	spinlock_t                collect_lock;
147  	int                       collect_len;
148  	int                       max_buffsize;
149  	fsm_timer                 timer;
150  	fsm_instance              *fsm;
151  	struct net_device         *netdev;
152  	struct connection_profile prof;
153  	char                      userid[9];
154  	char			  userdata[17];
155  };
156  
157  /*
158   * Linked list of all connection structs.
159   */
160  static LIST_HEAD(iucv_connection_list);
161  static DEFINE_RWLOCK(iucv_connection_rwlock);
162  
163  /*
164   * Representation of event-data for the
165   * connection state machine.
166   */
167  struct iucv_event {
168  	struct iucv_connection *conn;
169  	void                   *data;
170  };
171  
172  /*
173   * Private part of the network device structure
174   */
175  struct netiucv_priv {
176  	struct net_device_stats stats;
177  	unsigned long           tbusy;
178  	fsm_instance            *fsm;
179  	struct iucv_connection  *conn;
180  	struct device           *dev;
181  };
182  
183  /*
184   * Link level header for a packet.
185   */
186  struct ll_header {
187  	u16 next;
188  };
189  
190  #define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
191  #define NETIUCV_BUFSIZE_MAX	 65537
192  #define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
193  #define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
194  #define NETIUCV_MTU_DEFAULT      9216
195  #define NETIUCV_QUEUELEN_DEFAULT 50
196  #define NETIUCV_TIMEOUT_5SEC     5000
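/*
 * On-the-wire layout used by this driver (see netiucv_transmit_skb(),
 * conn_action_txdone() and netiucv_unpack_skb() below): each IUCV message
 * carries one or more packets, every packet preceded by a 2-byte ll_header
 * whose "next" field holds the offset of the following header within the
 * message; a trailing header with next == 0 terminates the message.  A
 * message with two packets of 100 and 200 bytes would look like this
 * (offsets illustrative):
 *
 *	[next=102][100 byte packet][next=304][200 byte packet][next=0]
 */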
197  
198  /*
199   * Compatibility macros for busy handling
200   * of network devices.
201   */
202  static void netiucv_clear_busy(struct net_device *dev)
203  {
204  	struct netiucv_priv *priv = netdev_priv(dev);
205  	clear_bit(0, &priv->tbusy);
206  	netif_wake_queue(dev);
207  }
208  
209  static int netiucv_test_and_set_busy(struct net_device *dev)
210  {
211  	struct netiucv_priv *priv = netdev_priv(dev);
212  	netif_stop_queue(dev);
213  	return test_and_set_bit(0, &priv->tbusy);
214  }
215  
216  static u8 iucvMagic_ascii[16] = {
217  	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
218  	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
219  };
220  
221  static u8 iucvMagic_ebcdic[16] = {
222  	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
223  	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
224  };
225  
226  /*
227   * Convert an iucv userId to its printable
228   * form (strip whitespace at end).
229   *
230   * @param name An iucv userId
231   *
232   * @returns The printable string (static data!!)
233   */
234  static char *netiucv_printname(char *name, int len)
235  {
236  	static char tmp[17];
237  	char *p = tmp;
238  	memcpy(tmp, name, len);
239  	tmp[len] = '\0';
240  	while (*p && ((p - tmp) < len) && (!isspace(*p)))
241  		p++;
242  	*p = '\0';
243  	return tmp;
244  }
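/*
 * Example (illustrative): netiucv_printname("USER1   ", 8) yields "USER1".
 */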
245  
246  static char *netiucv_printuser(struct iucv_connection *conn)
247  {
248  	static char tmp_uid[9];
249  	static char tmp_udat[17];
250  	static char buf[100];
251  
252  	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
253  		tmp_uid[8] = '\0';
254  		tmp_udat[16] = '\0';
255  		memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
256  		memcpy(tmp_udat, conn->userdata, 16);
257  		EBCASC(tmp_udat, 16);
258  		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
259  		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
260  		return buf;
261  	} else
262  		return netiucv_printname(conn->userid, 8);
263  }
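/*
 * Example (illustrative): for userid "LNXGUEST" and a non-default userdata
 * of "ROUTER01        ", netiucv_printuser() yields "LNXGUEST.ROUTER01";
 * with the default iucvMagic userdata it yields just "LNXGUEST".
 */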
264  
265  /*
266   * States of the interface statemachine.
267   */
268  enum dev_states {
269  	DEV_STATE_STOPPED,
270  	DEV_STATE_STARTWAIT,
271  	DEV_STATE_STOPWAIT,
272  	DEV_STATE_RUNNING,
273  	/*
274  	 * MUST always be the last element!
275  	 */
276  	NR_DEV_STATES
277  };
278  
279  static const char *dev_state_names[] = {
280  	"Stopped",
281  	"StartWait",
282  	"StopWait",
283  	"Running",
284  };
285  
286  /*
287   * Events of the interface statemachine.
288   */
289  enum dev_events {
290  	DEV_EVENT_START,
291  	DEV_EVENT_STOP,
292  	DEV_EVENT_CONUP,
293  	DEV_EVENT_CONDOWN,
294  	/*
295  	 * MUST always be the last element!
296  	 */
297  	NR_DEV_EVENTS
298  };
299  
300  static const char *dev_event_names[] = {
301  	"Start",
302  	"Stop",
303  	"Connection up",
304  	"Connection down",
305  };
306  
307  /*
308   * Events of the connection statemachine
309   */
310  enum conn_events {
311  	/*
312  	 * Events representing callbacks from
313  	 * the lowlevel iucv layer
314  	 */
315  	CONN_EVENT_CONN_REQ,
316  	CONN_EVENT_CONN_ACK,
317  	CONN_EVENT_CONN_REJ,
318  	CONN_EVENT_CONN_SUS,
319  	CONN_EVENT_CONN_RES,
320  	CONN_EVENT_RX,
321  	CONN_EVENT_TXDONE,
322  
323  	/*
324  	 * Events representing error return codes from
325  	 * calls to the lowlevel iucv layer
326  	 */
327  
328  	/*
329  	 * Event, representing timer expiry.
330  	 */
331  	CONN_EVENT_TIMER,
332  
333  	/*
334  	 * Events representing commands from upper levels.
335  	 */
336  	CONN_EVENT_START,
337  	CONN_EVENT_STOP,
338  
339  	/*
340  	 * MUST always be the last element!
341  	 */
342  	NR_CONN_EVENTS,
343  };
344  
345  static const char *conn_event_names[] = {
346  	"Remote connection request",
347  	"Remote connection acknowledge",
348  	"Remote connection reject",
349  	"Connection suspended",
350  	"Connection resumed",
351  	"Data received",
352  	"Data sent",
353  
354  	"Timer",
355  
356  	"Start",
357  	"Stop",
358  };
359  
360  /*
361   * States of the connection statemachine.
362   */
363  enum conn_states {
364  	/*
365  	 * Connection not assigned to any device,
366  	 * initial state, invalid
367  	 */
368  	CONN_STATE_INVALID,
369  
370  	/*
371  	 * Userid assigned but not operating
372  	 */
373  	CONN_STATE_STOPPED,
374  
375  	/*
376  	 * Connection registered,
377  	 * no connection request sent yet,
378  	 * no connection request received
379  	 */
380  	CONN_STATE_STARTWAIT,
381  
382  	/*
383  	 * Connection registered and connection request sent,
384  	 * no acknowledge and no connection request received yet.
385  	 */
386  	CONN_STATE_SETUPWAIT,
387  
388  	/*
389  	 * Connection up and running idle
390  	 */
391  	CONN_STATE_IDLE,
392  
393  	/*
394  	 * Data sent, awaiting CONN_EVENT_TXDONE
395  	 */
396  	CONN_STATE_TX,
397  
398  	/*
399  	 * Error during registration.
400  	 */
401  	CONN_STATE_REGERR,
402  
403  	/*
404  	 * Error during connection setup.
405  	 */
406  	CONN_STATE_CONNERR,
407  
408  	/*
409  	 * MUST always be the last element!
410  	 */
411  	NR_CONN_STATES,
412  };
413  
414  static const char *conn_state_names[] = {
415  	"Invalid",
416  	"Stopped",
417  	"StartWait",
418  	"SetupWait",
419  	"Idle",
420  	"TX",
422  	"Registration error",
423  	"Connect error",
424  };
425  
426  
427  /*
428   * Debug Facility Stuff
429   */
430  static debug_info_t *iucv_dbf_setup = NULL;
431  static debug_info_t *iucv_dbf_data = NULL;
432  static debug_info_t *iucv_dbf_trace = NULL;
433  
434  DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
435  
436  static void iucv_unregister_dbf_views(void)
437  {
438  	debug_unregister(iucv_dbf_setup);
439  	debug_unregister(iucv_dbf_data);
440  	debug_unregister(iucv_dbf_trace);
441  }
442  static int iucv_register_dbf_views(void)
443  {
444  	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
445  					IUCV_DBF_SETUP_PAGES,
446  					IUCV_DBF_SETUP_NR_AREAS,
447  					IUCV_DBF_SETUP_LEN);
448  	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
449  				       IUCV_DBF_DATA_PAGES,
450  				       IUCV_DBF_DATA_NR_AREAS,
451  				       IUCV_DBF_DATA_LEN);
452  	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
453  					IUCV_DBF_TRACE_PAGES,
454  					IUCV_DBF_TRACE_NR_AREAS,
455  					IUCV_DBF_TRACE_LEN);
456  
457  	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
458  	    (iucv_dbf_trace == NULL)) {
459  		iucv_unregister_dbf_views();
460  		return -ENOMEM;
461  	}
462  	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
463  	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
464  
465  	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
466  	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
467  
468  	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
469  	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
470  
471  	return 0;
472  }
473  
474  /*
475   * Callback-wrappers, called from lowlevel iucv layer.
476   */
477  
478  static void netiucv_callback_rx(struct iucv_path *path,
479  				struct iucv_message *msg)
480  {
481  	struct iucv_connection *conn = path->private;
482  	struct iucv_event ev;
483  
484  	ev.conn = conn;
485  	ev.data = msg;
486  	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
487  }
488  
489  static void netiucv_callback_txdone(struct iucv_path *path,
490  				    struct iucv_message *msg)
491  {
492  	struct iucv_connection *conn = path->private;
493  	struct iucv_event ev;
494  
495  	ev.conn = conn;
496  	ev.data = msg;
497  	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
498  }
499  
500  static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
501  {
502  	struct iucv_connection *conn = path->private;
503  
504  	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
505  }
506  
507  static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid,
508  				    u8 *ipuser)
509  {
510  	struct iucv_connection *conn = path->private;
511  	struct iucv_event ev;
512  	static char tmp_user[9];
513  	static char tmp_udat[17];
514  	int rc;
515  
516  	rc = -EINVAL;
517  	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
518  	memcpy(tmp_udat, ipuser, 16);
519  	EBCASC(tmp_udat, 16);
520  	read_lock_bh(&iucv_connection_rwlock);
521  	list_for_each_entry(conn, &iucv_connection_list, list) {
522  		if (strncmp(ipvmid, conn->userid, 8) ||
523  		    strncmp(ipuser, conn->userdata, 16))
524  			continue;
525  		/* Found a matching connection for this path. */
526  		conn->path = path;
527  		ev.conn = conn;
528  		ev.data = path;
529  		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
530  		rc = 0;
531  	}
532  	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
533  		       tmp_user, netiucv_printname(tmp_udat, 16));
534  	read_unlock_bh(&iucv_connection_rwlock);
535  	return rc;
536  }
537  
538  static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
539  {
540  	struct iucv_connection *conn = path->private;
541  
542  	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
543  }
544  
545  static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser)
546  {
547  	struct iucv_connection *conn = path->private;
548  
549  	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
550  }
551  
552  static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
553  {
554  	struct iucv_connection *conn = path->private;
555  
556  	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
557  }
558  
559  /*
560   * NOP action for statemachines
561   */
562  static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
563  {
564  }
565  
566  /*
567   * Actions of the connection statemachine
568   */
569  
570  /*
571   * netiucv_unpack_skb
572   * @conn: The connection where this skb has been received.
573   * @pskb: The received skb.
574   *
575   * Unpack a just received skb and hand it over to upper layers.
576   * Helper function for conn_action_rx.
577   */
578  static void netiucv_unpack_skb(struct iucv_connection *conn,
579  			       struct sk_buff *pskb)
580  {
581  	struct net_device     *dev = conn->netdev;
582  	struct netiucv_priv   *privptr = netdev_priv(dev);
583  	u16 offset = 0;
584  
585  	skb_put(pskb, NETIUCV_HDRLEN);
586  	pskb->dev = dev;
587  	pskb->ip_summed = CHECKSUM_NONE;
588  	pskb->protocol = cpu_to_be16(ETH_P_IP);
589  
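	/*
	 * Walk the chain of ll_headers in the received buffer.  Each
	 * header's "next" field holds the absolute offset of the next
	 * header; below it is converted into the length of the current
	 * packet, which is copied into its own skb and handed to
	 * netif_rx().  A header with next == 0 ends the chain.
	 */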
590  	while (1) {
591  		struct sk_buff *skb;
592  		struct ll_header *header = (struct ll_header *) pskb->data;
593  
594  		if (!header->next)
595  			break;
596  
597  		skb_pull(pskb, NETIUCV_HDRLEN);
598  		header->next -= offset;
599  		offset += header->next;
600  		header->next -= NETIUCV_HDRLEN;
601  		if (skb_tailroom(pskb) < header->next) {
602  			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
603  				header->next, skb_tailroom(pskb));
604  			return;
605  		}
606  		skb_put(pskb, header->next);
607  		skb_reset_mac_header(pskb);
608  		skb = dev_alloc_skb(pskb->len);
609  		if (!skb) {
610  			IUCV_DBF_TEXT(data, 2,
611  				"Out of memory in netiucv_unpack_skb\n");
612  			privptr->stats.rx_dropped++;
613  			return;
614  		}
615  		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
616  					  pskb->len);
617  		skb_reset_mac_header(skb);
618  		skb->dev = pskb->dev;
619  		skb->protocol = pskb->protocol;
620  		pskb->ip_summed = CHECKSUM_UNNECESSARY;
621  		privptr->stats.rx_packets++;
622  		privptr->stats.rx_bytes += skb->len;
623  		netif_rx(skb);
624  		skb_pull(pskb, header->next);
625  		skb_put(pskb, NETIUCV_HDRLEN);
626  	}
627  }
628  
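/*
 * conn_action_rx
 * Receive a pending IUCV message into conn->rx_buff and unpack it.
 */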
629  static void conn_action_rx(fsm_instance *fi, int event, void *arg)
630  {
631  	struct iucv_event *ev = arg;
632  	struct iucv_connection *conn = ev->conn;
633  	struct iucv_message *msg = ev->data;
634  	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
635  	int rc;
636  
637  	IUCV_DBF_TEXT(trace, 4, __func__);
638  
639  	if (!conn->netdev) {
640  		iucv_message_reject(conn->path, msg);
641  		IUCV_DBF_TEXT(data, 2,
642  			      "Received data for unlinked connection\n");
643  		return;
644  	}
645  	if (msg->length > conn->max_buffsize) {
646  		iucv_message_reject(conn->path, msg);
647  		privptr->stats.rx_dropped++;
648  		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
649  			       msg->length, conn->max_buffsize);
650  		return;
651  	}
652  	conn->rx_buff->data = conn->rx_buff->head;
653  	skb_reset_tail_pointer(conn->rx_buff);
654  	conn->rx_buff->len = 0;
655  	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
656  				  msg->length, NULL);
657  	if (rc || msg->length < 5) {
658  		privptr->stats.rx_errors++;
659  		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
660  		return;
661  	}
662  	netiucv_unpack_skb(conn, conn->rx_buff);
663  }
664  
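/*
 * conn_action_txdone
 * Handle a transmit confirmation: release the confirmed skb, then merge
 * any packets queued on collect_queue into tx_buff and send them as a
 * single multi-packet IUCV message.
 */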
665  static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
666  {
667  	struct iucv_event *ev = arg;
668  	struct iucv_connection *conn = ev->conn;
669  	struct iucv_message *msg = ev->data;
670  	struct iucv_message txmsg;
671  	struct netiucv_priv *privptr = NULL;
672  	u32 single_flag = msg->tag;
673  	u32 txbytes = 0;
674  	u32 txpackets = 0;
675  	u32 stat_maxcq = 0;
676  	struct sk_buff *skb;
677  	unsigned long saveflags;
678  	struct ll_header header;
679  	int rc;
680  
681  	IUCV_DBF_TEXT(trace, 4, __func__);
682  
683  	if (!conn || !conn->netdev) {
684  		IUCV_DBF_TEXT(data, 2,
685  			      "Send confirmation for unlinked connection\n");
686  		return;
687  	}
688  	privptr = netdev_priv(conn->netdev);
689  	conn->prof.tx_pending--;
690  	if (single_flag) {
691  		if ((skb = skb_dequeue(&conn->commit_queue))) {
692  			refcount_dec(&skb->users);
693  			if (privptr) {
694  				privptr->stats.tx_packets++;
695  				privptr->stats.tx_bytes +=
696  					(skb->len - NETIUCV_HDRLEN
697  						  - NETIUCV_HDRLEN);
698  			}
699  			dev_kfree_skb_any(skb);
700  		}
701  	}
702  	conn->tx_buff->data = conn->tx_buff->head;
703  	skb_reset_tail_pointer(conn->tx_buff);
704  	conn->tx_buff->len = 0;
705  	spin_lock_irqsave(&conn->collect_lock, saveflags);
706  	while ((skb = skb_dequeue(&conn->collect_queue))) {
707  		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
708  		skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
709  		skb_copy_from_linear_data(skb,
710  					  skb_put(conn->tx_buff, skb->len),
711  					  skb->len);
712  		txbytes += skb->len;
713  		txpackets++;
714  		stat_maxcq++;
715  		refcount_dec(&skb->users);
716  		dev_kfree_skb_any(skb);
717  	}
718  	if (conn->collect_len > conn->prof.maxmulti)
719  		conn->prof.maxmulti = conn->collect_len;
720  	conn->collect_len = 0;
721  	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
722  	if (conn->tx_buff->len == 0) {
723  		fsm_newstate(fi, CONN_STATE_IDLE);
724  		return;
725  	}
726  
727  	header.next = 0;
728  	skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
729  	conn->prof.send_stamp = jiffies;
730  	txmsg.class = 0;
731  	txmsg.tag = 0;
732  	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
733  			       conn->tx_buff->data, conn->tx_buff->len);
734  	conn->prof.doios_multi++;
735  	conn->prof.txlen += conn->tx_buff->len;
736  	conn->prof.tx_pending++;
737  	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
738  		conn->prof.tx_max_pending = conn->prof.tx_pending;
739  	if (rc) {
740  		conn->prof.tx_pending--;
741  		fsm_newstate(fi, CONN_STATE_IDLE);
742  		if (privptr)
743  			privptr->stats.tx_errors += txpackets;
744  		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
745  	} else {
746  		if (privptr) {
747  			privptr->stats.tx_packets += txpackets;
748  			privptr->stats.tx_bytes += txbytes;
749  		}
750  		if (stat_maxcq > conn->prof.maxcqueue)
751  			conn->prof.maxcqueue = stat_maxcq;
752  	}
753  }
754  
755  static struct iucv_handler netiucv_handler = {
756  	.path_pending	  = netiucv_callback_connreq,
757  	.path_complete	  = netiucv_callback_connack,
758  	.path_severed	  = netiucv_callback_connrej,
759  	.path_quiesced	  = netiucv_callback_connsusp,
760  	.path_resumed	  = netiucv_callback_connres,
761  	.message_pending  = netiucv_callback_rx,
762  	.message_complete = netiucv_callback_txdone,
763  };
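/*
 * The handler above is passed to iucv_path_connect()/iucv_path_accept()
 * (see conn_action_start() and conn_action_connaccept() below) with the
 * struct iucv_connection as private data, so each callback can feed the
 * corresponding event into that connection's state machine.
 */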
764  
765  static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
766  {
767  	struct iucv_event *ev = arg;
768  	struct iucv_connection *conn = ev->conn;
769  	struct iucv_path *path = ev->data;
770  	struct net_device *netdev = conn->netdev;
771  	struct netiucv_priv *privptr = netdev_priv(netdev);
772  	int rc;
773  
774  	IUCV_DBF_TEXT(trace, 3, __func__);
775  
776  	conn->path = path;
777  	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
778  	path->flags = 0;
779  	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
780  	if (rc) {
781  		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
782  		return;
783  	}
784  	fsm_newstate(fi, CONN_STATE_IDLE);
785  	netdev->tx_queue_len = conn->path->msglim;
786  	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
787  }
788  
789  static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
790  {
791  	struct iucv_event *ev = arg;
792  	struct iucv_path *path = ev->data;
793  
794  	IUCV_DBF_TEXT(trace, 3, __func__);
795  	iucv_path_sever(path, NULL);
796  }
797  
798  static void conn_action_connack(fsm_instance *fi, int event, void *arg)
799  {
800  	struct iucv_connection *conn = arg;
801  	struct net_device *netdev = conn->netdev;
802  	struct netiucv_priv *privptr = netdev_priv(netdev);
803  
804  	IUCV_DBF_TEXT(trace, 3, __func__);
805  	fsm_deltimer(&conn->timer);
806  	fsm_newstate(fi, CONN_STATE_IDLE);
807  	netdev->tx_queue_len = conn->path->msglim;
808  	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
809  }
810  
811  static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
812  {
813  	struct iucv_connection *conn = arg;
814  
815  	IUCV_DBF_TEXT(trace, 3, __func__);
816  	fsm_deltimer(&conn->timer);
817  	iucv_path_sever(conn->path, conn->userdata);
818  	fsm_newstate(fi, CONN_STATE_STARTWAIT);
819  }
820  
821  static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
822  {
823  	struct iucv_connection *conn = arg;
824  	struct net_device *netdev = conn->netdev;
825  	struct netiucv_priv *privptr = netdev_priv(netdev);
826  
827  	IUCV_DBF_TEXT(trace, 3, __func__);
828  
829  	fsm_deltimer(&conn->timer);
830  	iucv_path_sever(conn->path, conn->userdata);
831  	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
832  			       "connection\n", netiucv_printuser(conn));
833  	IUCV_DBF_TEXT(data, 2,
834  		      "conn_action_connsever: Remote dropped connection\n");
835  	fsm_newstate(fi, CONN_STATE_STARTWAIT);
836  	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
837  }
838  
839  static void conn_action_start(fsm_instance *fi, int event, void *arg)
840  {
841  	struct iucv_connection *conn = arg;
842  	struct net_device *netdev = conn->netdev;
843  	struct netiucv_priv *privptr = netdev_priv(netdev);
844  	int rc;
845  
846  	IUCV_DBF_TEXT(trace, 3, __func__);
847  
848  	fsm_newstate(fi, CONN_STATE_STARTWAIT);
849  
850  	/*
851  	 * We must set the state before calling iucv_connect because the
852  	 * callback handler could be called at any point after the connection
853  	 * request is sent
854  	 */
855  
856  	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
857  	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
858  	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
859  		netdev->name, netiucv_printuser(conn));
860  
861  	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
862  			       NULL, conn->userdata, conn);
863  	switch (rc) {
864  	case 0:
865  		netdev->tx_queue_len = conn->path->msglim;
866  		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
867  			     CONN_EVENT_TIMER, conn);
868  		return;
869  	case 11:
870  		dev_warn(privptr->dev,
871  			"The IUCV device failed to connect to z/VM guest %s\n",
872  			netiucv_printname(conn->userid, 8));
873  		fsm_newstate(fi, CONN_STATE_STARTWAIT);
874  		break;
875  	case 12:
876  		dev_warn(privptr->dev,
877  			"The IUCV device failed to connect to the peer on z/VM"
878  			" guest %s\n", netiucv_printname(conn->userid, 8));
879  		fsm_newstate(fi, CONN_STATE_STARTWAIT);
880  		break;
881  	case 13:
882  		dev_err(privptr->dev,
883  			"Connecting the IUCV device would exceed the maximum"
884  			" number of IUCV connections\n");
885  		fsm_newstate(fi, CONN_STATE_CONNERR);
886  		break;
887  	case 14:
888  		dev_err(privptr->dev,
889  			"z/VM guest %s has too many IUCV connections"
890  			" to connect with the IUCV device\n",
891  			netiucv_printname(conn->userid, 8));
892  		fsm_newstate(fi, CONN_STATE_CONNERR);
893  		break;
894  	case 15:
895  		dev_err(privptr->dev,
896  			"The IUCV device cannot connect to a z/VM guest with no"
897  			" IUCV authorization\n");
898  		fsm_newstate(fi, CONN_STATE_CONNERR);
899  		break;
900  	default:
901  		dev_err(privptr->dev,
902  			"Connecting the IUCV device failed with error %d\n",
903  			rc);
904  		fsm_newstate(fi, CONN_STATE_CONNERR);
905  		break;
906  	}
907  	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
908  	kfree(conn->path);
909  	conn->path = NULL;
910  }
911  
912  static void netiucv_purge_skb_queue(struct sk_buff_head *q)
913  {
914  	struct sk_buff *skb;
915  
916  	while ((skb = skb_dequeue(q))) {
917  		refcount_dec(&skb->users);
918  		dev_kfree_skb_any(skb);
919  	}
920  }
921  
922  static void conn_action_stop(fsm_instance *fi, int event, void *arg)
923  {
924  	struct iucv_event *ev = arg;
925  	struct iucv_connection *conn = ev->conn;
926  	struct net_device *netdev = conn->netdev;
927  	struct netiucv_priv *privptr = netdev_priv(netdev);
928  
929  	IUCV_DBF_TEXT(trace, 3, __func__);
930  
931  	fsm_deltimer(&conn->timer);
932  	fsm_newstate(fi, CONN_STATE_STOPPED);
933  	netiucv_purge_skb_queue(&conn->collect_queue);
934  	if (conn->path) {
935  		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
936  		iucv_path_sever(conn->path, conn->userdata);
937  		kfree(conn->path);
938  		conn->path = NULL;
939  	}
940  	netiucv_purge_skb_queue(&conn->commit_queue);
941  	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
942  }
943  
944  static void conn_action_inval(fsm_instance *fi, int event, void *arg)
945  {
946  	struct iucv_connection *conn = arg;
947  	struct net_device *netdev = conn->netdev;
948  
949  	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
950  		netdev->name, conn->userid);
951  }
952  
953  static const fsm_node conn_fsm[] = {
954  	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
955  	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
956  
957  	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
958  	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
959  	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
960  	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
961  	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
962  	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
963  	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
964  
965  	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
966  	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
967  	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
968  	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
969  	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
970  
971  	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
972  	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
973  
974  	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
975  	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
976  	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
977  
978  	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
979  	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
980  
981  	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
982  	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
983  };
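/*
 * Each entry maps a (state, event) pair to the action that fsm_event()
 * invokes for it; pairs not listed here have no action attached (see
 * fsm.h for how such events are handled).
 */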
984  
985  static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
986  
987  
988  /*
989   * Actions for interface - statemachine.
990   */
991  
992  /*
993   * dev_action_start
994   * @fi: An instance of an interface statemachine.
995   * @event: The event, just happened.
996   * @arg: Generic pointer, casted from struct net_device * upon call.
997   *
998   * Startup connection by sending CONN_EVENT_START to it.
999   */
1000  static void dev_action_start(fsm_instance *fi, int event, void *arg)
1001  {
1002  	struct net_device   *dev = arg;
1003  	struct netiucv_priv *privptr = netdev_priv(dev);
1004  
1005  	IUCV_DBF_TEXT(trace, 3, __func__);
1006  
1007  	fsm_newstate(fi, DEV_STATE_STARTWAIT);
1008  	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1009  }
1010  
1011  /*
1012   * Shutdown connection by sending CONN_EVENT_STOP to it.
1013   *
1014   * @param fi    An instance of an interface statemachine.
1015   * @param event The event, just happened.
1016   * @param arg   Generic pointer, casted from struct net_device * upon call.
1017   */
1018  static void
1019  dev_action_stop(fsm_instance *fi, int event, void *arg)
1020  {
1021  	struct net_device   *dev = arg;
1022  	struct netiucv_priv *privptr = netdev_priv(dev);
1023  	struct iucv_event   ev;
1024  
1025  	IUCV_DBF_TEXT(trace, 3, __func__);
1026  
1027  	ev.conn = privptr->conn;
1028  
1029  	fsm_newstate(fi, DEV_STATE_STOPWAIT);
1030  	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1031  }
1032  
1033  /*
1034   * Called from connection statemachine
1035   * when a connection is up and running.
1036   *
1037   * @param fi    An instance of an interface statemachine.
1038   * @param event The event, just happened.
1039   * @param arg   Generic pointer, casted from struct net_device * upon call.
1040   */
1041  static void
1042  dev_action_connup(fsm_instance *fi, int event, void *arg)
1043  {
1044  	struct net_device   *dev = arg;
1045  	struct netiucv_priv *privptr = netdev_priv(dev);
1046  
1047  	IUCV_DBF_TEXT(trace, 3, __func__);
1048  
1049  	switch (fsm_getstate(fi)) {
1050  		case DEV_STATE_STARTWAIT:
1051  			fsm_newstate(fi, DEV_STATE_RUNNING);
1052  			dev_info(privptr->dev,
1053  				"The IUCV device has been connected"
1054  				" successfully to %s\n",
1055  				netiucv_printuser(privptr->conn));
1056  			IUCV_DBF_TEXT(setup, 3,
1057  				"connection is up and running\n");
1058  			break;
1059  		case DEV_STATE_STOPWAIT:
1060  			IUCV_DBF_TEXT(data, 2,
1061  				"dev_action_connup: in DEV_STATE_STOPWAIT\n");
1062  			break;
1063  	}
1064  }
1065  
1066  /*
1067   * Called from connection statemachine
1068   * when a connection has been shutdown.
1069   *
1070   * @param fi    An instance of an interface statemachine.
1071   * @param event The event, just happened.
1072   * @param arg   Generic pointer, casted from struct net_device * upon call.
1073   */
1074  static void
1075  dev_action_conndown(fsm_instance *fi, int event, void *arg)
1076  {
1077  	IUCV_DBF_TEXT(trace, 3, __func__);
1078  
1079  	switch (fsm_getstate(fi)) {
1080  		case DEV_STATE_RUNNING:
1081  			fsm_newstate(fi, DEV_STATE_STARTWAIT);
1082  			break;
1083  		case DEV_STATE_STOPWAIT:
1084  			fsm_newstate(fi, DEV_STATE_STOPPED);
1085  			IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1086  			break;
1087  	}
1088  }
1089  
1090  static const fsm_node dev_fsm[] = {
1091  	{ DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
1092  
1093  	{ DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
1094  	{ DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
1095  
1096  	{ DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
1097  	{ DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
1098  
1099  	{ DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
1100  	{ DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
1101  	{ DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
1102  };
1103  
1104  static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1105  
1106  /*
1107   * Transmit a packet.
1108   * This is a helper function for netiucv_tx().
1109   *
1110   * @param conn Connection to be used for sending.
1111   * @param skb Pointer to struct sk_buff of packet to send.
1112   *            The linklevel header has already been set up
1113   *            by netiucv_tx().
1114   *
1115   * @return 0 on success, -ERRNO on failure.
1116   */
1117  static int netiucv_transmit_skb(struct iucv_connection *conn,
1118  				struct sk_buff *skb)
1119  {
1120  	struct iucv_message msg;
1121  	unsigned long saveflags;
1122  	struct ll_header header;
1123  	int rc;
1124  
1125  	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1126  		int l = skb->len + NETIUCV_HDRLEN;
1127  
1128  		spin_lock_irqsave(&conn->collect_lock, saveflags);
1129  		if (conn->collect_len + l >
1130  		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
1131  			rc = -EBUSY;
1132  			IUCV_DBF_TEXT(data, 2,
1133  				      "EBUSY from netiucv_transmit_skb\n");
1134  		} else {
1135  			refcount_inc(&skb->users);
1136  			skb_queue_tail(&conn->collect_queue, skb);
1137  			conn->collect_len += l;
1138  			rc = 0;
1139  		}
1140  		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1141  	} else {
1142  		struct sk_buff *nskb = skb;
1143  		/*
1144  		 * Copy the skb to a new allocated skb in lowmem only if the
1145  		 * data is located above 2G in memory or tailroom is < 2.
1146  		 */
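		/*
		 * (GFP_DMA on s390 allocates below 2 GB, which keeps the
		 * buffer addressable for IUCV; the >> 31 check below
		 * detects data sitting at or above the 2 GB line.)
		 */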
1147  		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1148  				    NETIUCV_HDRLEN)) >> 31;
1149  		int copied = 0;
1150  		if (hi || (skb_tailroom(skb) < 2)) {
1151  			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1152  					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1153  			if (!nskb) {
1154  				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1155  				rc = -ENOMEM;
1156  				return rc;
1157  			} else {
1158  				skb_reserve(nskb, NETIUCV_HDRLEN);
1159  				skb_put_data(nskb, skb->data, skb->len);
1160  			}
1161  			copied = 1;
1162  		}
1163  		/*
1164  		 * skb now is below 2G and has enough room. Add headers.
1165  		 */
1166  		header.next = nskb->len + NETIUCV_HDRLEN;
1167  		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1168  		header.next = 0;
1169  		skb_put_data(nskb, &header, NETIUCV_HDRLEN);
1170  
1171  		fsm_newstate(conn->fsm, CONN_STATE_TX);
1172  		conn->prof.send_stamp = jiffies;
1173  
1174  		msg.tag = 1;
1175  		msg.class = 0;
1176  		rc = iucv_message_send(conn->path, &msg, 0, 0,
1177  				       nskb->data, nskb->len);
1178  		conn->prof.doios_single++;
1179  		conn->prof.txlen += skb->len;
1180  		conn->prof.tx_pending++;
1181  		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1182  			conn->prof.tx_max_pending = conn->prof.tx_pending;
1183  		if (rc) {
1184  			struct netiucv_priv *privptr;
1185  			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1186  			conn->prof.tx_pending--;
1187  			privptr = netdev_priv(conn->netdev);
1188  			if (privptr)
1189  				privptr->stats.tx_errors++;
1190  			if (copied)
1191  				dev_kfree_skb(nskb);
1192  			else {
1193  				/*
1194  				 * Remove our headers. They get added
1195  				 * again on retransmit.
1196  				 */
1197  				skb_pull(skb, NETIUCV_HDRLEN);
1198  				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1199  			}
1200  			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1201  		} else {
1202  			if (copied)
1203  				dev_kfree_skb(skb);
1204  			refcount_inc(&nskb->users);
1205  			skb_queue_tail(&conn->commit_queue, nskb);
1206  		}
1207  	}
1208  
1209  	return rc;
1210  }
1211  
1212  /*
1213   * Interface API for upper network layers
1214   */
1215  
1216  /*
1217   * Open an interface.
1218   * Called from generic network layer when ifconfig up is run.
1219   *
1220   * @param dev Pointer to interface struct.
1221   *
1222   * @return 0 on success, -ERRNO on failure. (Never fails.)
1223   */
1224  static int netiucv_open(struct net_device *dev)
1225  {
1226  	struct netiucv_priv *priv = netdev_priv(dev);
1227  
1228  	fsm_event(priv->fsm, DEV_EVENT_START, dev);
1229  	return 0;
1230  }
1231  
1232  /*
1233   * Close an interface.
1234   * Called from generic network layer when ifconfig down is run.
1235   *
1236   * @param dev Pointer to interface struct.
1237   *
1238   * @return 0 on success, -ERRNO on failure. (Never fails.)
1239   */
1240  static int netiucv_close(struct net_device *dev)
1241  {
1242  	struct netiucv_priv *priv = netdev_priv(dev);
1243  
1244  	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1245  	return 0;
1246  }
1247  
1248  /*
1249   * Start transmission of a packet.
1250   * Called from generic network device layer.
1251   */
1252  static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1253  {
1254  	struct netiucv_priv *privptr = netdev_priv(dev);
1255  	int rc;
1256  
1257  	IUCV_DBF_TEXT(trace, 4, __func__);
1258  	/*
1259  	 * Some sanity checks ...
1260  	 */
1261  	if (skb == NULL) {
1262  		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1263  		privptr->stats.tx_dropped++;
1264  		return NETDEV_TX_OK;
1265  	}
1266  	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1267  		IUCV_DBF_TEXT(data, 2,
1268  			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1269  		dev_kfree_skb(skb);
1270  		privptr->stats.tx_dropped++;
1271  		return NETDEV_TX_OK;
1272  	}
1273  
1274  	/*
1275  	 * If connection is not running, try to restart it
1276  	 * and throw away packet.
1277  	 */
1278  	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1279  		dev_kfree_skb(skb);
1280  		privptr->stats.tx_dropped++;
1281  		privptr->stats.tx_errors++;
1282  		privptr->stats.tx_carrier_errors++;
1283  		return NETDEV_TX_OK;
1284  	}
1285  
1286  	if (netiucv_test_and_set_busy(dev)) {
1287  		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1288  		return NETDEV_TX_BUSY;
1289  	}
1290  	netif_trans_update(dev);
1291  	rc = netiucv_transmit_skb(privptr->conn, skb);
1292  	netiucv_clear_busy(dev);
1293  	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1294  }
1295  
1296  /*
1297   * netiucv_stats
1298   * @dev: Pointer to interface struct.
1299   *
1300   * Returns interface statistics of a device.
1301   *
1302   * Returns pointer to stats struct of this interface.
1303   */
1304  static struct net_device_stats *netiucv_stats (struct net_device * dev)
1305  {
1306  	struct netiucv_priv *priv = netdev_priv(dev);
1307  
1308  	IUCV_DBF_TEXT(trace, 5, __func__);
1309  	return &priv->stats;
1310  }
1311  
1312  /*
1313   * attributes in sysfs
1314   */
1315  
1316  static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1317  			 char *buf)
1318  {
1319  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1320  
1321  	IUCV_DBF_TEXT(trace, 5, __func__);
1322  	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1323  }
1324  
1325  static int netiucv_check_user(const char *buf, size_t count, char *username,
1326  			      char *userdata)
1327  {
1328  	const char *p;
1329  	int i;
1330  
1331  	p = strchr(buf, '.');
1332  	if ((p && ((count > 26) ||
1333  		   ((p - buf) > 8) ||
1334  		   (buf + count - p > 18))) ||
1335  	    (!p && (count > 9))) {
1336  		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1337  		return -EINVAL;
1338  	}
1339  
1340  	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
1341  		if (isalnum(*p) || *p == '$') {
1342  			username[i] = toupper(*p);
1343  			continue;
1344  		}
1345  		if (*p == '\n')
1346  			/* trailing lf, grr */
1347  			break;
1348  		IUCV_DBF_TEXT_(setup, 2,
1349  			       "conn_write: invalid character %02x\n", *p);
1350  		return -EINVAL;
1351  	}
1352  	while (i < 8)
1353  		username[i++] = ' ';
1354  	username[8] = '\0';
1355  
1356  	if (*p == '.') {
1357  		p++;
1358  		for (i = 0; i < 16 && *p; i++, p++) {
1359  			if (*p == '\n')
1360  				break;
1361  			userdata[i] = toupper(*p);
1362  		}
1363  		while (i > 0 && i < 16)
1364  			userdata[i++] = ' ';
1365  	} else
1366  		memcpy(userdata, iucvMagic_ascii, 16);
1367  	userdata[16] = '\0';
1368  	ASCEBC(userdata, 16);
1369  
1370  	return 0;
1371  }
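/*
 * Example (illustrative): writing "peer1.router01\n" to the "user"
 * attribute is parsed by the function above into username "PEER1   "
 * (blank-padded to 8 characters) and userdata "ROUTER01        ",
 * which is then converted to EBCDIC.
 */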
1372  
1373  static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1374  			  const char *buf, size_t count)
1375  {
1376  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1377  	struct net_device *ndev = priv->conn->netdev;
1378  	char	username[9];
1379  	char	userdata[17];
1380  	int	rc;
1381  	struct iucv_connection *cp;
1382  
1383  	IUCV_DBF_TEXT(trace, 3, __func__);
1384  	rc = netiucv_check_user(buf, count, username, userdata);
1385  	if (rc)
1386  		return rc;
1387  
1388  	if (memcmp(username, priv->conn->userid, 9) &&
1389  	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1390  		/* username changed while the interface is active. */
1391  		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1392  		return -EPERM;
1393  	}
1394  	read_lock_bh(&iucv_connection_rwlock);
1395  	list_for_each_entry(cp, &iucv_connection_list, list) {
1396  		if (!strncmp(username, cp->userid, 9) &&
1397  		   !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
1398  			read_unlock_bh(&iucv_connection_rwlock);
1399  			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
1400  				"already exists\n", netiucv_printuser(cp));
1401  			return -EEXIST;
1402  		}
1403  	}
1404  	read_unlock_bh(&iucv_connection_rwlock);
1405  	memcpy(priv->conn->userid, username, 9);
1406  	memcpy(priv->conn->userdata, userdata, 17);
1407  	return count;
1408  }
1409  
1410  static DEVICE_ATTR(user, 0644, user_show, user_write);
1411  
1412  static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1413  			    char *buf)
1414  {
1415  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1416  
1417  	IUCV_DBF_TEXT(trace, 5, __func__);
1418  	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1419  }
1420  
1421  static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1422  			     const char *buf, size_t count)
1423  {
1424  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1425  	struct net_device *ndev = priv->conn->netdev;
1426  	unsigned int bs1;
1427  	int rc;
1428  
1429  	IUCV_DBF_TEXT(trace, 3, __func__);
1430  	if (count >= 39)
1431  		return -EINVAL;
1432  
1433  	rc = kstrtouint(buf, 0, &bs1);
1434  
1435  	if (rc == -EINVAL) {
1436  		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n",
1437  			buf);
1438  		return -EINVAL;
1439  	}
1440  	if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) {
1441  		IUCV_DBF_TEXT_(setup, 2,
1442  			"buffer_write: buffer size %d too large\n",
1443  			bs1);
1444  		return -EINVAL;
1445  	}
1446  	if ((ndev->flags & IFF_RUNNING) &&
1447  	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1448  		IUCV_DBF_TEXT_(setup, 2,
1449  			"buffer_write: buffer size %d too small\n",
1450  			bs1);
1451  		return -EINVAL;
1452  	}
1453  	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1454  		IUCV_DBF_TEXT_(setup, 2,
1455  			"buffer_write: buffer size %d too small\n",
1456  			bs1);
1457  		return -EINVAL;
1458  	}
1459  
1460  	priv->conn->max_buffsize = bs1;
1461  	if (!(ndev->flags & IFF_RUNNING))
1462  		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1463  
1464  	return count;
1465  
1466  }
1467  
1468  static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1469  
1470  static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1471  			     char *buf)
1472  {
1473  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1474  
1475  	IUCV_DBF_TEXT(trace, 5, __func__);
1476  	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1477  }
1478  
1479  static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1480  
1481  static ssize_t conn_fsm_show (struct device *dev,
1482  			      struct device_attribute *attr, char *buf)
1483  {
1484  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1485  
1486  	IUCV_DBF_TEXT(trace, 5, __func__);
1487  	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1488  }
1489  
1490  static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1491  
1492  static ssize_t maxmulti_show (struct device *dev,
1493  			      struct device_attribute *attr, char *buf)
1494  {
1495  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1496  
1497  	IUCV_DBF_TEXT(trace, 5, __func__);
1498  	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1499  }
1500  
1501  static ssize_t maxmulti_write (struct device *dev,
1502  			       struct device_attribute *attr,
1503  			       const char *buf, size_t count)
1504  {
1505  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1506  
1507  	IUCV_DBF_TEXT(trace, 4, __func__);
1508  	priv->conn->prof.maxmulti = 0;
1509  	return count;
1510  }
1511  
1512  static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1513  
1514  static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1515  			   char *buf)
1516  {
1517  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1518  
1519  	IUCV_DBF_TEXT(trace, 5, __func__);
1520  	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1521  }
1522  
1523  static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1524  			    const char *buf, size_t count)
1525  {
1526  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1527  
1528  	IUCV_DBF_TEXT(trace, 4, __func__);
1529  	priv->conn->prof.maxcqueue = 0;
1530  	return count;
1531  }
1532  
1533  static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1534  
1535  static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1536  			   char *buf)
1537  {
1538  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1539  
1540  	IUCV_DBF_TEXT(trace, 5, __func__);
1541  	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1542  }
1543  
1544  static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1545  			    const char *buf, size_t count)
1546  {
1547  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1548  
1549  	IUCV_DBF_TEXT(trace, 4, __func__);
1550  	priv->conn->prof.doios_single = 0;
1551  	return count;
1552  }
1553  
1554  static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1555  
1556  static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1557  			   char *buf)
1558  {
1559  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1560  
1561  	IUCV_DBF_TEXT(trace, 5, __func__);
1562  	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1563  }
1564  
1565  static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1566  			    const char *buf, size_t count)
1567  {
1568  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1569  
1570  	IUCV_DBF_TEXT(trace, 5, __func__);
1571  	priv->conn->prof.doios_multi = 0;
1572  	return count;
1573  }
1574  
1575  static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1576  
1577  static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1578  			   char *buf)
1579  {
1580  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1581  
1582  	IUCV_DBF_TEXT(trace, 5, __func__);
1583  	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1584  }
1585  
1586  static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1587  			    const char *buf, size_t count)
1588  {
1589  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1590  
1591  	IUCV_DBF_TEXT(trace, 4, __func__);
1592  	priv->conn->prof.txlen = 0;
1593  	return count;
1594  }
1595  
1596  static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1597  
1598  static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1599  			    char *buf)
1600  {
1601  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1602  
1603  	IUCV_DBF_TEXT(trace, 5, __func__);
1604  	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1605  }
1606  
1607  static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1608  			     const char *buf, size_t count)
1609  {
1610  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1611  
1612  	IUCV_DBF_TEXT(trace, 4, __func__);
1613  	priv->conn->prof.tx_time = 0;
1614  	return count;
1615  }
1616  
1617  static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1618  
1619  static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1620  			    char *buf)
1621  {
1622  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1623  
1624  	IUCV_DBF_TEXT(trace, 5, __func__);
1625  	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1626  }
1627  
1628  static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1629  			     const char *buf, size_t count)
1630  {
1631  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1632  
1633  	IUCV_DBF_TEXT(trace, 4, __func__);
1634  	priv->conn->prof.tx_pending = 0;
1635  	return count;
1636  }
1637  
1638  static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1639  
1640  static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1641  			    char *buf)
1642  {
1643  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1644  
1645  	IUCV_DBF_TEXT(trace, 5, __func__);
1646  	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1647  }
1648  
1649  static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1650  			     const char *buf, size_t count)
1651  {
1652  	struct netiucv_priv *priv = dev_get_drvdata(dev);
1653  
1654  	IUCV_DBF_TEXT(trace, 4, __func__);
1655  	priv->conn->prof.tx_max_pending = 0;
1656  	return count;
1657  }
1658  
1659  static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1660  
1661  static struct attribute *netiucv_attrs[] = {
1662  	&dev_attr_buffer.attr,
1663  	&dev_attr_user.attr,
1664  	NULL,
1665  };
1666  
1667  static struct attribute_group netiucv_attr_group = {
1668  	.attrs = netiucv_attrs,
1669  };
1670  
1671  static struct attribute *netiucv_stat_attrs[] = {
1672  	&dev_attr_device_fsm_state.attr,
1673  	&dev_attr_connection_fsm_state.attr,
1674  	&dev_attr_max_tx_buffer_used.attr,
1675  	&dev_attr_max_chained_skbs.attr,
1676  	&dev_attr_tx_single_write_ops.attr,
1677  	&dev_attr_tx_multi_write_ops.attr,
1678  	&dev_attr_netto_bytes.attr,
1679  	&dev_attr_max_tx_io_time.attr,
1680  	&dev_attr_tx_pending.attr,
1681  	&dev_attr_tx_max_pending.attr,
1682  	NULL,
1683  };
1684  
1685  static struct attribute_group netiucv_stat_attr_group = {
1686  	.name  = "stats",
1687  	.attrs = netiucv_stat_attrs,
1688  };
1689  
1690  static const struct attribute_group *netiucv_attr_groups[] = {
1691  	&netiucv_stat_attr_group,
1692  	&netiucv_attr_group,
1693  	NULL,
1694  };
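/*
 * For illustration only: with the groups above, every connection device
 * carries the "user" and "buffer" attributes at its top level and the
 * counters below a "stats" subdirectory.  As the *_write handlers show,
 * writing anything to a counter simply resets it to zero.  Assuming the
 * usual sysfs placement of devices on the iucv bus, a session could look
 * like this (the device name is an example, formed as "net<ifname>" in
 * netiucv_register_device() below):
 *
 *	cat /sys/bus/iucv/devices/netiucv0/stats/tx_pending
 *	echo 0 > /sys/bus/iucv/devices/netiucv0/stats/tx_max_pending
 */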
1695  
1696  static int netiucv_register_device(struct net_device *ndev)
1697  {
1698  	struct netiucv_priv *priv = netdev_priv(ndev);
1699  	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1700  	int ret;
1701  
1702  	IUCV_DBF_TEXT(trace, 3, __func__);
1703  
1704  	if (dev) {
1705  		dev_set_name(dev, "net%s", ndev->name);
1706  		dev->bus = &iucv_bus;
1707  		dev->parent = iucv_root;
1708  		dev->groups = netiucv_attr_groups;
1709  		/*
1710  		 * The release function could be called after the
1711  		 * module has been unloaded. Its _only_ task is to
1712  		 * free the struct. Therefore, we specify kfree()
1713  		 * directly here. (Probably a little bit obfuscating,
1714  		 * but legitimate ...).
1715  		 */
1716  		dev->release = (void (*)(struct device *))kfree;
1717  		dev->driver = &netiucv_driver;
1718  	} else
1719  		return -ENOMEM;
1720  
1721  	ret = device_register(dev);
1722  	if (ret) {
1723  		put_device(dev);
1724  		return ret;
1725  	}
1726  	priv->dev = dev;
1727  	dev_set_drvdata(dev, priv);
1728  	return 0;
1729  }
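/*
 * Note on the error path in netiucv_register_device(): once
 * device_register() has been called, the device already holds a
 * reference even if registration fails, so the structure must be dropped
 * with put_device() rather than kfree(); the release callback installed
 * above then performs the actual freeing.
 */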
1730  
1731  static void netiucv_unregister_device(struct device *dev)
1732  {
1733  	IUCV_DBF_TEXT(trace, 3, __func__);
1734  	device_unregister(dev);
1735  }
1736  
1737  /*
1738   * Allocate and initialize a new connection structure.
1739   * Add it to the list of netiucv connections;
1740   */
1741  static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1742  						      char *username,
1743  						      char *userdata)
1744  {
1745  	struct iucv_connection *conn;
1746  
1747  	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1748  	if (!conn)
1749  		goto out;
1750  	skb_queue_head_init(&conn->collect_queue);
1751  	skb_queue_head_init(&conn->commit_queue);
1752  	spin_lock_init(&conn->collect_lock);
1753  	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1754  	conn->netdev = dev;
1755  
1756  	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1757  	if (!conn->rx_buff)
1758  		goto out_conn;
1759  	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1760  	if (!conn->tx_buff)
1761  		goto out_rx;
1762  	conn->fsm = init_fsm("netiucvconn", conn_state_names,
1763  			     conn_event_names, NR_CONN_STATES,
1764  			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1765  			     GFP_KERNEL);
1766  	if (!conn->fsm)
1767  		goto out_tx;
1768  
1769  	fsm_settimer(conn->fsm, &conn->timer);
1770  	fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1771  
1772  	if (userdata)
1773  		memcpy(conn->userdata, userdata, 17);
1774  	if (username) {
1775  		memcpy(conn->userid, username, 9);
1776  		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1777  	}
1778  
1779  	write_lock_bh(&iucv_connection_rwlock);
1780  	list_add_tail(&conn->list, &iucv_connection_list);
1781  	write_unlock_bh(&iucv_connection_rwlock);
1782  	return conn;
1783  
1784  out_tx:
1785  	kfree_skb(conn->tx_buff);
1786  out_rx:
1787  	kfree_skb(conn->rx_buff);
1788  out_conn:
1789  	kfree(conn);
1790  out:
1791  	return NULL;
1792  }
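/*
 * The labels above unwind the allocations of netiucv_new_connection() in
 * reverse order (tx_buff, rx_buff, conn), so each failure point only has
 * to jump to the label matching what has already been set up.
 */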
1793  
1794  /*
1795   * Release a connection structure and remove it from the
1796   * list of netiucv connections.
1797   */
1798  static void netiucv_remove_connection(struct iucv_connection *conn)
1799  {
1800  
1801  	IUCV_DBF_TEXT(trace, 3, __func__);
1802  	write_lock_bh(&iucv_connection_rwlock);
1803  	list_del_init(&conn->list);
1804  	write_unlock_bh(&iucv_connection_rwlock);
1805  	fsm_deltimer(&conn->timer);
1806  	netiucv_purge_skb_queue(&conn->collect_queue);
1807  	if (conn->path) {
1808  		iucv_path_sever(conn->path, conn->userdata);
1809  		kfree(conn->path);
1810  		conn->path = NULL;
1811  	}
1812  	netiucv_purge_skb_queue(&conn->commit_queue);
1813  	kfree_fsm(conn->fsm);
1814  	kfree_skb(conn->rx_buff);
1815  	kfree_skb(conn->tx_buff);
1816  }
1817  
1818  /*
1819   * Release everything of a net device.
1820   */
1821  static void netiucv_free_netdevice(struct net_device *dev)
1822  {
1823  	struct netiucv_priv *privptr = netdev_priv(dev);
1824  
1825  	IUCV_DBF_TEXT(trace, 3, __func__);
1826  
1827  	if (!dev)
1828  		return;
1829  
1830  	if (privptr) {
1831  		if (privptr->conn)
1832  			netiucv_remove_connection(privptr->conn);
1833  		if (privptr->fsm)
1834  			kfree_fsm(privptr->fsm);
1835  		privptr->conn = NULL; privptr->fsm = NULL;
1836  		/* privptr gets freed by free_netdev() */
1837  	}
1838  }
1839  
1840  /*
1841   * Initialize a net device. (Called from kernel in alloc_netdev())
1842   */
1843  static const struct net_device_ops netiucv_netdev_ops = {
1844  	.ndo_open		= netiucv_open,
1845  	.ndo_stop		= netiucv_close,
1846  	.ndo_get_stats		= netiucv_stats,
1847  	.ndo_start_xmit		= netiucv_tx,
1848  };
1849  
1850  static void netiucv_setup_netdevice(struct net_device *dev)
1851  {
1852  	dev->mtu	         = NETIUCV_MTU_DEFAULT;
1853  	dev->min_mtu		 = 576;
1854  	dev->max_mtu		 = NETIUCV_MTU_MAX;
1855  	dev->needs_free_netdev   = true;
1856  	dev->priv_destructor     = netiucv_free_netdevice;
1857  	dev->hard_header_len     = NETIUCV_HDRLEN;
1858  	dev->addr_len            = 0;
1859  	dev->type                = ARPHRD_SLIP;
1860  	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
1861  	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
1862  	dev->netdev_ops		 = &netiucv_netdev_ops;
1863  }
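/*
 * netiucv_setup_netdevice() is the setup callback handed to alloc_netdev()
 * in netiucv_init_netdevice() below.  Because needs_free_netdev is set,
 * the networking core invokes priv_destructor (netiucv_free_netdevice)
 * and then frees the net_device itself once the interface has been
 * unregistered, so no explicit free_netdev() is needed on that path.
 */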
1864  
1865  /*
1866   * Allocate and initialize everything of a net device.
1867   */
1868  static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
1869  {
1870  	struct netiucv_priv *privptr;
1871  	struct net_device *dev;
1872  
1873  	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1874  			   NET_NAME_UNKNOWN, netiucv_setup_netdevice);
1875  	if (!dev)
1876  		return NULL;
1877  	rtnl_lock();
1878  	if (dev_alloc_name(dev, dev->name) < 0)
1879  		goto out_netdev;
1880  
1881  	privptr = netdev_priv(dev);
1882  	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1883  				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1884  				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1885  	if (!privptr->fsm)
1886  		goto out_netdev;
1887  
1888  	privptr->conn = netiucv_new_connection(dev, username, userdata);
1889  	if (!privptr->conn) {
1890  		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1891  		goto out_fsm;
1892  	}
1893  	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1894  	return dev;
1895  
1896  out_fsm:
1897  	kfree_fsm(privptr->fsm);
1898  out_netdev:
1899  	rtnl_unlock();
1900  	free_netdev(dev);
1901  	return NULL;
1902  }
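/*
 * Locking detail worth noting: on success netiucv_init_netdevice() returns
 * with the RTNL still held (taken before dev_alloc_name()); the caller,
 * connection_store(), drops it again after register_netdevice().  The
 * error paths above release it themselves via out_netdev.
 */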
1903  
1904  static ssize_t connection_store(struct device_driver *drv, const char *buf,
1905  				size_t count)
1906  {
1907  	char username[9];
1908  	char userdata[17];
1909  	int rc;
1910  	struct net_device *dev;
1911  	struct netiucv_priv *priv;
1912  	struct iucv_connection *cp;
1913  
1914  	IUCV_DBF_TEXT(trace, 3, __func__);
1915  	rc = netiucv_check_user(buf, count, username, userdata);
1916  	if (rc)
1917  		return rc;
1918  
1919  	read_lock_bh(&iucv_connection_rwlock);
1920  	list_for_each_entry(cp, &iucv_connection_list, list) {
1921  		if (!strncmp(username, cp->userid, 9) &&
1922  		    !strncmp(userdata, cp->userdata, 17)) {
1923  			read_unlock_bh(&iucv_connection_rwlock);
1924  			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
1925  				"already exists\n", netiucv_printuser(cp));
1926  			return -EEXIST;
1927  		}
1928  	}
1929  	read_unlock_bh(&iucv_connection_rwlock);
1930  
1931  	dev = netiucv_init_netdevice(username, userdata);
1932  	if (!dev) {
1933  		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
1934  		return -ENODEV;
1935  	}
1936  
1937  	rc = netiucv_register_device(dev);
1938  	if (rc) {
1939  		rtnl_unlock();
1940  		IUCV_DBF_TEXT_(setup, 2,
1941  			"ret %d from netiucv_register_device\n", rc);
1942  		goto out_free_ndev;
1943  	}
1944  
1945  	/* sysfs magic */
1946  	priv = netdev_priv(dev);
1947  	SET_NETDEV_DEV(dev, priv->dev);
1948  
1949  	rc = register_netdevice(dev);
1950  	rtnl_unlock();
1951  	if (rc)
1952  		goto out_unreg;
1953  
1954  	dev_info(priv->dev, "The IUCV interface to %s has been established "
1955  			    "successfully\n",
1956  		netiucv_printuser(priv->conn));
1957  
1958  	return count;
1959  
1960  out_unreg:
1961  	netiucv_unregister_device(priv->dev);
1962  out_free_ndev:
1963  	netiucv_free_netdevice(dev);
1964  	return rc;
1965  }
1966  static DRIVER_ATTR_WO(connection);
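/*
 * Example (illustrative, assuming the standard sysfs layout for driver
 * attributes on the iucv bus): connection_store() backs the driver's
 * write-only "connection" attribute, so a new point-to-point interface
 * to a peer z/VM guest is created with something like
 *
 *	echo USERID > /sys/bus/iucv/drivers/netiucv/connection
 *
 * netiucv_check_user() parses the peer user ID (and optional user data)
 * from the written string; a matching existing connection is rejected
 * with -EEXIST above.
 */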
1967  
1968  static ssize_t remove_store(struct device_driver *drv, const char *buf,
1969  			    size_t count)
1970  {
1971  	struct iucv_connection *cp;
1972  	struct net_device *ndev;
1973  	struct netiucv_priv *priv;
1974  	struct device *dev;
1975  	char name[IFNAMSIZ];
1976  	const char *p;
1977  	int i;
1978  
1979  	IUCV_DBF_TEXT(trace, 3, __func__);
1980  
1981  	if (count >= IFNAMSIZ)
1982  		count = IFNAMSIZ - 1;
1983  
1984  	for (i = 0, p = buf; i < count && *p; i++, p++) {
1985  		if (*p == '\n' || *p == ' ')
1986  			/* trailing lf, grr */
1987  			break;
1988  		name[i] = *p;
1989  	}
1990  	name[i] = '\0';
1991  
1992  	read_lock_bh(&iucv_connection_rwlock);
1993  	list_for_each_entry(cp, &iucv_connection_list, list) {
1994  		ndev = cp->netdev;
1995  		priv = netdev_priv(ndev);
1996  		dev = priv->dev;
1997  		if (strncmp(name, ndev->name, count))
1998  			continue;
1999  		read_unlock_bh(&iucv_connection_rwlock);
2000  		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2001  			dev_warn(dev, "The IUCV device is connected"
2002  				" to %s and cannot be removed\n",
2003  				priv->conn->userid);
2004  			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2005  			return -EPERM;
2006  		}
2007  		unregister_netdev(ndev);
2008  		netiucv_unregister_device(dev);
2009  		return count;
2010  	}
2011  	read_unlock_bh(&iucv_connection_rwlock);
2012  	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2013  	return -EINVAL;
2014  }
2015  static DRIVER_ATTR_WO(remove);
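/*
 * Counterpart to the example above: writing the interface name to the
 * driver's "remove" attribute tears the connection down again, e.g.
 *
 *	echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
 *
 * remove_store() refuses with -EPERM while the interface is still up
 * (IFF_UP/IFF_RUNNING) and returns -EINVAL if no such interface exists.
 */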
2016  
2017  static struct attribute * netiucv_drv_attrs[] = {
2018  	&driver_attr_connection.attr,
2019  	&driver_attr_remove.attr,
2020  	NULL,
2021  };
2022  
2023  static struct attribute_group netiucv_drv_attr_group = {
2024  	.attrs = netiucv_drv_attrs,
2025  };
2026  
2027  static const struct attribute_group *netiucv_drv_attr_groups[] = {
2028  	&netiucv_drv_attr_group,
2029  	NULL,
2030  };
2031  
2032  static void netiucv_banner(void)
2033  {
2034  	pr_info("driver initialized\n");
2035  }
2036  
2037  static void __exit netiucv_exit(void)
2038  {
2039  	struct iucv_connection *cp;
2040  	struct net_device *ndev;
2041  	struct netiucv_priv *priv;
2042  	struct device *dev;
2043  
2044  	IUCV_DBF_TEXT(trace, 3, __func__);
2045  	while (!list_empty(&iucv_connection_list)) {
2046  		cp = list_entry(iucv_connection_list.next,
2047  				struct iucv_connection, list);
2048  		ndev = cp->netdev;
2049  		priv = netdev_priv(ndev);
2050  		dev = priv->dev;
2051  
2052  		unregister_netdev(ndev);
2053  		netiucv_unregister_device(dev);
2054  	}
2055  
2056  	driver_unregister(&netiucv_driver);
2057  	iucv_unregister(&netiucv_handler, 1);
2058  	iucv_unregister_dbf_views();
2059  
2060  	pr_info("driver unloaded\n");
2061  	return;
2062  }
2063  
2064  static int __init netiucv_init(void)
2065  {
2066  	int rc;
2067  
2068  	rc = iucv_register_dbf_views();
2069  	if (rc)
2070  		goto out;
2071  	rc = iucv_register(&netiucv_handler, 1);
2072  	if (rc)
2073  		goto out_dbf;
2074  	IUCV_DBF_TEXT(trace, 3, __func__);
2075  	netiucv_driver.groups = netiucv_drv_attr_groups;
2076  	rc = driver_register(&netiucv_driver);
2077  	if (rc) {
2078  		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2079  		goto out_iucv;
2080  	}
2081  
2082  	netiucv_banner();
2083  	return rc;
2084  
2085  out_iucv:
2086  	iucv_unregister(&netiucv_handler, 1);
2087  out_dbf:
2088  	iucv_unregister_dbf_views();
2089  out:
2090  	return rc;
2091  }
2092  
2093  module_init(netiucv_init);
2094  module_exit(netiucv_exit);
2095  MODULE_LICENSE("GPL");
2096