/*
 *  IUCV protocol stack for Linux on zSeries
 *
 *  Copyright IBM Corp. 2006, 2009
 *
 *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
 *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *  PM functions:
 *		Ursula Braun <ursula.braun@de.ibm.com>
 */

#define KMSG_COMPONENT "af_iucv"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/sock.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>

#include <net/iucv/af_iucv.h>

#define VERSION "1.2"

static char iucv_userid[80];

static const struct proto_ops iucv_sock_ops;

static struct proto iucv_proto = {
	.name		= "AF_IUCV",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct iucv_sock),
};

static struct iucv_interface *pr_iucv;

/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};

#define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class))

#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)

#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})

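/* Editorial usage note (not part of the original source): iucv_sock_wait()
 * is called with the socket lock held, a condition, and a timeout, e.g. in
 * iucv_sock_close() and iucv_sock_sendmsg():
 *
 *	timeo = sock_sndtimeo(sk, noblock);
 *	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
 *
 * It evaluates to 0 once the condition holds, -EAGAIN on timeout, a
 * signal-related errno if interrupted, or a pending socket error; the
 * socket lock is released while sleeping and re-acquired afterwards.
 */
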
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
static void iucv_sever_path(struct sock *, int);

static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
	struct packet_type *pt, struct net_device *orig_dev);
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags);
static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);

/* Call Back functions */
static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8],
				 u8 ipuser[16]);
static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
static void iucv_callback_shutdown(struct iucv_path *, u8 ipuser[16]);

static struct iucv_sock_list iucv_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
	.autobind_name = ATOMIC_INIT(0)
};

static struct iucv_handler af_iucv_handler = {
	.path_pending	  = iucv_callback_connreq,
	.path_complete	  = iucv_callback_connack,
	.path_severed	  = iucv_callback_connrej,
	.message_pending  = iucv_callback_rx,
	.message_complete = iucv_callback_txdone,
	.path_quiesced	  = iucv_callback_shutdown,
};

static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst, src, 8);
}

static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[8], src, 8);
}

static int afiucv_pm_prepare(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_prepare\n");
#endif
	return 0;
}

static void afiucv_pm_complete(struct device *dev)
{
#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_complete\n");
#endif
}

/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;
	int err = 0;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			iucv_sever_path(sk, 0);
			break;
		case IUCV_OPEN:
		case IUCV_BOUND:
		case IUCV_LISTEN:
		case IUCV_CLOSED:
		default:
			break;
		}
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
	}
	read_unlock(&iucv_sk_list.lock);
	return err;
}

/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * Socket clean-up after freeze
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_LISTEN:
		case IUCV_BOUND:
		case IUCV_OPEN:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}

static const struct dev_pm_ops afiucv_pm_ops = {
	.prepare = afiucv_pm_prepare,
	.complete = afiucv_pm_complete,
	.freeze = afiucv_pm_freeze,
	.thaw = afiucv_pm_restore_thaw,
	.restore = afiucv_pm_restore_thaw,
};

static struct device_driver af_iucv_driver = {
	.owner = THIS_MODULE,
	.name = "afiucv",
	.bus  = NULL,
	.pm   = &afiucv_pm_ops,
};

/* dummy device used as trigger for PM functions */
static struct device *af_iucv_dev;

/**
 * iucv_msg_length() - Returns the length of an iucv message.
 * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
 *
 * The function returns the length of the specified iucv message @msg,
 * whether the data is stored in a buffer or in the parameter list
 * (PRMDATA).
 *
 * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
 * data:
 *	PRMDATA[0..6]	socket data (max 7 bytes);
 *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
 *
 * The socket data length is computed by subtracting the socket data length
 * value from 0xFF.
 * If the socket data length is greater than 7, PRMDATA can be used for
 * special notifications (see iucv_sock_shutdown); in that case, the
 * function returns 8.
 *
 * Use this function to allocate socket buffers to store iucv message data.
 */
static inline size_t iucv_msg_length(struct iucv_message *msg)
{
	size_t datalen;

	if (msg->flags & IUCV_IPRMDATA) {
		datalen = 0xff - msg->rmmsg[7];
		return (datalen < 8) ? datalen : 8;
	}
	return msg->length;
}

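/* Worked example (editorial note, not part of the original source): for an
 * IPRM message carrying 5 bytes of socket data, the sender stores
 * PRMDATA[7] = 0xff - 5 = 0xfa, and iucv_msg_length() returns
 * 0xff - 0xfa = 5. For the special shutdown message iprm_shutdown,
 * PRMDATA[7] = 0x01 gives 0xff - 0x01 = 0xfe, which is not less than 8,
 * so 8 is returned.
 */
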
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either of the two states.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}

/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}

/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}

/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	memset(skb->head, 0, ETH_HLEN);
	phs_hdr = (struct af_iucv_trans_hdr *)skb_push(skb,
					sizeof(struct af_iucv_trans_hdr));
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	memset(phs_hdr, 0, sizeof(struct af_iucv_trans_hdr));

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	skb->dev = iucv->hs_dev;
	if (!skb->dev)
		return -ENODEV;
	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
		return -ENETDOWN;
	if (skb->len > skb->dev->mtu) {
		if (sock->sk_type == SOCK_SEQPACKET)
			return -EMSGSIZE;
		else
			skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = ETH_P_AF_IUCV;
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return -ENOMEM;
	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);
}

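/* Editorial sketch of the frame built by afiucv_hs_send() above, derived
 * from the code and not part of the original source:
 *
 *	+---------------------+--------------------------+--------------
 *	| ETH header (zeroed) | struct af_iucv_trans_hdr | message data
 *	|                     | magic, version, flags,   |
 *	|                     | window, EBCDIC addresses,|
 *	|                     | iucv_hdr                 |
 *	+---------------------+--------------------------+--------------
 *
 * A clone of the skb stays on send_skb_q until the transmit notification
 * (afiucv_hs_callback_txnotify) confirms or cancels the send, mirroring
 * the message-completion tracking of the VM IUCV transport.
 */
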
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}

static void iucv_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_error_queue);

	sk_mem_reclaim(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive iucv socket %p\n", sk);
		return;
	}

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
	WARN_ON(sk->sk_wmem_queued);
	WARN_ON(sk->sk_forward_alloc);
}

/* Cleanup Listen */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}

/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}

/* Terminate an IUCV path */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	int err = 0;
	int blen;
	struct sk_buff *skb;

	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	return err;
}

/* Close an IUCV socket */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:   /* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:   /* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

	default:   /* fall through */
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}

static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent)
		sk->sk_type = parent->sk_type;
}

static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;
	struct iucv_sock *iucv;

	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto);
	if (!sk)
		return NULL;
	iucv = iucv_sk(sk);

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&iucv->accept_q);
	spin_lock_init(&iucv->accept_q_lock);
	skb_queue_head_init(&iucv->send_skb_q);
	INIT_LIST_HEAD(&iucv->message_q.list);
	spin_lock_init(&iucv->message_q.lock);
	skb_queue_head_init(&iucv->backlog_skb_q);
	iucv->send_tag = 0;
	atomic_set(&iucv->pendings, 0);
	iucv->flags = 0;
	iucv->msglimit = 0;
	atomic_set(&iucv->msg_sent, 0);
	atomic_set(&iucv->msg_recv, 0);
	iucv->path = NULL;
	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
	memset(&iucv->src_user_id, 0, 32);
	if (pr_iucv)
		iucv->transport = AF_IUCV_TRANS_IUCV;
	else
		iucv->transport = AF_IUCV_TRANS_HIPER;

	sk->sk_destruct = iucv_sock_destruct;
	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
	sk->sk_allocation = GFP_DMA;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state	= IUCV_OPEN;

	iucv_sock_link(&iucv_sk_list, sk);
	return sk;
}

/* Create an IUCV socket */
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_IUCV)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &iucv_sock_ops;
		break;
	case SOCK_SEQPACKET:
		/* currently, proto ops can handle both sk types */
		sock->ops = &iucv_sock_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
	if (!sk)
		return -ENOMEM;

	iucv_sock_init(sk, NULL);

	return 0;
}

void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}

void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}

void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}

void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}

struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}

/* Bind an unbound socket */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (!addr || addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_name, sa->siucv_name, 8);
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}

/* Automatically bind an unbound socket */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	char name[12];
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);

	write_lock_bh(&iucv_sk_list.lock);

	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
	while (__iucv_get_sock_by_name(name)) {
		sprintf(name, "%08x",
			atomic_inc_return(&iucv_sk_list.autobind_name));
	}

	write_unlock_bh(&iucv_sk_list.lock);

	memcpy(&iucv->src_name, name, 8);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}

static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}

/* Connect an unconnected socket */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}

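/* Editorial sketch (not part of the original source): a minimal AF_IUCV
 * client as seen from user space, assuming a userspace definition of
 * struct sockaddr_iucv with the layout used above; the peer user id and
 * application name are hypothetical and must be blank-padded to 8 bytes:
 *
 *	struct sockaddr_iucv addr;
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.siucv_family = AF_IUCV;
 *	memcpy(addr.siucv_user_id, "PEERVM  ", 8);
 *	memcpy(addr.siucv_name,    "APPLNAME", 8);
 *	if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0)
 *		perror("connect");
 *
 * An unbound socket is autobound during connect() on the VM IUCV
 * transport; the HiperSockets transport requires an explicit bind()
 * first (see the -EBADFD check above).
 */
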
/* Move a socket into listening state. */
static int iucv_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != IUCV_BOUND)
		goto done;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto done;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = IUCV_LISTEN;
	err = 0;

done:
	release_sock(sk);
	return err;
}

/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}

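/* Editorial sketch (not part of the original source): the matching server
 * side binds its own (hypothetical) user id and application name, then
 * listens and accepts like any other socket family:
 *
 *	struct sockaddr_iucv addr;
 *	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
 *	int cfd;
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.siucv_family = AF_IUCV;
 *	memcpy(addr.siucv_user_id, "MYVMID  ", 8);
 *	memcpy(addr.siucv_name,    "APPLNAME", 8);
 *	bind(fd, (struct sockaddr *) &addr, sizeof(addr));
 *	listen(fd, 10);
 *	cfd = accept(fd, NULL, NULL);
 */
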
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int *len, int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;
	*len = sizeof(struct sockaddr_iucv);

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return 0;
}

/**
 * iucv_send_iprm() - Send socket data in the parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list of the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the
 * parameter list, and the socket data length value at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}

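/* Worked example (editorial note, not part of the original source):
 * sending the 5 bytes "hello" as an IPRM message stores "hello" in
 * prmdata[0..4] and prmdata[7] = 0xff - 5 = 0xfa; the receiver recovers
 * the length via iucv_msg_length(). This path is only taken if both the
 * iucv path and the socket have IUCV_IPRMDATA set (see the check in
 * iucv_sock_sendmsg() and the SO_IPRMDATA_MSG socket option).
 */
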
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct iucv_message txmsg;
	struct cmsghdr *cmsg;
	int cmsg_done;
	long timeo;
	char user_id[9];
	char appl_id[9];
	int err;
	int noblock = msg->msg_flags & MSG_DONTWAIT;

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* SOCK_SEQPACKET: we do not support segmented records */
	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	/* Return if the socket is not in connected state */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ENOTCONN;
		goto out;
	}

	/* initialize defaults */
	cmsg_done   = 0;	/* check for duplicate headers */
	txmsg.class = 0;

	/* iterate over control messages */
	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
		cmsg = CMSG_NXTHDR(msg, cmsg)) {

		if (!CMSG_OK(msg, cmsg)) {
			err = -EINVAL;
			goto out;
		}

		if (cmsg->cmsg_level != SOL_IUCV)
			continue;

		if (cmsg->cmsg_type & cmsg_done) {
			err = -EINVAL;
			goto out;
		}
		cmsg_done |= cmsg->cmsg_type;

		switch (cmsg->cmsg_type) {
		case SCM_IUCV_TRGCLS:
			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
				err = -EINVAL;
				goto out;
			}

			/* set iucv message target class */
			memcpy(&txmsg.class,
				(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);

			break;

		default:
			err = -EINVAL;
			goto out;
			break;
		}
	}

	/* allocate one skb for each iucv message:
	 * this is fine for SOCK_SEQPACKET (unless we want to support
	 * segmented records using the MSG_EOR flag), but
	 * for SOCK_STREAM we might want to improve it in future */
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb = sock_alloc_send_skb(sk,
			len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN,
			noblock, &err);
	else
		skb = sock_alloc_send_skb(sk, len, noblock, &err);
	if (!skb) {
		err = -ENOMEM;
		goto out;
	}
	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN);
	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
		err = -EFAULT;
		goto fail;
	}

	/* wait if the limit of outstanding messages for the iucv path
	 * has been reached */
	timeo = sock_sndtimeo(sk, noblock);
	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
	if (err)
		goto fail;

	/* return -ECONNRESET if the socket is no longer connected */
	if (sk->sk_state != IUCV_CONNECTED) {
		err = -ECONNRESET;
		goto fail;
	}

	/* increment and save iucv message tag for msg_completion cbk */
	txmsg.tag = iucv->send_tag++;
	IUCV_SKB_CB(skb)->tag = txmsg.tag;

	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
		atomic_inc(&iucv->msg_sent);
		err = afiucv_hs_send(&txmsg, sk, skb, 0);
		if (err) {
			atomic_dec(&iucv->msg_sent);
			goto fail;
		}
		goto release;
	}
	skb_queue_tail(&iucv->send_skb_q, skb);

	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
	      && skb->len <= 7) {
		err = iucv_send_iprm(iucv->path, &txmsg, skb);

		/* on success: there is no message_complete callback
		 * for an IPRMDATA msg; remove skb from send queue */
		if (err == 0) {
			skb_unlink(skb, &iucv->send_skb_q);
			kfree_skb(skb);
		}

		/* this error should never happen since the
		 * IUCV_IPRMDATA path flag is set... sever path */
		if (err == 0x15) {
			pr_iucv->path_sever(iucv->path, NULL);
			skb_unlink(skb, &iucv->send_skb_q);
			err = -EPIPE;
			goto fail;
		}
	} else
		err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
					(void *) skb->data, skb->len);
	if (err) {
		if (err == 3) {
			user_id[8] = 0;
			memcpy(user_id, iucv->dst_user_id, 8);
			appl_id[8] = 0;
			memcpy(appl_id, iucv->dst_name, 8);
			pr_err("Application %s on z/VM guest %s"
				" exceeds message limit\n",
				appl_id, user_id);
			err = -EAGAIN;
		} else
			err = -EPIPE;
		skb_unlink(skb, &iucv->send_skb_q);
		goto fail;
	}

release:
	release_sock(sk);
	return len;

fail:
	kfree_skb(skb);
out:
	release_sock(sk);
	return err;
}

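/* Editorial sketch (not part of the original source): user space sets the
 * iucv message target class for a send on a connected socket fd via a
 * SCM_IUCV_TRGCLS control message, which the cmsg loop above copies into
 * txmsg.class:
 *
 *	char buf[] = "payload";
 *	uint32_t trgcls = 1;
 *	char control[CMSG_SPACE(sizeof(trgcls))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1,
 *			    .msg_control = control,
 *			    .msg_controllen = sizeof(control) };
 *	struct cmsghdr *c = CMSG_FIRSTHDR(&m);
 *
 *	c->cmsg_level = SOL_IUCV;
 *	c->cmsg_type  = SCM_IUCV_TRGCLS;
 *	c->cmsg_len   = CMSG_LEN(sizeof(trgcls));
 *	memcpy(CMSG_DATA(c), &trgcls, sizeof(trgcls));
 *	sendmsg(fd, &m, 0);
 */
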
/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skbs
 *
 * Locking: must be called with message_q.lock held
 */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
	int dataleft, size, copied = 0;
	struct sk_buff *nskb;

	dataleft = len;
	while (dataleft) {
		if (dataleft >= sk->sk_rcvbuf / 4)
			size = sk->sk_rcvbuf / 4;
		else
			size = dataleft;

		nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
		if (!nskb)
			return -ENOMEM;

		/* copy target class to control buffer of new skb */
		IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;

		/* copy data fragment */
		memcpy(nskb->data, skb->data + copied, size);
		copied += size;
		dataleft -= size;

		skb_reset_transport_header(nskb);
		skb_reset_network_header(nskb);
		nskb->len = size;

		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
	}

	return 0;
}

/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	IUCV_SKB_CB(skb)->class = msg->class;

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		if (rc) {
			kfree_skb(skb);
			return;
		}
		/* we need to fragment iucv messages for SOCK_STREAM only;
		 * for SOCK_SEQPACKET, it is only relevant if we support
		 * record segmentation using MSG_EOR (see also recvmsg()) */
		if (sk->sk_type == SOCK_STREAM &&
		    skb->truesize >= sk->sk_rcvbuf / 4) {
			rc = iucv_fragment_skb(sk, skb, len);
			kfree_skb(skb);
			skb = NULL;
			if (rc) {
				pr_iucv->path_sever(path, NULL);
				return;
			}
			skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
		} else {
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->len = len;
		}
	}

	IUCV_SKB_CB(skb)->offset = 0;
	if (sock_queue_rcv_skb(sk, skb))
		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}

/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA);
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}

static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
			     struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;
	u32 offset;

	msg->msg_namelen = 0;

	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	offset = IUCV_SKB_CB(skb)->offset;
	rlen   = skb->len - offset;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	if (!rlen)
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       sizeof(IUCV_SKB_CB(skb)->class),
		       (void *)&IUCV_SKB_CB(skb)->class);
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			if (copied < rlen) {
				IUCV_SKB_CB(skb)->offset = offset + copied;
				goto done;
			}
		}

		kfree_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			IUCV_SKB_CB(rskb)->offset = 0;
			if (sock_queue_rcv_skb(sk, rskb)) {
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			} else {
				rskb = skb_dequeue(&iucv->backlog_skb_q);
			}
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}

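/* Editorial sketch (not part of the original source): after a recvmsg()
 * with msg_control set, the receiver retrieves the target class that the
 * put_cmsg() call above exported:
 *
 *	struct cmsghdr *c;
 *	uint32_t trgcls = 0;
 *
 *	for (c = CMSG_FIRSTHDR(&m); c; c = CMSG_NXTHDR(&m, c))
 *		if (c->cmsg_level == SOL_IUCV &&
 *		    c->cmsg_type == SCM_IUCV_TRGCLS)
 *			memcpy(&trgcls, CMSG_DATA(c), sizeof(trgcls));
 */
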
static inline unsigned int iucv_accept_poll(struct sock *parent)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;

		if (sk->sk_state == IUCV_CONNECTED)
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
			    poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == IUCV_LISTEN)
		return iucv_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == IUCV_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == IUCV_DISCONN)
		mask |= POLLIN;

	if (sock_writeable(sk) && iucv_below_msglim(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
			if (err) {
				switch (err) {
				case 1:
					err = -ENOTCONN;
					break;
				case 2:
					err = -ECONNRESET;
					break;
				default:
					err = -ENOTCONN;
					break;
				}
			}
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}

static int iucv_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	if (!sk)
		return 0;

	iucv_sock_close(sk);

	sock_orphan(sk);
	iucv_sock_kill(sk);
	return err;
}

/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int val;
	int rc;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *) optval))
		return -EFAULT;

	rc = 0;

	lock_sock(sk);
	switch (optname) {
	case SO_IPRMDATA_MSG:
		if (val)
			iucv->flags |= IUCV_IPRMDATA;
		else
			iucv->flags &= ~IUCV_IPRMDATA;
		break;
	case SO_MSGLIMIT:
		switch (sk->sk_state) {
		case IUCV_OPEN:
		case IUCV_BOUND:
			if (val < 1 || val > (u16)(~0))
				rc = -EINVAL;
			else
				iucv->msglimit = val;
			break;
		default:
			rc = -EINVAL;
			break;
		}
		break;
	default:
		rc = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);

	return rc;
}

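/* Editorial sketch (not part of the original source): SO_MSGLIMIT must be
 * set while the socket is still in IUCV_OPEN or IUCV_BOUND state, i.e.
 * before connect() or listen() (see the state check above):
 *
 *	int limit = 1024;
 *
 *	setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
 */
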
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int val;
	int len;

	if (level != SOL_IUCV)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case SO_IPRMDATA_MSG:
		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
		break;
	case SO_MSGLIMIT:
		lock_sock(sk);
		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
					   : iucv->msglimit;	/* default */
		release_sock(sk);
		break;
	case SO_MSGSIZE:
		if (sk->sk_state == IUCV_OPEN)
			return -EBADFD;
		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
				0x7fffffff;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}


/* Callback wrappers - called from iucv base support */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk, 1);
	err = 0;
fail:
	bh_unlock_sock(sk);
	return 0;
}

static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	sk->sk_state = IUCV_CONNECTED;
	sk->sk_state_change(sk);
}

static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA);
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}

static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	bh_lock_sock(sk);
	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		while (list_skb != (struct sk_buff *)list) {
			if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);

}

static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	if (sk->sk_state == IUCV_CLOSED)
		return;

	bh_lock_sock(sk);
	iucv_sever_path(sk, 1);
	sk->sk_state = IUCV_DISCONN;

	sk->sk_state_change(sk);
	bh_unlock_sock(sk);
}

/* called if the other communication side shuts down its RECV direction;
 * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
 */
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
{
	struct sock *sk = path->private;

	bh_lock_sock(sk);
	if (sk->sk_state != IUCV_CLOSED) {
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
	}
	bh_unlock_sock(sk);
}

/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
	struct af_iucv_trans_hdr *trans_hdr =
				(struct af_iucv_trans_hdr *)skb->data;
	char tmpID[8];
	char tmpName[8];

	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
	memcpy(tmpID, trans_hdr->srcUserID, 8);
	memcpy(tmpName, trans_hdr->srcAppName, 8);
	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
	memcpy(trans_hdr->destUserID, tmpID, 8);
	memcpy(trans_hdr->destAppName, tmpName, 8);
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);
}

/**
 * afiucv_hs_callback_syn - react on received SYN
 **/
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
	struct sock *nsk;
	struct iucv_sock *iucv, *niucv;
	struct af_iucv_trans_hdr *trans_hdr;
	int err;

	iucv = iucv_sk(sk);
	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
	if (!iucv) {
		/* no sock - connection refused */
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		goto out;
	}

	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC);
	bh_lock_sock(sk);
	if ((sk->sk_state != IUCV_LISTEN) ||
	    sk_acceptq_is_full(sk) ||
	    !nsk) {
		/* error on server socket - connection refused */
		if (nsk)
			sk_free(nsk);
		afiucv_swap_src_dest(skb);
		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
		err = dev_queue_xmit(skb);
		bh_unlock_sock(sk);
		goto out;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);
	niucv->transport = AF_IUCV_TRANS_HIPER;
	niucv->msglimit = iucv->msglimit;
	if (!trans_hdr->window)
		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
	else
		niucv->msglimit_peer = trans_hdr->window;
	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
	niucv->hs_dev = iucv->hs_dev;
	dev_hold(niucv->hs_dev);
	afiucv_swap_src_dest(skb);
	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
	trans_hdr->window = niucv->msglimit;
	/* if the receiver acks the xmit, the connection is established */
1968 	err = dev_queue_xmit(skb);
1969 	if (!err) {
1970 		iucv_accept_enqueue(sk, nsk);
1971 		nsk->sk_state = IUCV_CONNECTED;
1972 		sk->sk_data_ready(sk, 1);
1973 	} else
1974 		iucv_sock_kill(nsk);
1975 	bh_unlock_sock(sk);
1976 
1977 out:
1978 	return NET_RX_SUCCESS;
1979 }
1980 
1981 /**
1982  * afiucv_hs_callback_synack() - react on received SYN-ACK
1983  **/
1984 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
1985 {
1986 	struct iucv_sock *iucv = iucv_sk(sk);
1987 	struct af_iucv_trans_hdr *trans_hdr =
1988 					(struct af_iucv_trans_hdr *)skb->data;
1989 
1990 	if (!iucv)
1991 		goto out;
1992 	if (sk->sk_state != IUCV_BOUND)
1993 		goto out;
1994 	bh_lock_sock(sk);
1995 	iucv->msglimit_peer = trans_hdr->window;
1996 	sk->sk_state = IUCV_CONNECTED;
1997 	sk->sk_state_change(sk);
1998 	bh_unlock_sock(sk);
1999 out:
2000 	kfree_skb(skb);
2001 	return NET_RX_SUCCESS;
2002 }
2003 
2004 /**
2005  * afiucv_hs_callback_synfin() - react on received SYN|FIN
2006  **/
2007 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2008 {
2009 	struct iucv_sock *iucv = iucv_sk(sk);
2010 
2011 	if (!iucv)
2012 		goto out;
2013 	if (sk->sk_state != IUCV_BOUND)
2014 		goto out;
2015 	bh_lock_sock(sk);
2016 	sk->sk_state = IUCV_DISCONN;
2017 	sk->sk_state_change(sk);
2018 	bh_unlock_sock(sk);
2019 out:
2020 	kfree_skb(skb);
2021 	return NET_RX_SUCCESS;
2022 }
2023 
2024 /**
2025  * afiucv_hs_callback_fin() - react on received FIN
2026  **/
2027 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2028 {
2029 	struct iucv_sock *iucv = iucv_sk(sk);
2030 
2031 	/* other end of connection closed */
2032 	if (!iucv)
2033 		goto out;
2034 	bh_lock_sock(sk);
2035 	if (sk->sk_state == IUCV_CONNECTED) {
2036 		sk->sk_state = IUCV_DISCONN;
2037 		sk->sk_state_change(sk);
2038 	}
2039 	bh_unlock_sock(sk);
2040 out:
2041 	kfree_skb(skb);
2042 	return NET_RX_SUCCESS;
2043 }
2044 
2045 /**
2046  * afiucv_hs_callback_win() - react on received WIN
2047  **/
2048 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2049 {
2050 	struct iucv_sock *iucv = iucv_sk(sk);
2051 	struct af_iucv_trans_hdr *trans_hdr =
2052 					(struct af_iucv_trans_hdr *)skb->data;
2053 
2054 	if (!iucv)
2055 		return NET_RX_SUCCESS;
2056 
2057 	if (sk->sk_state != IUCV_CONNECTED)
2058 		return NET_RX_SUCCESS;
2059 
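	/* the peer confirmed 'window' messages: give back that many
	 * send credits and wake up senders blocked on the msglimit
	 */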
2060 	atomic_sub(trans_hdr->window, &iucv->msg_sent);
2061 	iucv_sock_wake_msglim(sk);
2062 	return NET_RX_SUCCESS;
2063 }
2064 
2065 /**
2066  * afiucv_hs_callback_rx() - react on received data
2067  **/
2068 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2069 {
2070 	struct iucv_sock *iucv = iucv_sk(sk);
2071 
2072 	if (!iucv) {
2073 		kfree_skb(skb);
2074 		return NET_RX_SUCCESS;
2075 	}
2076 
2077 	if (sk->sk_state != IUCV_CONNECTED) {
2078 		kfree_skb(skb);
2079 		return NET_RX_SUCCESS;
2080 	}
2081 
2082 	if (sk->sk_shutdown & RCV_SHUTDOWN) {
2083 		kfree_skb(skb);
2084 		return NET_RX_SUCCESS;
2085 	}
2086 
2087 	/* strip the transport header and set up the skb cb */
2088 	if (skb->len < sizeof(struct af_iucv_trans_hdr)) {
2089 		kfree_skb(skb);
2090 		return NET_RX_SUCCESS;
2091 	}
2092 	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2093 	skb_reset_transport_header(skb);
2094 	skb_reset_network_header(skb);
2095 	IUCV_SKB_CB(skb)->offset = 0;
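	/* preserve receive order: once a backlog exists, new skbs must
	 * queue up behind it instead of going straight to the sock
	 */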
2096 	spin_lock(&iucv->message_q.lock);
2097 	if (skb_queue_empty(&iucv->backlog_skb_q)) {
2098 		if (sock_queue_rcv_skb(sk, skb)) {
2099 			/* handle rcv queue full */
2100 			skb_queue_tail(&iucv->backlog_skb_q, skb);
2101 		}
2102 	} else
2103 		skb_queue_tail(&iucv->backlog_skb_q, skb);
2104 	spin_unlock(&iucv->message_q.lock);
2105 	return NET_RX_SUCCESS;
2106 }
2107 
2108 /**
2109  * afiucv_hs_rcv() - base function for arriving data through HiperSockets
2110  *                   transport
2111  *                   called from netif RX softirq
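 * @skb:      skb received on the ETH_P_AF_IUCV packet type
 * @dev:      net_device the frame arrived on
 * @pt:       matched packet type
 * @orig_dev: original receiving net_device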
2112  **/
2113 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2114 	struct packet_type *pt, struct net_device *orig_dev)
2115 {
2116 	struct sock *sk;
2117 	struct iucv_sock *iucv;
2118 	struct af_iucv_trans_hdr *trans_hdr;
2119 	char nullstring[8];
2120 	int err = 0;
2121 
2122 	skb_pull(skb, ETH_HLEN);
2123 	trans_hdr = (struct af_iucv_trans_hdr *)skb->data;
2124 	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2125 	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2126 	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2127 	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2128 	memset(nullstring, 0, sizeof(nullstring));
2129 	iucv = NULL;
2130 	sk = NULL;
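	/* a SYN may only match a bound sock whose destination is still
	 * unset; all other frames must match the complete source and
	 * destination name/user-ID tuple
	 */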
2131 	read_lock(&iucv_sk_list.lock);
2132 	sk_for_each(sk, &iucv_sk_list.head) {
2133 		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2134 			if ((!memcmp(&iucv_sk(sk)->src_name,
2135 				     trans_hdr->destAppName, 8)) &&
2136 			    (!memcmp(&iucv_sk(sk)->src_user_id,
2137 				     trans_hdr->destUserID, 8)) &&
2138 			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2139 			    (!memcmp(&iucv_sk(sk)->dst_user_id,
2140 				     nullstring, 8))) {
2141 				iucv = iucv_sk(sk);
2142 				break;
2143 			}
2144 		} else {
2145 			if ((!memcmp(&iucv_sk(sk)->src_name,
2146 				     trans_hdr->destAppName, 8)) &&
2147 			    (!memcmp(&iucv_sk(sk)->src_user_id,
2148 				     trans_hdr->destUserID, 8)) &&
2149 			    (!memcmp(&iucv_sk(sk)->dst_name,
2150 				     trans_hdr->srcAppName, 8)) &&
2151 			    (!memcmp(&iucv_sk(sk)->dst_user_id,
2152 				     trans_hdr->srcUserID, 8))) {
2153 				iucv = iucv_sk(sk);
2154 				break;
2155 			}
2156 		}
2157 	}
2158 	read_unlock(&iucv_sk_list.lock);
2159 	if (!iucv)
2160 		sk = NULL;
2161 
2162 	/* No matching sock was found.  How should such frames be answered?
2163 	 * 1) send without a sock and skip send rc checking?
2164 	 * 2) introduce a default sock to handle these cases?
2165 	 *
2166 	 * SYN -> send SYN|ACK in the good case, SYN|FIN in the bad case
2167 	 * data -> send FIN
2168 	 * SYN|ACK, SYN|FIN, FIN -> no action?
2169 	 */
2170 
2171 	switch (trans_hdr->flags) {
2172 	case AF_IUCV_FLAG_SYN:
2173 		/* connect request */
2174 		err = afiucv_hs_callback_syn(sk, skb);
2175 		break;
2176 	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2177 		/* connect request confirmed */
2178 		err = afiucv_hs_callback_synack(sk, skb);
2179 		break;
2180 	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2181 		/* connect request refused */
2182 		err = afiucv_hs_callback_synfin(sk, skb);
2183 		break;
2184 	case (AF_IUCV_FLAG_FIN):
2185 		/* close request */
2186 		err = afiucv_hs_callback_fin(sk, skb);
2187 		break;
2188 	case (AF_IUCV_FLAG_WIN):
2189 		err = afiucv_hs_callback_win(sk, skb);
2190 		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
2191 			kfree_skb(skb);
2192 			break;
2193 		}
2194 		/* fall through and receive non-zero length data */
2195 	case (AF_IUCV_FLAG_SHT):
2196 		/* shutdown request */
2197 		/* fall through and receive zero length data */
2198 	case 0:
2199 		/* plain data frame */
2200 		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
2201 		err = afiucv_hs_callback_rx(sk, skb);
2202 		break;
2203 	default:
2204 		;
2205 	}
2206 
2207 	return err;
2208 }
2209 
2210 /**
2211  * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
2212  *                                 transport
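 * @skb: skb the notification refers to; a clone of it is queued on
 *       the sending sock's send_skb_q
 * @n:   type of tx notification (ok, pending, delayed or error)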
2213  **/
2214 static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2215 					enum iucv_tx_notify n)
2216 {
2217 	struct sock *isk = skb->sk;
2218 	struct sock *sk = NULL;
2219 	struct iucv_sock *iucv = NULL;
2220 	struct sk_buff_head *list;
2221 	struct sk_buff *list_skb;
2222 	struct sk_buff *nskb;
2223 	unsigned long flags;
2224 
2225 	read_lock_irqsave(&iucv_sk_list.lock, flags);
2226 	sk_for_each(sk, &iucv_sk_list.head)
2227 		if (sk == isk) {
2228 			iucv = iucv_sk(sk);
2229 			break;
2230 		}
2231 	read_unlock_irqrestore(&iucv_sk_list.lock, flags);
2232 
2233 	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
2234 		return;
2235 
2236 	list = &iucv->send_skb_q;
2237 	spin_lock_irqsave(&list->lock, flags);
2238 	if (skb_queue_empty(list))
2239 		goto out_unlock;
2240 	list_skb = list->next;
2241 	nskb = list_skb->next;
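	/* queued entries are clones of the transmitted skb and share
	 * its shared info; compare skb_shinfo() pointers to find the
	 * entry this notification belongs to
	 */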
2242 	while (list_skb != (struct sk_buff *)list) {
2243 		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2244 			switch (n) {
2245 			case TX_NOTIFY_OK:
2246 				__skb_unlink(list_skb, list);
2247 				kfree_skb(list_skb);
2248 				iucv_sock_wake_msglim(sk);
2249 				break;
2250 			case TX_NOTIFY_PENDING:
2251 				atomic_inc(&iucv->pendings);
2252 				break;
2253 			case TX_NOTIFY_DELAYED_OK:
2254 				__skb_unlink(list_skb, list);
2255 				atomic_dec(&iucv->pendings);
2256 				if (atomic_read(&iucv->pendings) <= 0)
2257 					iucv_sock_wake_msglim(sk);
2258 				kfree_skb(list_skb);
2259 				break;
2260 			case TX_NOTIFY_UNREACHABLE:
2261 			case TX_NOTIFY_DELAYED_UNREACHABLE:
2262 			case TX_NOTIFY_TPQFULL: /* not yet used */
2263 			case TX_NOTIFY_GENERALERROR:
2264 			case TX_NOTIFY_DELAYED_GENERALERROR:
2265 				__skb_unlink(list_skb, list);
2266 				kfree_skb(list_skb);
2267 				if (sk->sk_state == IUCV_CONNECTED) {
2268 					sk->sk_state = IUCV_DISCONN;
2269 					sk->sk_state_change(sk);
2270 				}
2271 				break;
2272 			}
2273 			break;
2274 		}
2275 		list_skb = nskb;
2276 		nskb = nskb->next;
2277 	}
2278 out_unlock:
2279 	spin_unlock_irqrestore(&list->lock, flags);
2280 
2281 	if (sk->sk_state == IUCV_CLOSING) {
2282 		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
2283 			sk->sk_state = IUCV_CLOSED;
2284 			sk->sk_state_change(sk);
2285 		}
2286 	}
2287 
2288 }
2289 
2290 /*
2291  * afiucv_netdev_event: handle netdev notifier chain events
2292  */
2293 static int afiucv_netdev_event(struct notifier_block *this,
2294 			       unsigned long event, void *ptr)
2295 {
2296 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2297 	struct sock *sk;
2298 	struct iucv_sock *iucv;
2299 
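	/* mark all connected socks on the affected device disconnected;
	 * only while the device is still going down can a closing FIN
	 * presumably be transmitted
	 */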
2300 	switch (event) {
2301 	case NETDEV_REBOOT:
2302 	case NETDEV_GOING_DOWN:
2303 		sk_for_each(sk, &iucv_sk_list.head) {
2304 			iucv = iucv_sk(sk);
2305 			if ((iucv->hs_dev == event_dev) &&
2306 			    (sk->sk_state == IUCV_CONNECTED)) {
2307 				if (event == NETDEV_GOING_DOWN)
2308 					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
2309 				sk->sk_state = IUCV_DISCONN;
2310 				sk->sk_state_change(sk);
2311 			}
2312 		}
2313 		break;
2314 	case NETDEV_DOWN:
2315 	case NETDEV_UNREGISTER:
2316 	default:
2317 		break;
2318 	}
2319 	return NOTIFY_DONE;
2320 }
2321 
2322 static struct notifier_block afiucv_netdev_notifier = {
2323 	.notifier_call = afiucv_netdev_event,
2324 };
2325 
2326 static const struct proto_ops iucv_sock_ops = {
2327 	.family		= PF_IUCV,
2328 	.owner		= THIS_MODULE,
2329 	.release	= iucv_sock_release,
2330 	.bind		= iucv_sock_bind,
2331 	.connect	= iucv_sock_connect,
2332 	.listen		= iucv_sock_listen,
2333 	.accept		= iucv_sock_accept,
2334 	.getname	= iucv_sock_getname,
2335 	.sendmsg	= iucv_sock_sendmsg,
2336 	.recvmsg	= iucv_sock_recvmsg,
2337 	.poll		= iucv_sock_poll,
2338 	.ioctl		= sock_no_ioctl,
2339 	.mmap		= sock_no_mmap,
2340 	.socketpair	= sock_no_socketpair,
2341 	.shutdown	= iucv_sock_shutdown,
2342 	.setsockopt	= iucv_sock_setsockopt,
2343 	.getsockopt	= iucv_sock_getsockopt,
2344 };
2345 
2346 static const struct net_proto_family iucv_sock_family_ops = {
2347 	.family	= AF_IUCV,
2348 	.owner	= THIS_MODULE,
2349 	.create	= iucv_sock_create,
2350 };
2351 
2352 static struct packet_type iucv_packet_type = {
2353 	.type = cpu_to_be16(ETH_P_AF_IUCV),
2354 	.func = afiucv_hs_rcv,
2355 };
2356 
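/**
 * afiucv_iucv_init - register with the native IUCV transport
 *
 * Registers the af_iucv handler with the IUCV base layer and sets up
 * a dummy device/driver pair on the IUCV bus, rolling everything back
 * on failure.
 */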
2357 static int afiucv_iucv_init(void)
2358 {
2359 	int err;
2360 
2361 	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
2362 	if (err)
2363 		goto out;
2364 	/* establish dummy device */
2365 	af_iucv_driver.bus = pr_iucv->bus;
2366 	err = driver_register(&af_iucv_driver);
2367 	if (err)
2368 		goto out_iucv;
2369 	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2370 	if (!af_iucv_dev) {
2371 		err = -ENOMEM;
2372 		goto out_driver;
2373 	}
2374 	dev_set_name(af_iucv_dev, "af_iucv");
2375 	af_iucv_dev->bus = pr_iucv->bus;
2376 	af_iucv_dev->parent = pr_iucv->root;
2377 	af_iucv_dev->release = (void (*)(struct device *))kfree;
2378 	af_iucv_dev->driver = &af_iucv_driver;
2379 	err = device_register(af_iucv_dev);
2380 	if (err)
2381 		goto out_driver;
2382 	return 0;
2383 
2384 out_driver:
2385 	driver_unregister(&af_iucv_driver);
2386 out_iucv:
2387 	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2388 out:
2389 	return err;
2390 }
2391 
2392 static int __init afiucv_init(void)
2393 {
2394 	int err;
2395 
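	/* the native IUCV transport is available to z/VM guests only;
	 * everywhere else the socket family has to rely solely on the
	 * HiperSockets transport
	 */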
2396 	if (MACHINE_IS_VM) {
2397 		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2398 		if (unlikely(err)) {
2399 			WARN_ON(err);
2400 			err = -EPROTONOSUPPORT;
2401 			goto out;
2402 		}
2403 
2404 		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2405 		if (!pr_iucv) {
2406 			pr_warn("iucv_if lookup failed\n");
2407 			memset(&iucv_userid, 0, sizeof(iucv_userid));
2408 		}
2409 	} else {
2410 		memset(&iucv_userid, 0, sizeof(iucv_userid));
2411 		pr_iucv = NULL;
2412 	}
2413 
2414 	err = proto_register(&iucv_proto, 0);
2415 	if (err)
2416 		goto out;
2417 	err = sock_register(&iucv_sock_family_ops);
2418 	if (err)
2419 		goto out_proto;
2420 
2421 	if (pr_iucv) {
2422 		err = afiucv_iucv_init();
2423 		if (err)
2424 			goto out_sock;
2425 	} else
2426 		register_netdevice_notifier(&afiucv_netdev_notifier);
2427 	dev_add_pack(&iucv_packet_type);
2428 	return 0;
2429 
2430 out_sock:
2431 	sock_unregister(PF_IUCV);
2432 out_proto:
2433 	proto_unregister(&iucv_proto);
2434 out:
2435 	if (pr_iucv)
2436 		symbol_put(iucv_if);
2437 	return err;
2438 }
2439 
2440 static void __exit afiucv_exit(void)
2441 {
2442 	if (pr_iucv) {
2443 		device_unregister(af_iucv_dev);
2444 		driver_unregister(&af_iucv_driver);
2445 		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
2446 		symbol_put(iucv_if);
2447 	} else
2448 		unregister_netdevice_notifier(&afiucv_netdev_notifier);
2449 	dev_remove_pack(&iucv_packet_type);
2450 	sock_unregister(PF_IUCV);
2451 	proto_unregister(&iucv_proto);
2452 }
2453 
2454 module_init(afiucv_init);
2455 module_exit(afiucv_exit);
2456 
2457 MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
2458 MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
2459 MODULE_VERSION(VERSION);
2460 MODULE_LICENSE("GPL");
2461 MODULE_ALIAS_NETPROTO(PF_IUCV);
2462 
2463