xref: /openbmc/linux/net/iucv/af_iucv.c (revision 58f9d806)
1 /*
2  *  IUCV protocol stack for Linux on zSeries
3  *
4  *  Copyright IBM Corp. 2006, 2009
5  *
6  *  Author(s):	Jennifer Hunt <jenhunt@us.ibm.com>
7  *		Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
8  *  PM functions:
9  *		Ursula Braun <ursula.braun@de.ibm.com>
10  */
11 
12 #define KMSG_COMPONENT "af_iucv"
13 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
14 
15 #include <linux/module.h>
16 #include <linux/types.h>
17 #include <linux/list.h>
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/sched/signal.h>
21 #include <linux/slab.h>
22 #include <linux/skbuff.h>
23 #include <linux/init.h>
24 #include <linux/poll.h>
25 #include <linux/security.h>
26 #include <net/sock.h>
27 #include <asm/ebcdic.h>
28 #include <asm/cpcmd.h>
29 #include <linux/kmod.h>
30 
31 #include <net/iucv/af_iucv.h>
32 
33 #define VERSION "1.2"
34 
35 static char iucv_userid[80];
36 
37 static const struct proto_ops iucv_sock_ops;
38 
39 static struct proto iucv_proto = {
40 	.name		= "AF_IUCV",
41 	.owner		= THIS_MODULE,
42 	.obj_size	= sizeof(struct iucv_sock),
43 };
44 
45 static struct iucv_interface *pr_iucv;
46 
47 /* special AF_IUCV IPRM messages */
48 static const u8 iprm_shutdown[8] =
49 	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
50 
51 #define TRGCLS_SIZE	FIELD_SIZEOF(struct iucv_message, class)
52 
/*
 * __iucv_sock_wait() - sleep on the socket's wait queue until @condition
 * becomes true, @timeo expires (-> -EAGAIN), a signal is pending, or a
 * socket error occurs.  The socket lock is dropped across the actual
 * schedule_timeout() and re-taken before @condition is re-evaluated.
 * @ret receives 0 on success or a negative errno.
 */
#define __iucv_sock_wait(sk, condition, timeo, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	long __timeo = timeo;						\
	ret = 0;							\
	prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);	\
	while (!(condition)) {						\
		if (!__timeo) {						\
			ret = -EAGAIN;					\
			break;						\
		}							\
		if (signal_pending(current)) {				\
			ret = sock_intr_errno(__timeo);			\
			break;						\
		}							\
		release_sock(sk);					\
		__timeo = schedule_timeout(__timeo);			\
		lock_sock(sk);						\
		ret = sock_error(sk);					\
		if (ret)						\
			break;						\
	}								\
	finish_wait(sk_sleep(sk), &__wait);				\
} while (0)
77 
/*
 * iucv_sock_wait() - wrapper around __iucv_sock_wait() that skips the
 * sleep entirely when @condition already holds; evaluates to 0 or a
 * negative errno.
 */
#define iucv_sock_wait(sk, condition, timeo)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__iucv_sock_wait(sk, condition, timeo, __ret);		\
	__ret;								\
})
85 
86 static void iucv_sock_kill(struct sock *sk);
87 static void iucv_sock_close(struct sock *sk);
88 static void iucv_sever_path(struct sock *, int);
89 
90 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
91 	struct packet_type *pt, struct net_device *orig_dev);
92 static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
93 		   struct sk_buff *skb, u8 flags);
94 static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
95 
96 /* Call Back functions */
97 static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
98 static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
99 static void iucv_callback_connack(struct iucv_path *, u8 *);
100 static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
101 static void iucv_callback_connrej(struct iucv_path *, u8 *);
102 static void iucv_callback_shutdown(struct iucv_path *, u8 *);
103 
104 static struct iucv_sock_list iucv_sk_list = {
105 	.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
106 	.autobind_name = ATOMIC_INIT(0)
107 };
108 
109 static struct iucv_handler af_iucv_handler = {
110 	.path_pending	  = iucv_callback_connreq,
111 	.path_complete	  = iucv_callback_connack,
112 	.path_severed	  = iucv_callback_connrej,
113 	.message_pending  = iucv_callback_rx,
114 	.message_complete = iucv_callback_txdone,
115 	.path_quiesced	  = iucv_callback_shutdown,
116 };
117 
/* Copy an 8-byte IUCV name into the first half of a 16-byte user-data field. */
static inline void high_nmcpy(unsigned char *dst, char *src)
{
	memcpy(&dst[0], src, 8);
}
122 
/* Copy an 8-byte IUCV name into the second half of a 16-byte user-data field. */
static inline void low_nmcpy(unsigned char *dst, char *src)
{
	memcpy(dst + 8, src, 8);
}
127 
128 static int afiucv_pm_prepare(struct device *dev)
129 {
130 #ifdef CONFIG_PM_DEBUG
131 	printk(KERN_WARNING "afiucv_pm_prepare\n");
132 #endif
133 	return 0;
134 }
135 
136 static void afiucv_pm_complete(struct device *dev)
137 {
138 #ifdef CONFIG_PM_DEBUG
139 	printk(KERN_WARNING "afiucv_pm_complete\n");
140 #endif
141 }
142 
/**
 * afiucv_pm_freeze() - Freeze PM callback
 * @dev:	AFIUCV dummy device
 *
 * Sever all established IUCV communication paths so the guest can be
 * suspended, and drop any queued skbs on every socket.
 */
static int afiucv_pm_freeze(struct device *dev)
{
	struct iucv_sock *iucv;
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_freeze\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		iucv = iucv_sk(sk);
		switch (sk->sk_state) {
		/* only sockets with an active/closing path hold an IUCV
		 * path that must be severed before suspend */
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_CONNECTED:
			iucv_sever_path(sk, 0);
			break;
		case IUCV_OPEN:
		case IUCV_BOUND:
		case IUCV_LISTEN:
		case IUCV_CLOSED:
		default:
			break;
		}
		/* queued data cannot survive the severed path */
		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}
179 
/**
 * afiucv_pm_restore_thaw() - Thaw and restore PM callback
 * @dev:	AFIUCV dummy device
 *
 * Socket clean up after freeze: sockets that were connected before the
 * freeze lost their IUCV path, so flag them with EPIPE, move them to
 * IUCV_DISCONN and wake any waiters via sk_state_change().
 */
static int afiucv_pm_restore_thaw(struct device *dev)
{
	struct sock *sk;

#ifdef CONFIG_PM_DEBUG
	printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
#endif
	read_lock(&iucv_sk_list.lock);
	sk_for_each(sk, &iucv_sk_list.head) {
		switch (sk->sk_state) {
		case IUCV_CONNECTED:
			sk->sk_err = EPIPE;
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
			break;
		/* all other states need no repair after resume */
		case IUCV_DISCONN:
		case IUCV_CLOSING:
		case IUCV_LISTEN:
		case IUCV_BOUND:
		case IUCV_OPEN:
		default:
			break;
		}
	}
	read_unlock(&iucv_sk_list.lock);
	return 0;
}
213 
214 static const struct dev_pm_ops afiucv_pm_ops = {
215 	.prepare = afiucv_pm_prepare,
216 	.complete = afiucv_pm_complete,
217 	.freeze = afiucv_pm_freeze,
218 	.thaw = afiucv_pm_restore_thaw,
219 	.restore = afiucv_pm_restore_thaw,
220 };
221 
222 static struct device_driver af_iucv_driver = {
223 	.owner = THIS_MODULE,
224 	.name = "afiucv",
225 	.bus  = NULL,
226 	.pm   = &afiucv_pm_ops,
227 };
228 
229 /* dummy device used as trigger for PM functions */
230 static struct device *af_iucv_dev;
231 
232 /**
233  * iucv_msg_length() - Returns the length of an iucv message.
234  * @msg:	Pointer to struct iucv_message, MUST NOT be NULL
235  *
236  * The function returns the length of the specified iucv message @msg of data
237  * stored in a buffer and of data stored in the parameter list (PRMDATA).
238  *
239  * For IUCV_IPRMDATA, AF_IUCV uses the following convention to transport socket
240  * data:
241  *	PRMDATA[0..6]	socket data (max 7 bytes);
242  *	PRMDATA[7]	socket data length value (len is 0xff - PRMDATA[7])
243  *
244  * The socket data length is computed by subtracting the socket data length
245  * value from 0xFF.
246  * If the socket data len is greater 7, then PRMDATA can be used for special
247  * notifications (see iucv_sock_shutdown); and further,
248  * if the socket data len is > 7, the function returns 8.
249  *
250  * Use this function to allocate socket buffers to store iucv message data.
251  */
252 static inline size_t iucv_msg_length(struct iucv_message *msg)
253 {
254 	size_t datalen;
255 
256 	if (msg->flags & IUCV_IPRMDATA) {
257 		datalen = 0xff - msg->rmmsg[7];
258 		return (datalen < 8) ? datalen : 8;
259 	}
260 	return msg->length;
261 }
262 
/**
 * iucv_sock_in_state() - check for specific states
 * @sk:		sock structure
 * @state:	first iucv sk state
 * @state2:	second iucv sk state
 *
 * Returns true if the socket is in either the first or second state.
 */
static int iucv_sock_in_state(struct sock *sk, int state, int state2)
{
	return (sk->sk_state == state || sk->sk_state == state2);
}
275 
/**
 * iucv_below_msglim() - function to check if messages can be sent
 * @sk:		sock structure
 *
 * Returns true if the send queue length is lower than the message limit.
 * Always returns true if the socket is not connected (no iucv path for
 * checking the message limit).
 */
static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		/* classic VM IUCV: limited by the path's message limit */
		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
	else
		/* HiperSockets: limited by the peer's advertised window and
		 * by outstanding (pending) transmit confirmations */
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}
296 
/**
 * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
 *
 * Wakes all sleepers on the socket's wait queue and signals async
 * write-space availability (SIGIO/POLL_OUT).
 */
static void iucv_sock_wake_msglim(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
311 
/**
 * afiucv_hs_send() - send a message through HiperSockets transport
 * @imsg:	iucv message header to embed, or NULL for pure control frames
 * @sock:	sending socket
 * @skb:	payload skb; consumed (freed) on error
 * @flags:	AF_IUCV_FLAG_* control flags (0 for data)
 *
 * Builds the af_iucv transport header in front of the payload, translates
 * the name fields to EBCDIC, and hands the frame to the bound net_device.
 * A clone of the skb is kept on send_skb_q until tx notification.
 * Returns 0 on success or a negative errno.
 */
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
		   struct sk_buff *skb, u8 flags)
{
	struct iucv_sock *iucv = iucv_sk(sock);
	struct af_iucv_trans_hdr *phs_hdr;
	struct sk_buff *nskb;
	int err, confirm_recv = 0;

	phs_hdr = skb_push(skb, sizeof(*phs_hdr));
	memset(phs_hdr, 0, sizeof(*phs_hdr));
	skb_reset_network_header(skb);

	phs_hdr->magic = ETH_P_AF_IUCV;
	phs_hdr->version = 1;
	phs_hdr->flags = flags;
	if (flags == AF_IUCV_FLAG_SYN)
		phs_hdr->window = iucv->msglimit;
	else if ((flags == AF_IUCV_FLAG_WIN) || !flags) {
		/* piggy-back the receive-window update on data/WIN frames */
		confirm_recv = atomic_read(&iucv->msg_recv);
		phs_hdr->window = confirm_recv;
		if (confirm_recv)
			phs_hdr->flags = phs_hdr->flags | AF_IUCV_FLAG_WIN;
	}
	memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
	memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
	memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
	memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
	/* on-the-wire name fields are EBCDIC */
	ASCEBC(phs_hdr->destUserID, sizeof(phs_hdr->destUserID));
	ASCEBC(phs_hdr->destAppName, sizeof(phs_hdr->destAppName));
	ASCEBC(phs_hdr->srcUserID, sizeof(phs_hdr->srcUserID));
	ASCEBC(phs_hdr->srcAppName, sizeof(phs_hdr->srcAppName));
	if (imsg)
		memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));

	/* prepend a zeroed dummy ethernet header */
	skb_push(skb, ETH_HLEN);
	memset(skb->data, 0, ETH_HLEN);

	skb->dev = iucv->hs_dev;
	if (!skb->dev) {
		err = -ENODEV;
		goto err_free;
	}
	if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
		err = -ENETDOWN;
		goto err_free;
	}
	if (skb->len > skb->dev->mtu) {
		/* SEQPACKET must not silently truncate a record */
		if (sock->sk_type == SOCK_SEQPACKET) {
			err = -EMSGSIZE;
			goto err_free;
		}
		skb_trim(skb, skb->dev->mtu);
	}
	skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb) {
		err = -ENOMEM;
		goto err_free;
	}

	/* keep the clone queued until tx notification confirms delivery */
	skb_queue_tail(&iucv->send_skb_q, nskb);
	err = dev_queue_xmit(skb);	/* consumes skb regardless of result */
	if (net_xmit_eval(err)) {
		skb_unlink(nskb, &iucv->send_skb_q);
		kfree_skb(nskb);
	} else {
		/* window update went out: account the confirmed messages */
		atomic_sub(confirm_recv, &iucv->msg_recv);
		WARN_ON(atomic_read(&iucv->msg_recv) < 0);
	}
	return net_xmit_eval(err);

err_free:
	kfree_skb(skb);
	return err;
}
390 
/* Look up a socket by its 8-byte source name; returns NULL if unused.
 * Caller must hold iucv_sk_list.lock.
 */
static struct sock *__iucv_get_sock_by_name(char *nm)
{
	struct sock *sk;

	sk_for_each(sk, &iucv_sk_list.head)
		if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
			return sk;

	return NULL;
}
401 
402 static void iucv_sock_destruct(struct sock *sk)
403 {
404 	skb_queue_purge(&sk->sk_receive_queue);
405 	skb_queue_purge(&sk->sk_error_queue);
406 
407 	sk_mem_reclaim(sk);
408 
409 	if (!sock_flag(sk, SOCK_DEAD)) {
410 		pr_err("Attempt to release alive iucv socket %p\n", sk);
411 		return;
412 	}
413 
414 	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
415 	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
416 	WARN_ON(sk->sk_wmem_queued);
417 	WARN_ON(sk->sk_forward_alloc);
418 }
419 
/* Cleanup Listen: close and kill every connection still sitting on the
 * listener's accept queue, then mark the listener closed.
 */
static void iucv_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = iucv_accept_dequeue(parent, NULL))) {
		iucv_sock_close(sk);
		iucv_sock_kill(sk);
	}

	parent->sk_state = IUCV_CLOSED;
}
433 
/* Kill socket (only if zapped and orphaned): unlink it from the global
 * socket list, mark it dead and drop the list's reference.
 */
static void iucv_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	iucv_sock_unlink(&iucv_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
444 
/* Terminate an IUCV path
 * @with_user_data: when set, pass the EBCDIC-encoded src/dst names as
 * user data on the sever so the peer can identify the connection.
 * iucv->path is cleared before severing to avoid re-entry on callbacks.
 */
static void iucv_sever_path(struct sock *sk, int with_user_data)
{
	unsigned char user_data[16];
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_path *path = iucv->path;

	if (iucv->path) {
		iucv->path = NULL;
		if (with_user_data) {
			low_nmcpy(user_data, iucv->src_name);
			high_nmcpy(user_data, iucv->dst_name);
			ASCEBC(user_data, sizeof(user_data));
			pr_iucv->path_sever(path, user_data);
		} else
			pr_iucv->path_sever(path, NULL);
		iucv_path_free(path);
	}
}
464 
/* Send controlling flags through an IUCV socket for HIPER transport.
 * Temporarily clears SEND_SHUTDOWN so the control frame is transmitted
 * even on a write-shutdown socket, then restores the original state.
 * Returns 0 or a negative errno from allocation/transmit.
 */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
	int err = 0;
	int blen;
	struct sk_buff *skb;
	u8 shutdown = 0;

	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		/* controlling flags should be sent anyway */
		shutdown = sk->sk_shutdown;
		sk->sk_shutdown &= RCV_SHUTDOWN;
	}
	skb = sock_alloc_send_skb(sk, blen, 1, &err);
	if (skb) {
		/* reserve room for the transport + ethernet headers */
		skb_reserve(skb, blen);
		err = afiucv_hs_send(NULL, sk, skb, flags);
	}
	if (shutdown)
		sk->sk_shutdown = shutdown;
	return err;
}
488 
/* Close an IUCV socket.
 * The switch deliberately falls through CONNECTED -> DISCONN -> CLOSING
 * -> default so each later stage of the teardown also runs: linger-wait
 * for in-flight data, purge the queues, then sever the IUCV path.
 */
static void iucv_sock_close(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned long timeo;
	int err = 0;

	lock_sock(sk);

	switch (sk->sk_state) {
	case IUCV_LISTEN:
		iucv_sock_cleanup_listen(sk);
		break;

	case IUCV_CONNECTED:
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			/* tell the peer we are going away */
			err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
			sk->sk_state = IUCV_DISCONN;
			sk->sk_state_change(sk);
		}
	case IUCV_DISCONN:   /* fall through */
		sk->sk_state = IUCV_CLOSING;
		sk->sk_state_change(sk);

		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
			/* give unsent data a chance to drain */
			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
				timeo = sk->sk_lingertime;
			else
				timeo = IUCV_DISCONN_TIMEOUT;
			iucv_sock_wait(sk,
					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
					timeo);
		}

	case IUCV_CLOSING:   /* fall through */
		sk->sk_state = IUCV_CLOSED;
		sk->sk_state_change(sk);

		sk->sk_err = ECONNRESET;
		sk->sk_state_change(sk);

		skb_queue_purge(&iucv->send_skb_q);
		skb_queue_purge(&iucv->backlog_skb_q);

	default:   /* fall through */
		iucv_sever_path(sk, 1);
	}

	if (iucv->hs_dev) {
		dev_put(iucv->hs_dev);
		iucv->hs_dev = NULL;
		sk->sk_bound_dev_if = 0;
	}

	/* mark socket for deletion by iucv_sock_kill() */
	sock_set_flag(sk, SOCK_ZAPPED);

	release_sock(sk);
}
548 
/* Initialize a freshly allocated socket; a child accepted from @parent
 * inherits its type and LSM security state.
 */
static void iucv_sock_init(struct sock *sk, struct sock *parent)
{
	if (parent) {
		sk->sk_type = parent->sk_type;
		security_sk_clone(parent, sk);
	}
}
556 
557 static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio, int kern)
558 {
559 	struct sock *sk;
560 	struct iucv_sock *iucv;
561 
562 	sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
563 	if (!sk)
564 		return NULL;
565 	iucv = iucv_sk(sk);
566 
567 	sock_init_data(sock, sk);
568 	INIT_LIST_HEAD(&iucv->accept_q);
569 	spin_lock_init(&iucv->accept_q_lock);
570 	skb_queue_head_init(&iucv->send_skb_q);
571 	INIT_LIST_HEAD(&iucv->message_q.list);
572 	spin_lock_init(&iucv->message_q.lock);
573 	skb_queue_head_init(&iucv->backlog_skb_q);
574 	iucv->send_tag = 0;
575 	atomic_set(&iucv->pendings, 0);
576 	iucv->flags = 0;
577 	iucv->msglimit = 0;
578 	atomic_set(&iucv->msg_sent, 0);
579 	atomic_set(&iucv->msg_recv, 0);
580 	iucv->path = NULL;
581 	iucv->sk_txnotify = afiucv_hs_callback_txnotify;
582 	memset(&iucv->src_user_id , 0, 32);
583 	if (pr_iucv)
584 		iucv->transport = AF_IUCV_TRANS_IUCV;
585 	else
586 		iucv->transport = AF_IUCV_TRANS_HIPER;
587 
588 	sk->sk_destruct = iucv_sock_destruct;
589 	sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
590 	sk->sk_allocation = GFP_DMA;
591 
592 	sock_reset_flag(sk, SOCK_ZAPPED);
593 
594 	sk->sk_protocol = proto;
595 	sk->sk_state	= IUCV_OPEN;
596 
597 	iucv_sock_link(&iucv_sk_list, sk);
598 	return sk;
599 }
600 
601 /* Create an IUCV socket */
602 static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
603 			    int kern)
604 {
605 	struct sock *sk;
606 
607 	if (protocol && protocol != PF_IUCV)
608 		return -EPROTONOSUPPORT;
609 
610 	sock->state = SS_UNCONNECTED;
611 
612 	switch (sock->type) {
613 	case SOCK_STREAM:
614 		sock->ops = &iucv_sock_ops;
615 		break;
616 	case SOCK_SEQPACKET:
617 		/* currently, proto ops can handle both sk types */
618 		sock->ops = &iucv_sock_ops;
619 		break;
620 	default:
621 		return -ESOCKTNOSUPPORT;
622 	}
623 
624 	sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
625 	if (!sk)
626 		return -ENOMEM;
627 
628 	iucv_sock_init(sk, NULL);
629 
630 	return 0;
631 }
632 
/* Add a socket to the global AF_IUCV socket list under the list lock. */
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock_bh(&l->lock);
}
639 
/* Remove a socket from the global AF_IUCV socket list under the list lock. */
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
{
	write_lock_bh(&l->lock);
	sk_del_node_init(sk);
	write_unlock_bh(&l->lock);
}
646 
/* Queue a child socket on @parent's accept list; takes a reference on
 * @sk and bumps the parent's accept backlog counter.
 */
void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(parent);

	sock_hold(sk);
	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	iucv_sk(sk)->parent = parent;
	sk_acceptq_added(parent);
}
659 
/* Remove @sk from its parent's accept list; drops the reference taken
 * by iucv_accept_enqueue() and clears the parent link.
 */
void iucv_accept_unlink(struct sock *sk)
{
	unsigned long flags;
	struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);

	spin_lock_irqsave(&par->accept_q_lock, flags);
	list_del_init(&iucv_sk(sk)->accept_q);
	spin_unlock_irqrestore(&par->accept_q_lock, flags);
	sk_acceptq_removed(iucv_sk(sk)->parent);
	iucv_sk(sk)->parent = NULL;
	sock_put(sk);
}
672 
/* Take the first usable child off @parent's accept queue.
 * Closed children are unlinked and skipped. A child in CONNECTED or
 * DISCONN state is unlinked, grafted onto @newsock (if given) and
 * returned. With @newsock == NULL every queued child is eligible
 * (used by iucv_sock_cleanup_listen() to drain the queue).
 * Returns NULL when no suitable child is queued.
 */
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct iucv_sock *isk, *n;
	struct sock *sk;

	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
		sk = (struct sock *) isk;
		lock_sock(sk);

		if (sk->sk_state == IUCV_CLOSED) {
			iucv_accept_unlink(sk);
			release_sock(sk);
			continue;
		}

		if (sk->sk_state == IUCV_CONNECTED ||
		    sk->sk_state == IUCV_DISCONN ||
		    !newsock) {
			iucv_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}
	return NULL;
}
703 
704 static void __iucv_auto_name(struct iucv_sock *iucv)
705 {
706 	char name[12];
707 
708 	sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
709 	while (__iucv_get_sock_by_name(name)) {
710 		sprintf(name, "%08x",
711 			atomic_inc_return(&iucv_sk_list.autobind_name));
712 	}
713 	memcpy(iucv->src_name, name, 8);
714 }
715 
/* Bind an unbound socket to an IUCV name.
 * If the requested user id matches the local VM user id, the classic
 * VM IUCV transport is used; otherwise the netdevs are scanned for a
 * HiperSockets device whose permanent address matches the user id.
 * Returns 0 on success or a negative errno.
 */
static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
			  int addr_len)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv;
	int err = 0;
	struct net_device *dev;
	char uid[9];

	/* Verify the input sockaddr */
	if (addr_len < sizeof(struct sockaddr_iucv) ||
	    addr->sa_family != AF_IUCV)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_state != IUCV_OPEN) {
		err = -EBADFD;
		goto done;
	}

	write_lock_bh(&iucv_sk_list.lock);

	iucv = iucv_sk(sk);
	if (__iucv_get_sock_by_name(sa->siucv_name)) {
		err = -EADDRINUSE;
		goto done_unlock;
	}
	if (iucv->path)
		goto done_unlock;

	/* Bind the socket */
	if (pr_iucv)
		if (!memcmp(sa->siucv_user_id, iucv_userid, 8))
			goto vm_bind; /* VM IUCV transport */

	/* try hiper transport */
	/* NOTE(review): sizeof(uid) is 9 but siucv_user_id is 8 bytes; the
	 * 9th byte is never used (comparisons below are 8 bytes) — confirm */
	memcpy(uid, sa->siucv_user_id, sizeof(uid));
	ASCEBC(uid, 8);
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (!memcmp(dev->perm_addr, uid, 8)) {
			memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
			/* Check for uninitialized siucv_name */
			if (strncmp(sa->siucv_name, "        ", 8) == 0)
				__iucv_auto_name(iucv);
			else
				memcpy(iucv->src_name, sa->siucv_name, 8);
			sk->sk_bound_dev_if = dev->ifindex;
			iucv->hs_dev = dev;
			dev_hold(dev);
			sk->sk_state = IUCV_BOUND;
			iucv->transport = AF_IUCV_TRANS_HIPER;
			if (!iucv->msglimit)
				iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
			rcu_read_unlock();
			goto done_unlock;
		}
	}
	rcu_read_unlock();
vm_bind:
	if (pr_iucv) {
		/* use local userid for backward compat */
		memcpy(iucv->src_name, sa->siucv_name, 8);
		memcpy(iucv->src_user_id, iucv_userid, 8);
		sk->sk_state = IUCV_BOUND;
		iucv->transport = AF_IUCV_TRANS_IUCV;
		if (!iucv->msglimit)
			iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
		goto done_unlock;
	}
	/* found no dev to bind */
	err = -ENODEV;
done_unlock:
	/* Release the socket list lock */
	write_unlock_bh(&iucv_sk_list.lock);
done:
	release_sock(sk);
	return err;
}
797 
/* Automatically bind an unbound socket: requires the classic VM IUCV
 * transport; uses the local VM user id and an auto-generated name.
 */
static int iucv_sock_autobind(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	int err = 0;

	if (unlikely(!pr_iucv))
		return -EPROTO;

	memcpy(iucv->src_user_id, iucv_userid, 8);

	write_lock_bh(&iucv_sk_list.lock);
	__iucv_auto_name(iucv);
	write_unlock_bh(&iucv_sk_list.lock);

	if (!iucv->msglimit)
		iucv->msglimit = IUCV_QUEUELEN_DEFAULT;

	return err;
}
818 
/* Allocate an IUCV path and connect it to the target named in @addr.
 * The 16-byte user data carries dst name (high half) and src name
 * (low half) in EBCDIC. IUCV condition codes from path_connect are
 * mapped to errnos; on failure the path is freed and cleared.
 */
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned char user_data[16];
	int err;

	high_nmcpy(user_data, sa->siucv_name);
	low_nmcpy(user_data, iucv->src_name);
	ASCEBC(user_data, sizeof(user_data));

	/* Create path. */
	iucv->path = iucv_path_alloc(iucv->msglimit,
				     IUCV_IPRMDATA, GFP_KERNEL);
	if (!iucv->path) {
		err = -ENOMEM;
		goto done;
	}
	err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
				    sa->siucv_user_id, NULL, user_data,
				    sk);
	if (err) {
		iucv_path_free(iucv->path);
		iucv->path = NULL;
		switch (err) {
		case 0x0b:	/* Target communicator is not logged on */
			err = -ENETUNREACH;
			break;
		case 0x0d:	/* Max connections for this guest exceeded */
		case 0x0e:	/* Max connections for target guest exceeded */
			err = -EAGAIN;
			break;
		case 0x0f:	/* Missing IUCV authorization */
			err = -EACCES;
			break;
		default:
			err = -ECONNREFUSED;
			break;
		}
	}
done:
	return err;
}
863 
/* Connect an unconnected socket to the peer in @addr.
 * Autobinds an OPEN socket first (VM IUCV only; HiperSockets requires
 * an explicit bind), starts the transport-specific connect, then waits
 * for the connection to reach CONNECTED (or fail).
 */
static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
			     int alen, int flags)
{
	struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	int err;

	if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
		return -EINVAL;

	if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
		return -EBADFD;

	if (sk->sk_state == IUCV_OPEN &&
	    iucv->transport == AF_IUCV_TRANS_HIPER)
		return -EBADFD; /* explicit bind required */

	if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
		return -EINVAL;

	if (sk->sk_state == IUCV_OPEN) {
		err = iucv_sock_autobind(sk);
		if (unlikely(err))
			return err;
	}

	lock_sock(sk);

	/* Set the destination information */
	memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
	memcpy(iucv->dst_name, sa->siucv_name, 8);

	if (iucv->transport == AF_IUCV_TRANS_HIPER)
		err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
	else
		err = afiucv_path_connect(sock, addr);
	if (err)
		goto done;

	/* wait for the peer's answer (SYN-ACK / path complete callback) */
	if (sk->sk_state != IUCV_CONNECTED)
		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
							    IUCV_DISCONN),
				     sock_sndtimeo(sk, flags & O_NONBLOCK));

	if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
		err = -ECONNREFUSED;

	if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
		iucv_sever_path(sk, 0);

done:
	release_sock(sk);
	return err;
}
920 
921 /* Move a socket into listening state. */
922 static int iucv_sock_listen(struct socket *sock, int backlog)
923 {
924 	struct sock *sk = sock->sk;
925 	int err;
926 
927 	lock_sock(sk);
928 
929 	err = -EINVAL;
930 	if (sk->sk_state != IUCV_BOUND)
931 		goto done;
932 
933 	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
934 		goto done;
935 
936 	sk->sk_max_ack_backlog = backlog;
937 	sk->sk_ack_backlog = 0;
938 	sk->sk_state = IUCV_LISTEN;
939 	err = 0;
940 
941 done:
942 	release_sock(sk);
943 	return err;
944 }
945 
/* Accept a pending connection.
 * Blocks (up to the receive timeout) until a child socket is available
 * on the listener's accept queue, handling signals and the listener
 * leaving LISTEN state while the lock was dropped.
 */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
			    int flags, bool kern)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != IUCV_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	/* Wait for an incoming connection */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* listener may have been closed while unlocked */
		if (sk->sk_state != IUCV_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

done:
	release_sock(sk);
	return err;
}
1000 
/* Fill @addr with the peer's (@peer != 0) or the local name; the unused
 * sockaddr_iucv fields are zeroed. Returns the address length.
 */
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
			     int peer)
{
	struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);

	addr->sa_family = AF_IUCV;

	if (peer) {
		memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
		memcpy(siucv->siucv_name, iucv->dst_name, 8);
	} else {
		memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
		memcpy(siucv->siucv_name, iucv->src_name, 8);
	}
	memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
	memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
	memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));

	return sizeof(struct sockaddr_iucv);
}
1023 
/**
 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
 * @path:	IUCV path
 * @msg:	Pointer to a struct iucv_message
 * @skb:	The socket data to send, skb->len MUST BE <= 7
 *
 * Send the socket data in the parameter list in the iucv message
 * (IUCV_IPRMDATA). The socket data is stored at index 0 to 6 in the parameter
 * list and the socket data len at index 7 (last byte).
 * See also iucv_msg_length().
 *
 * Returns the error code from the iucv_message_send() call.
 */
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg,
			  struct sk_buff *skb)
{
	u8 prmdata[8];

	memcpy(prmdata, (void *) skb->data, skb->len);
	/* length byte encoding mirrors iucv_msg_length(): 0xff - len */
	prmdata[7] = 0xff - (u8) skb->len;
	return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
				 (void *) prmdata, 8);
}
1047 
1048 static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
1049 			     size_t len)
1050 {
1051 	struct sock *sk = sock->sk;
1052 	struct iucv_sock *iucv = iucv_sk(sk);
1053 	size_t headroom = 0;
1054 	size_t linear;
1055 	struct sk_buff *skb;
1056 	struct iucv_message txmsg = {0};
1057 	struct cmsghdr *cmsg;
1058 	int cmsg_done;
1059 	long timeo;
1060 	char user_id[9];
1061 	char appl_id[9];
1062 	int err;
1063 	int noblock = msg->msg_flags & MSG_DONTWAIT;
1064 
1065 	err = sock_error(sk);
1066 	if (err)
1067 		return err;
1068 
1069 	if (msg->msg_flags & MSG_OOB)
1070 		return -EOPNOTSUPP;
1071 
1072 	/* SOCK_SEQPACKET: we do not support segmented records */
1073 	if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
1074 		return -EOPNOTSUPP;
1075 
1076 	lock_sock(sk);
1077 
1078 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
1079 		err = -EPIPE;
1080 		goto out;
1081 	}
1082 
1083 	/* Return if the socket is not in connected state */
1084 	if (sk->sk_state != IUCV_CONNECTED) {
1085 		err = -ENOTCONN;
1086 		goto out;
1087 	}
1088 
1089 	/* initialize defaults */
1090 	cmsg_done   = 0;	/* check for duplicate headers */
1091 	txmsg.class = 0;
1092 
1093 	/* iterate over control messages */
1094 	for_each_cmsghdr(cmsg, msg) {
1095 		if (!CMSG_OK(msg, cmsg)) {
1096 			err = -EINVAL;
1097 			goto out;
1098 		}
1099 
1100 		if (cmsg->cmsg_level != SOL_IUCV)
1101 			continue;
1102 
1103 		if (cmsg->cmsg_type & cmsg_done) {
1104 			err = -EINVAL;
1105 			goto out;
1106 		}
1107 		cmsg_done |= cmsg->cmsg_type;
1108 
1109 		switch (cmsg->cmsg_type) {
1110 		case SCM_IUCV_TRGCLS:
1111 			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
1112 				err = -EINVAL;
1113 				goto out;
1114 			}
1115 
1116 			/* set iucv message target class */
1117 			memcpy(&txmsg.class,
1118 				(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
1119 
1120 			break;
1121 
1122 		default:
1123 			err = -EINVAL;
1124 			goto out;
1125 		}
1126 	}
1127 
1128 	/* allocate one skb for each iucv message:
1129 	 * this is fine for SOCK_SEQPACKET (unless we want to support
1130 	 * segmented records using the MSG_EOR flag), but
1131 	 * for SOCK_STREAM we might want to improve it in future */
1132 	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1133 		headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
1134 		linear = len;
1135 	} else {
1136 		if (len < PAGE_SIZE) {
1137 			linear = len;
1138 		} else {
1139 			/* In nonlinear "classic" iucv skb,
1140 			 * reserve space for iucv_array
1141 			 */
1142 			headroom = sizeof(struct iucv_array) *
1143 				   (MAX_SKB_FRAGS + 1);
1144 			linear = PAGE_SIZE - headroom;
1145 		}
1146 	}
1147 	skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
1148 				   noblock, &err, 0);
1149 	if (!skb)
1150 		goto out;
1151 	if (headroom)
1152 		skb_reserve(skb, headroom);
1153 	skb_put(skb, linear);
1154 	skb->len = len;
1155 	skb->data_len = len - linear;
1156 	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
1157 	if (err)
1158 		goto fail;
1159 
1160 	/* wait if outstanding messages for iucv path has reached */
1161 	timeo = sock_sndtimeo(sk, noblock);
1162 	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
1163 	if (err)
1164 		goto fail;
1165 
1166 	/* return -ECONNRESET if the socket is no longer connected */
1167 	if (sk->sk_state != IUCV_CONNECTED) {
1168 		err = -ECONNRESET;
1169 		goto fail;
1170 	}
1171 
1172 	/* increment and save iucv message tag for msg_completion cbk */
1173 	txmsg.tag = iucv->send_tag++;
1174 	IUCV_SKB_CB(skb)->tag = txmsg.tag;
1175 
1176 	if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1177 		atomic_inc(&iucv->msg_sent);
1178 		err = afiucv_hs_send(&txmsg, sk, skb, 0);
1179 		if (err) {
1180 			atomic_dec(&iucv->msg_sent);
1181 			goto out;
1182 		}
1183 	} else { /* Classic VM IUCV transport */
1184 		skb_queue_tail(&iucv->send_skb_q, skb);
1185 
1186 		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
1187 		    skb->len <= 7) {
1188 			err = iucv_send_iprm(iucv->path, &txmsg, skb);
1189 
1190 			/* on success: there is no message_complete callback */
1191 			/* for an IPRMDATA msg; remove skb from send queue   */
1192 			if (err == 0) {
1193 				skb_unlink(skb, &iucv->send_skb_q);
1194 				kfree_skb(skb);
1195 			}
1196 
1197 			/* this error should never happen since the	*/
1198 			/* IUCV_IPRMDATA path flag is set... sever path */
1199 			if (err == 0x15) {
1200 				pr_iucv->path_sever(iucv->path, NULL);
1201 				skb_unlink(skb, &iucv->send_skb_q);
1202 				err = -EPIPE;
1203 				goto fail;
1204 			}
1205 		} else if (skb_is_nonlinear(skb)) {
1206 			struct iucv_array *iba = (struct iucv_array *)skb->head;
1207 			int i;
1208 
1209 			/* skip iucv_array lying in the headroom */
1210 			iba[0].address = (u32)(addr_t)skb->data;
1211 			iba[0].length = (u32)skb_headlen(skb);
1212 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1213 				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1214 
1215 				iba[i + 1].address =
1216 					(u32)(addr_t)skb_frag_address(frag);
1217 				iba[i + 1].length = (u32)skb_frag_size(frag);
1218 			}
1219 			err = pr_iucv->message_send(iucv->path, &txmsg,
1220 						    IUCV_IPBUFLST, 0,
1221 						    (void *)iba, skb->len);
1222 		} else { /* non-IPRM Linear skb */
1223 			err = pr_iucv->message_send(iucv->path, &txmsg,
1224 					0, 0, (void *)skb->data, skb->len);
1225 		}
1226 		if (err) {
1227 			if (err == 3) {
1228 				user_id[8] = 0;
1229 				memcpy(user_id, iucv->dst_user_id, 8);
1230 				appl_id[8] = 0;
1231 				memcpy(appl_id, iucv->dst_name, 8);
1232 				pr_err(
1233 		"Application %s on z/VM guest %s exceeds message limit\n",
1234 					appl_id, user_id);
1235 				err = -EAGAIN;
1236 			} else {
1237 				err = -EPIPE;
1238 			}
1239 			skb_unlink(skb, &iucv->send_skb_q);
1240 			goto fail;
1241 		}
1242 	}
1243 
1244 	release_sock(sk);
1245 	return len;
1246 
1247 fail:
1248 	kfree_skb(skb);
1249 out:
1250 	release_sock(sk);
1251 	return err;
1252 }
1253 
1254 static struct sk_buff *alloc_iucv_recv_skb(unsigned long len)
1255 {
1256 	size_t headroom, linear;
1257 	struct sk_buff *skb;
1258 	int err;
1259 
1260 	if (len < PAGE_SIZE) {
1261 		headroom = 0;
1262 		linear = len;
1263 	} else {
1264 		headroom = sizeof(struct iucv_array) * (MAX_SKB_FRAGS + 1);
1265 		linear = PAGE_SIZE - headroom;
1266 	}
1267 	skb = alloc_skb_with_frags(headroom + linear, len - linear,
1268 				   0, &err, GFP_ATOMIC | GFP_DMA);
1269 	WARN_ONCE(!skb,
1270 		  "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
1271 		  len, err);
1272 	if (skb) {
1273 		if (headroom)
1274 			skb_reserve(skb, headroom);
1275 		skb_put(skb, linear);
1276 		skb->len = len;
1277 		skb->data_len = len - linear;
1278 	}
1279 	return skb;
1280 }
1281 
/* iucv_process_message() - Receive a single outstanding IUCV message
 *
 * Fills @skb with the payload of @msg (pulling it in from the IUCV path
 * unless it is an inline IPRM message) and queues it on the socket's
 * receive queue, falling back to the backlog queue when that fails.
 * The skb is consumed in all cases (queued or freed).
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
				 struct iucv_path *path,
				 struct iucv_message *msg)
{
	int rc;
	unsigned int len;

	len = iucv_msg_length(msg);

	/* store msg target class in the second 4 bytes of skb ctrl buffer */
	/* Note: the first 4 bytes are reserved for msg tag */
	IUCV_SKB_CB(skb)->class = msg->class;

	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */
	if ((msg->flags & IUCV_IPRMDATA) && len > 7) {
		if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) {
			/* peer signalled shutdown: deliver a zero-length
			 * skb, which recvmsg maps to RCV_SHUTDOWN/EOF */
			skb->data = NULL;
			skb->len = 0;
		}
	} else {
		if (skb_is_nonlinear(skb)) {
			/* describe the linear part and every frag in the
			 * iucv_array reserved in the skb headroom, then
			 * receive via buffer list (IUCV_IPBUFLST) */
			struct iucv_array *iba = (struct iucv_array *)skb->head;
			int i;

			iba[0].address = (u32)(addr_t)skb->data;
			iba[0].length = (u32)skb_headlen(skb);
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

				iba[i + 1].address =
					(u32)(addr_t)skb_frag_address(frag);
				iba[i + 1].length = (u32)skb_frag_size(frag);
			}
			rc = pr_iucv->message_receive(path, msg,
					      IUCV_IPBUFLST,
					      (void *)iba, len, NULL);
		} else {
			rc = pr_iucv->message_receive(path, msg,
					      msg->flags & IUCV_IPRMDATA,
					      skb->data, len, NULL);
		}
		/* a failed receive drops the message silently */
		if (rc) {
			kfree_skb(skb);
			return;
		}
		WARN_ON_ONCE(skb->len != len);
	}

	IUCV_SKB_CB(skb)->offset = 0;
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
		kfree_skb(skb);
		return;
	}
	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}
1343 
/* iucv_process_message_q() - Process outstanding IUCV messages
 *
 * Drains the list of messages that could not be received earlier:
 * allocates a receive skb for each pending message and hands it to
 * iucv_process_message().  Stops early when skb allocation fails or
 * when delivery starts backing up into the backlog queue.
 *
 * Locking: must be called with message_q.lock held
 */
static void iucv_process_message_q(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *p, *n;

	list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
		/* no memory for a receive buffer: retry on a later call */
		skb = alloc_iucv_recv_skb(iucv_msg_length(&p->msg));
		if (!skb)
			break;
		iucv_process_message(sk, skb, p->path, &p->msg);
		list_del(&p->list);
		kfree(p);
		/* receive queue filled up: keep remaining msgs pending */
		if (!skb_queue_empty(&iucv->backlog_skb_q))
			break;
	}
}
1365 
/* iucv_sock_recvmsg() - receive data from an AF_IUCV socket
 *
 * Dequeues the next skb (honoring MSG_PEEK), copies up to @len bytes to
 * user space, attaches the message's target class as a SCM_IUCV_TRGCLS
 * control message, and then refills the receive queue from the backlog
 * queue and the list of still-outstanding IUCV messages.
 * Returns the number of bytes copied (the full record length for
 * truncated SOCK_SEQPACKET reads) or a negative error code.
 */
static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			     size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	unsigned int copied, rlen;
	struct sk_buff *skb, *rskb, *cskb;
	int err = 0;
	u32 offset;

	/* peer disconnected and nothing is left to deliver: report EOF */
	if ((sk->sk_state == IUCV_DISCONN) &&
	    skb_queue_empty(&iucv->backlog_skb_q) &&
	    skb_queue_empty(&sk->sk_receive_queue) &&
	    list_empty(&iucv->message_q.list))
		return 0;

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	/* receive/dequeue next skb:
	 * the function understands MSG_PEEK and, thus, does not dequeue skb */
	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;
		return err;
	}

	/* offset is non-zero if a SOCK_STREAM skb was read partially before */
	offset = IUCV_SKB_CB(skb)->offset;
	rlen   = skb->len - offset;		/* real length of skb */
	copied = min_t(unsigned int, rlen, len);
	/* a zero-length skb carries the peer's shutdown indication */
	if (!rlen)
		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;

	cskb = skb;
	if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
		/* copy to user space failed: put the skb back in front
		 * (unless it was only peeked and never dequeued) */
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return -EFAULT;
	}

	/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (copied < rlen)
			msg->msg_flags |= MSG_TRUNC;
		/* each iucv message contains a complete record */
		msg->msg_flags |= MSG_EOR;
	}

	/* create control message to store iucv msg target class:
	 * get the trgcls from the control buffer of the skb due to
	 * fragmentation of original iucv message. */
	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
		       sizeof(IUCV_SKB_CB(skb)->class),
		       (void *)&IUCV_SKB_CB(skb)->class);
	if (err) {
		if (!(flags & MSG_PEEK))
			skb_queue_head(&sk->sk_receive_queue, skb);
		return err;
	}

	/* Mark read part of skb as used */
	if (!(flags & MSG_PEEK)) {

		/* SOCK_STREAM: re-queue skb if it contains unreceived data */
		if (sk->sk_type == SOCK_STREAM) {
			if (copied < rlen) {
				IUCV_SKB_CB(skb)->offset = offset + copied;
				skb_queue_head(&sk->sk_receive_queue, skb);
				goto done;
			}
		}

		kfree_skb(skb);
		if (iucv->transport == AF_IUCV_TRANS_HIPER) {
			/* count the consumed message against the flow
			 * control window; exceeding msglimit indicates a
			 * protocol violation, so tear the socket down */
			atomic_inc(&iucv->msg_recv);
			if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
				WARN_ON(1);
				iucv_sock_close(sk);
				return -EFAULT;
			}
		}

		/* Queue backlog skbs */
		spin_lock_bh(&iucv->message_q.lock);
		rskb = skb_dequeue(&iucv->backlog_skb_q);
		while (rskb) {
			IUCV_SKB_CB(rskb)->offset = 0;
			if (__sock_queue_rcv_skb(sk, rskb)) {
				/* handle rcv queue full */
				skb_queue_head(&iucv->backlog_skb_q,
						rskb);
				break;
			}
			rskb = skb_dequeue(&iucv->backlog_skb_q);
		}
		if (skb_queue_empty(&iucv->backlog_skb_q)) {
			/* backlog drained: pull in messages still pending
			 * on the IUCV path */
			if (!list_empty(&iucv->message_q.list))
				iucv_process_message_q(sk);
			/* HiperSockets: once half the window is consumed,
			 * send a WIN control msg so the peer may resume */
			if (atomic_read(&iucv->msg_recv) >=
							iucv->msglimit / 2) {
				err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
				if (err) {
					sk->sk_state = IUCV_DISCONN;
					sk->sk_state_change(sk);
				}
			}
		}
		spin_unlock_bh(&iucv->message_q.lock);
	}

done:
	/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
	if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
		copied = rlen;

	return copied;
}
1485 
1486 static inline __poll_t iucv_accept_poll(struct sock *parent)
1487 {
1488 	struct iucv_sock *isk, *n;
1489 	struct sock *sk;
1490 
1491 	list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) {
1492 		sk = (struct sock *) isk;
1493 
1494 		if (sk->sk_state == IUCV_CONNECTED)
1495 			return EPOLLIN | EPOLLRDNORM;
1496 	}
1497 
1498 	return 0;
1499 }
1500 
1501 __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
1502 			    poll_table *wait)
1503 {
1504 	struct sock *sk = sock->sk;
1505 	__poll_t mask = 0;
1506 
1507 	sock_poll_wait(file, sock, wait);
1508 
1509 	if (sk->sk_state == IUCV_LISTEN)
1510 		return iucv_accept_poll(sk);
1511 
1512 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
1513 		mask |= EPOLLERR |
1514 			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
1515 
1516 	if (sk->sk_shutdown & RCV_SHUTDOWN)
1517 		mask |= EPOLLRDHUP;
1518 
1519 	if (sk->sk_shutdown == SHUTDOWN_MASK)
1520 		mask |= EPOLLHUP;
1521 
1522 	if (!skb_queue_empty(&sk->sk_receive_queue) ||
1523 	    (sk->sk_shutdown & RCV_SHUTDOWN))
1524 		mask |= EPOLLIN | EPOLLRDNORM;
1525 
1526 	if (sk->sk_state == IUCV_CLOSED)
1527 		mask |= EPOLLHUP;
1528 
1529 	if (sk->sk_state == IUCV_DISCONN)
1530 		mask |= EPOLLIN;
1531 
1532 	if (sock_writeable(sk) && iucv_below_msglim(sk))
1533 		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
1534 	else
1535 		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1536 
1537 	return mask;
1538 }
1539 
/* iucv_sock_shutdown() - shut down one or both directions of a socket
 *
 * @how: SHUT_RD, SHUT_WR or SHUT_RDWR as passed in by user space
 *
 * For the send direction: on classic IUCV a special inline (IPRM)
 * shutdown message is sent to the peer, on HiperSockets a SHT control
 * message.  For the receive direction the classic IUCV path is
 * quiesced and any unread receive data is dropped.
 */
static int iucv_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct iucv_message txmsg;
	int err = 0;

	/* map SHUT_RD/SHUT_WR/SHUT_RDWR (0..2) onto the
	 * RCV_SHUTDOWN/SEND_SHUTDOWN bit masks (1..3) */
	how++;

	if ((how & ~SHUTDOWN_MASK) || !how)
		return -EINVAL;

	lock_sock(sk);
	/* shutdown is only meaningful on an established connection */
	switch (sk->sk_state) {
	case IUCV_LISTEN:
	case IUCV_DISCONN:
	case IUCV_CLOSING:
	case IUCV_CLOSED:
		err = -ENOTCONN;
		goto fail;
	default:
		break;
	}

	if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
		if (iucv->transport == AF_IUCV_TRANS_IUCV) {
			/* notify the peer via the 8-byte IPRM shutdown
			 * message that no more data will be sent */
			txmsg.class = 0;
			txmsg.tag = 0;
			err = pr_iucv->message_send(iucv->path, &txmsg,
				IUCV_IPRMDATA, 0, (void *) iprm_shutdown, 8);
			if (err) {
				/* translate IUCV send return codes into
				 * errno values for user space */
				switch (err) {
				case 1:
					err = -ENOTCONN;
					break;
				case 2:
					err = -ECONNRESET;
					break;
				default:
					err = -ENOTCONN;
					break;
				}
			}
		} else
			iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
	}

	sk->sk_shutdown |= how;
	if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
		if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
		    iucv->path) {
			/* stop the peer from sending further messages */
			err = pr_iucv->path_quiesce(iucv->path, NULL);
			if (err)
				err = -ENOTCONN;
/*			skb_queue_purge(&sk->sk_receive_queue); */
		}
		/* drop any data that was received but never read */
		skb_queue_purge(&sk->sk_receive_queue);
	}

	/* Wake up anyone sleeping in poll */
	sk->sk_state_change(sk);

fail:
	release_sock(sk);
	return err;
}
1606 
1607 static int iucv_sock_release(struct socket *sock)
1608 {
1609 	struct sock *sk = sock->sk;
1610 	int err = 0;
1611 
1612 	if (!sk)
1613 		return 0;
1614 
1615 	iucv_sock_close(sk);
1616 
1617 	sock_orphan(sk);
1618 	iucv_sock_kill(sk);
1619 	return err;
1620 }
1621 
1622 /* getsockopt and setsockopt */
1623 static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
1624 				char __user *optval, unsigned int optlen)
1625 {
1626 	struct sock *sk = sock->sk;
1627 	struct iucv_sock *iucv = iucv_sk(sk);
1628 	int val;
1629 	int rc;
1630 
1631 	if (level != SOL_IUCV)
1632 		return -ENOPROTOOPT;
1633 
1634 	if (optlen < sizeof(int))
1635 		return -EINVAL;
1636 
1637 	if (get_user(val, (int __user *) optval))
1638 		return -EFAULT;
1639 
1640 	rc = 0;
1641 
1642 	lock_sock(sk);
1643 	switch (optname) {
1644 	case SO_IPRMDATA_MSG:
1645 		if (val)
1646 			iucv->flags |= IUCV_IPRMDATA;
1647 		else
1648 			iucv->flags &= ~IUCV_IPRMDATA;
1649 		break;
1650 	case SO_MSGLIMIT:
1651 		switch (sk->sk_state) {
1652 		case IUCV_OPEN:
1653 		case IUCV_BOUND:
1654 			if (val < 1 || val > (u16)(~0))
1655 				rc = -EINVAL;
1656 			else
1657 				iucv->msglimit = val;
1658 			break;
1659 		default:
1660 			rc = -EINVAL;
1661 			break;
1662 		}
1663 		break;
1664 	default:
1665 		rc = -ENOPROTOOPT;
1666 		break;
1667 	}
1668 	release_sock(sk);
1669 
1670 	return rc;
1671 }
1672 
1673 static int iucv_sock_getsockopt(struct socket *sock, int level, int optname,
1674 				char __user *optval, int __user *optlen)
1675 {
1676 	struct sock *sk = sock->sk;
1677 	struct iucv_sock *iucv = iucv_sk(sk);
1678 	unsigned int val;
1679 	int len;
1680 
1681 	if (level != SOL_IUCV)
1682 		return -ENOPROTOOPT;
1683 
1684 	if (get_user(len, optlen))
1685 		return -EFAULT;
1686 
1687 	if (len < 0)
1688 		return -EINVAL;
1689 
1690 	len = min_t(unsigned int, len, sizeof(int));
1691 
1692 	switch (optname) {
1693 	case SO_IPRMDATA_MSG:
1694 		val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1695 		break;
1696 	case SO_MSGLIMIT:
1697 		lock_sock(sk);
1698 		val = (iucv->path != NULL) ? iucv->path->msglim	/* connected */
1699 					   : iucv->msglimit;	/* default */
1700 		release_sock(sk);
1701 		break;
1702 	case SO_MSGSIZE:
1703 		if (sk->sk_state == IUCV_OPEN)
1704 			return -EBADFD;
1705 		val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
1706 				sizeof(struct af_iucv_trans_hdr) - ETH_HLEN :
1707 				0x7fffffff;
1708 		break;
1709 	default:
1710 		return -ENOPROTOOPT;
1711 	}
1712 
1713 	if (put_user(len, optlen))
1714 		return -EFAULT;
1715 	if (copy_to_user(optval, &val, len))
1716 		return -EFAULT;
1717 
1718 	return 0;
1719 }
1720 
1721 
1722 /* Callback wrappers - called from iucv base support */
/* iucv_callback_connreq() - handle an incoming IUCV connection request
 *
 * Looks up a listening af_iucv socket whose src_name matches the
 * target name in @ipuser; if found, creates a child socket, accepts
 * the path on its behalf and queues the child on the parent's accept
 * queue.  Returns -EINVAL if the path does not belong to af_iucv;
 * all other outcomes (including severed/rejected paths) return 0.
 */
static int iucv_callback_connreq(struct iucv_path *path,
				 u8 ipvmid[8], u8 ipuser[16])
{
	unsigned char user_data[16];
	unsigned char nuser_data[16];
	unsigned char src_name[8];
	struct sock *sk, *nsk;
	struct iucv_sock *iucv, *niucv;
	int err;

	/* target name arrives in EBCDIC; convert for comparison */
	memcpy(src_name, ipuser, 8);
	EBCASC(src_name, 8);
	/* Find out if this path belongs to af_iucv. */
	read_lock(&iucv_sk_list.lock);
	iucv = NULL;
	sk = NULL;
	sk_for_each(sk, &iucv_sk_list.head)
		if (sk->sk_state == IUCV_LISTEN &&
		    !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
			/*
			 * Found a listening socket with
			 * src_name == ipuser[0-7].
			 */
			iucv = iucv_sk(sk);
			break;
		}
	read_unlock(&iucv_sk_list.lock);
	if (!iucv)
		/* No socket found, not one of our paths. */
		return -EINVAL;

	bh_lock_sock(sk);

	/* Check if parent socket is listening */
	low_nmcpy(user_data, iucv->src_name);
	high_nmcpy(user_data, iucv->dst_name);
	ASCEBC(user_data, sizeof(user_data));
	if (sk->sk_state != IUCV_LISTEN) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Check for backlog size */
	if (sk_acceptq_is_full(sk)) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	/* Create the new socket */
	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
	if (!nsk) {
		err = pr_iucv->path_sever(path, user_data);
		iucv_path_free(path);
		goto fail;
	}

	niucv = iucv_sk(nsk);
	iucv_sock_init(nsk, sk);

	/* Set the new iucv_sock */
	memcpy(niucv->dst_name, ipuser + 8, 8);
	EBCASC(niucv->dst_name, 8);
	memcpy(niucv->dst_user_id, ipvmid, 8);
	memcpy(niucv->src_name, iucv->src_name, 8);
	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
	niucv->path = path;

	/* Call iucv_accept */
	high_nmcpy(nuser_data, ipuser + 8);
	memcpy(nuser_data + 8, niucv->src_name, 8);
	ASCEBC(nuser_data + 8, 8);

	/* set message limit for path based on msglimit of accepting socket */
	niucv->msglimit = iucv->msglimit;
	path->msglim = iucv->msglimit;
	err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk);
	if (err) {
		iucv_sever_path(nsk, 1);
		iucv_sock_kill(nsk);
		goto fail;
	}

	iucv_accept_enqueue(sk, nsk);

	/* Wake up accept */
	nsk->sk_state = IUCV_CONNECTED;
	sk->sk_data_ready(sk);
	err = 0;
fail:
	bh_unlock_sock(sk);
	/* NOTE(review): err is intentionally not returned here; once a
	 * matching listen socket was found, the request is considered
	 * handled even if it had to be severed */
	return 0;
}
1817 
1818 static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
1819 {
1820 	struct sock *sk = path->private;
1821 
1822 	sk->sk_state = IUCV_CONNECTED;
1823 	sk->sk_state_change(sk);
1824 }
1825 
/* iucv_callback_rx() - message-pending callback for classic IUCV paths
 *
 * Called by the IUCV base layer when a new message arrived on @path.
 * The message is received into an skb immediately when possible;
 * otherwise its descriptor is parked on message_q for later handling
 * by iucv_process_message_q().
 */
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct iucv_sock *iucv = iucv_sk(sk);
	struct sk_buff *skb;
	struct sock_msg_q *save_msg;
	int len;

	/* receive direction was shut down: refuse the message */
	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		pr_iucv->message_reject(path, msg);
		return;
	}

	spin_lock(&iucv->message_q.lock);

	/* preserve arrival order: never overtake already-queued work */
	if (!list_empty(&iucv->message_q.list) ||
	    !skb_queue_empty(&iucv->backlog_skb_q))
		goto save_message;

	/* defer the message if it would overrun the receive buffer */
	len = atomic_read(&sk->sk_rmem_alloc);
	len += SKB_TRUESIZE(iucv_msg_length(msg));
	if (len > sk->sk_rcvbuf)
		goto save_message;

	skb = alloc_iucv_recv_skb(iucv_msg_length(msg));
	if (!skb)
		goto save_message;

	iucv_process_message(sk, skb, path, msg);
	goto out_unlock;

save_message:
	/* remember path and message descriptor for later reception;
	 * the message is silently dropped if no memory is available */
	save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
	if (!save_msg)
		goto out_unlock;
	save_msg->path = path;
	save_msg->msg = *msg;

	list_add_tail(&save_msg->list, &iucv->message_q.list);

out_unlock:
	spin_unlock(&iucv->message_q.lock);
}
1869 
/* iucv_callback_txdone() - message-complete callback for classic IUCV
 *
 * The peer received the message identified by @msg->tag: remove the
 * matching skb from the send queue, wake up senders blocked on the
 * message limit, and complete a pending close once the send queue has
 * drained.
 */
static void iucv_callback_txdone(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct sock *sk = path->private;
	struct sk_buff *this = NULL;
	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
	struct sk_buff *list_skb = list->next;
	unsigned long flags;

	bh_lock_sock(sk);
	if (!skb_queue_empty(list)) {
		spin_lock_irqsave(&list->lock, flags);

		/* find the queued skb whose tag matches the completed msg */
		while (list_skb != (struct sk_buff *)list) {
			if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
				this = list_skb;
				break;
			}
			list_skb = list_skb->next;
		}
		if (this)
			__skb_unlink(this, list);

		spin_unlock_irqrestore(&list->lock, flags);

		if (this) {
			kfree_skb(this);
			/* wake up any process waiting for sending */
			iucv_sock_wake_msglim(sk);
		}
	}

	if (sk->sk_state == IUCV_CLOSING) {
		/* last outstanding send completed: finish the close */
		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
			sk->sk_state = IUCV_CLOSED;
			sk->sk_state_change(sk);
		}
	}
	bh_unlock_sock(sk);

}
1911 
1912 static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
1913 {
1914 	struct sock *sk = path->private;
1915 
1916 	if (sk->sk_state == IUCV_CLOSED)
1917 		return;
1918 
1919 	bh_lock_sock(sk);
1920 	iucv_sever_path(sk, 1);
1921 	sk->sk_state = IUCV_DISCONN;
1922 
1923 	sk->sk_state_change(sk);
1924 	bh_unlock_sock(sk);
1925 }
1926 
1927 /* called if the other communication side shuts down its RECV direction;
1928  * in turn, the callback sets SEND_SHUTDOWN to disable sending of data.
1929  */
1930 static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
1931 {
1932 	struct sock *sk = path->private;
1933 
1934 	bh_lock_sock(sk);
1935 	if (sk->sk_state != IUCV_CLOSED) {
1936 		sk->sk_shutdown |= SEND_SHUTDOWN;
1937 		sk->sk_state_change(sk);
1938 	}
1939 	bh_unlock_sock(sk);
1940 }
1941 
1942 /***************** HiperSockets transport callbacks ********************/
1943 static void afiucv_swap_src_dest(struct sk_buff *skb)
1944 {
1945 	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
1946 	char tmpID[8];
1947 	char tmpName[8];
1948 
1949 	ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
1950 	ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
1951 	ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
1952 	ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
1953 	memcpy(tmpID, trans_hdr->srcUserID, 8);
1954 	memcpy(tmpName, trans_hdr->srcAppName, 8);
1955 	memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8);
1956 	memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8);
1957 	memcpy(trans_hdr->destUserID, tmpID, 8);
1958 	memcpy(trans_hdr->destAppName, tmpName, 8);
1959 	skb_push(skb, ETH_HLEN);
1960 	memset(skb->data, 0, ETH_HLEN);
1961 }
1962 
1963 /**
1964  * afiucv_hs_callback_syn - react on received SYN
1965  **/
1966 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1967 {
1968 	struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
1969 	struct sock *nsk;
1970 	struct iucv_sock *iucv, *niucv;
1971 	int err;
1972 
1973 	iucv = iucv_sk(sk);
1974 	if (!iucv) {
1975 		/* no sock - connection refused */
1976 		afiucv_swap_src_dest(skb);
1977 		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1978 		err = dev_queue_xmit(skb);
1979 		goto out;
1980 	}
1981 
1982 	nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
1983 	bh_lock_sock(sk);
1984 	if ((sk->sk_state != IUCV_LISTEN) ||
1985 	    sk_acceptq_is_full(sk) ||
1986 	    !nsk) {
1987 		/* error on server socket - connection refused */
1988 		afiucv_swap_src_dest(skb);
1989 		trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN;
1990 		err = dev_queue_xmit(skb);
1991 		iucv_sock_kill(nsk);
1992 		bh_unlock_sock(sk);
1993 		goto out;
1994 	}
1995 
1996 	niucv = iucv_sk(nsk);
1997 	iucv_sock_init(nsk, sk);
1998 	niucv->transport = AF_IUCV_TRANS_HIPER;
1999 	niucv->msglimit = iucv->msglimit;
2000 	if (!trans_hdr->window)
2001 		niucv->msglimit_peer = IUCV_HIPER_MSGLIM_DEFAULT;
2002 	else
2003 		niucv->msglimit_peer = trans_hdr->window;
2004 	memcpy(niucv->dst_name, trans_hdr->srcAppName, 8);
2005 	memcpy(niucv->dst_user_id, trans_hdr->srcUserID, 8);
2006 	memcpy(niucv->src_name, iucv->src_name, 8);
2007 	memcpy(niucv->src_user_id, iucv->src_user_id, 8);
2008 	nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
2009 	niucv->hs_dev = iucv->hs_dev;
2010 	dev_hold(niucv->hs_dev);
2011 	afiucv_swap_src_dest(skb);
2012 	trans_hdr->flags = AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK;
2013 	trans_hdr->window = niucv->msglimit;
2014 	/* if receiver acks the xmit connection is established */
2015 	err = dev_queue_xmit(skb);
2016 	if (!err) {
2017 		iucv_accept_enqueue(sk, nsk);
2018 		nsk->sk_state = IUCV_CONNECTED;
2019 		sk->sk_data_ready(sk);
2020 	} else
2021 		iucv_sock_kill(nsk);
2022 	bh_unlock_sock(sk);
2023 
2024 out:
2025 	return NET_RX_SUCCESS;
2026 }
2027 
2028 /**
2029  * afiucv_hs_callback_synack() - react on received SYN-ACK
2030  **/
2031 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
2032 {
2033 	struct iucv_sock *iucv = iucv_sk(sk);
2034 
2035 	if (!iucv)
2036 		goto out;
2037 	if (sk->sk_state != IUCV_BOUND)
2038 		goto out;
2039 	bh_lock_sock(sk);
2040 	iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
2041 	sk->sk_state = IUCV_CONNECTED;
2042 	sk->sk_state_change(sk);
2043 	bh_unlock_sock(sk);
2044 out:
2045 	kfree_skb(skb);
2046 	return NET_RX_SUCCESS;
2047 }
2048 
2049 /**
2050  * afiucv_hs_callback_synfin() - react on received SYN_FIN
2051  **/
2052 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
2053 {
2054 	struct iucv_sock *iucv = iucv_sk(sk);
2055 
2056 	if (!iucv)
2057 		goto out;
2058 	if (sk->sk_state != IUCV_BOUND)
2059 		goto out;
2060 	bh_lock_sock(sk);
2061 	sk->sk_state = IUCV_DISCONN;
2062 	sk->sk_state_change(sk);
2063 	bh_unlock_sock(sk);
2064 out:
2065 	kfree_skb(skb);
2066 	return NET_RX_SUCCESS;
2067 }
2068 
2069 /**
2070  * afiucv_hs_callback_fin() - react on received FIN
2071  **/
2072 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
2073 {
2074 	struct iucv_sock *iucv = iucv_sk(sk);
2075 
2076 	/* other end of connection closed */
2077 	if (!iucv)
2078 		goto out;
2079 	bh_lock_sock(sk);
2080 	if (sk->sk_state == IUCV_CONNECTED) {
2081 		sk->sk_state = IUCV_DISCONN;
2082 		sk->sk_state_change(sk);
2083 	}
2084 	bh_unlock_sock(sk);
2085 out:
2086 	kfree_skb(skb);
2087 	return NET_RX_SUCCESS;
2088 }
2089 
2090 /**
2091  * afiucv_hs_callback_win() - react on received WIN
2092  **/
2093 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
2094 {
2095 	struct iucv_sock *iucv = iucv_sk(sk);
2096 
2097 	if (!iucv)
2098 		return NET_RX_SUCCESS;
2099 
2100 	if (sk->sk_state != IUCV_CONNECTED)
2101 		return NET_RX_SUCCESS;
2102 
2103 	atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
2104 	iucv_sock_wake_msglim(sk);
2105 	return NET_RX_SUCCESS;
2106 }
2107 
2108 /**
2109  * afiucv_hs_callback_rx() - react on received data
2110  **/
2111 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
2112 {
2113 	struct iucv_sock *iucv = iucv_sk(sk);
2114 
2115 	if (!iucv) {
2116 		kfree_skb(skb);
2117 		return NET_RX_SUCCESS;
2118 	}
2119 
2120 	if (sk->sk_state != IUCV_CONNECTED) {
2121 		kfree_skb(skb);
2122 		return NET_RX_SUCCESS;
2123 	}
2124 
2125 	if (sk->sk_shutdown & RCV_SHUTDOWN) {
2126 		kfree_skb(skb);
2127 		return NET_RX_SUCCESS;
2128 	}
2129 
2130 	/* write stuff from iucv_msg to skb cb */
2131 	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));
2132 	skb_reset_transport_header(skb);
2133 	skb_reset_network_header(skb);
2134 	IUCV_SKB_CB(skb)->offset = 0;
2135 	if (sk_filter(sk, skb)) {
2136 		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
2137 		kfree_skb(skb);
2138 		return NET_RX_SUCCESS;
2139 	}
2140 
2141 	spin_lock(&iucv->message_q.lock);
2142 	if (skb_queue_empty(&iucv->backlog_skb_q)) {
2143 		if (__sock_queue_rcv_skb(sk, skb))
2144 			/* handle rcv queue full */
2145 			skb_queue_tail(&iucv->backlog_skb_q, skb);
2146 	} else
2147 		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
2148 	spin_unlock(&iucv->message_q.lock);
2149 	return NET_RX_SUCCESS;
2150 }
2151 
2152 /**
2153  * afiucv_hs_rcv() - base function for arriving data through HiperSockets
2154  *                   transport
2155  *                   called from netif RX softirq
2156  **/
2157 static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
2158 	struct packet_type *pt, struct net_device *orig_dev)
2159 {
2160 	struct sock *sk;
2161 	struct iucv_sock *iucv;
2162 	struct af_iucv_trans_hdr *trans_hdr;
2163 	int err = NET_RX_SUCCESS;
2164 	char nullstring[8];
2165 
2166 	if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
2167 		WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len);
2168 		kfree_skb(skb);
2169 		return NET_RX_SUCCESS;
2170 	}
2171 
2172 	trans_hdr = iucv_trans_hdr(skb);
2173 	EBCASC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName));
2174 	EBCASC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID));
2175 	EBCASC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName));
2176 	EBCASC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID));
2177 	memset(nullstring, 0, sizeof(nullstring));
2178 	iucv = NULL;
2179 	sk = NULL;
2180 	read_lock(&iucv_sk_list.lock);
2181 	sk_for_each(sk, &iucv_sk_list.head) {
2182 		if (trans_hdr->flags == AF_IUCV_FLAG_SYN) {
2183 			if ((!memcmp(&iucv_sk(sk)->src_name,
2184 				     trans_hdr->destAppName, 8)) &&
2185 			    (!memcmp(&iucv_sk(sk)->src_user_id,
2186 				     trans_hdr->destUserID, 8)) &&
2187 			    (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2188 			    (!memcmp(&iucv_sk(sk)->dst_user_id,
2189 				     nullstring, 8))) {
2190 				iucv = iucv_sk(sk);
2191 				break;
2192 			}
2193 		} else {
2194 			if ((!memcmp(&iucv_sk(sk)->src_name,
2195 				     trans_hdr->destAppName, 8)) &&
2196 			    (!memcmp(&iucv_sk(sk)->src_user_id,
2197 				     trans_hdr->destUserID, 8)) &&
2198 			    (!memcmp(&iucv_sk(sk)->dst_name,
2199 				     trans_hdr->srcAppName, 8)) &&
2200 			    (!memcmp(&iucv_sk(sk)->dst_user_id,
2201 				     trans_hdr->srcUserID, 8))) {
2202 				iucv = iucv_sk(sk);
2203 				break;
2204 			}
2205 		}
2206 	}
2207 	read_unlock(&iucv_sk_list.lock);
2208 	if (!iucv)
2209 		sk = NULL;
2210 
2211 	/* no sock
2212 	how should we send with no sock
2213 	1) send without sock no send rc checking?
2214 	2) introduce default sock to handle this cases
2215 
2216 	 SYN -> send SYN|ACK in good case, send SYN|FIN in bad case
2217 	 data -> send FIN
2218 	 SYN|ACK, SYN|FIN, FIN -> no action? */
2219 
2220 	switch (trans_hdr->flags) {
2221 	case AF_IUCV_FLAG_SYN:
2222 		/* connect request */
2223 		err = afiucv_hs_callback_syn(sk, skb);
2224 		break;
2225 	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
2226 		/* connect request confirmed */
2227 		err = afiucv_hs_callback_synack(sk, skb);
2228 		break;
2229 	case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
2230 		/* connect request refused */
2231 		err = afiucv_hs_callback_synfin(sk, skb);
2232 		break;
2233 	case (AF_IUCV_FLAG_FIN):
2234 		/* close request */
2235 		err = afiucv_hs_callback_fin(sk, skb);
2236 		break;
2237 	case (AF_IUCV_FLAG_WIN):
2238 		err = afiucv_hs_callback_win(sk, skb);
2239 		if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
2240 			kfree_skb(skb);
2241 			break;
2242 		}
2243 		/* fall through and receive non-zero length data */
2244 	case (AF_IUCV_FLAG_SHT):
2245 		/* shutdown request */
2246 		/* fall through and receive zero length data */
2247 	case 0:
2248 		/* plain data frame */
2249 		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
2250 		err = afiucv_hs_callback_rx(sk, skb);
2251 		break;
2252 	default:
2253 		kfree_skb(skb);
2254 	}
2255 
2256 	return err;
2257 }
2258 
2259 /**
2260  * afiucv_hs_callback_txnotify() - handle send notifcations from HiperSockets
2261  *                                 transport
2262  **/
2263 static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
2264 					enum iucv_tx_notify n)
2265 {
2266 	struct sock *isk = skb->sk;
2267 	struct sock *sk = NULL;
2268 	struct iucv_sock *iucv = NULL;
2269 	struct sk_buff_head *list;
2270 	struct sk_buff *list_skb;
2271 	struct sk_buff *nskb;
2272 	unsigned long flags;
2273 
2274 	read_lock_irqsave(&iucv_sk_list.lock, flags);
2275 	sk_for_each(sk, &iucv_sk_list.head)
2276 		if (sk == isk) {
2277 			iucv = iucv_sk(sk);
2278 			break;
2279 		}
2280 	read_unlock_irqrestore(&iucv_sk_list.lock, flags);
2281 
2282 	if (!iucv || sock_flag(sk, SOCK_ZAPPED))
2283 		return;
2284 
2285 	list = &iucv->send_skb_q;
2286 	spin_lock_irqsave(&list->lock, flags);
2287 	if (skb_queue_empty(list))
2288 		goto out_unlock;
2289 	list_skb = list->next;
2290 	nskb = list_skb->next;
2291 	while (list_skb != (struct sk_buff *)list) {
2292 		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
2293 			switch (n) {
2294 			case TX_NOTIFY_OK:
2295 				__skb_unlink(list_skb, list);
2296 				kfree_skb(list_skb);
2297 				iucv_sock_wake_msglim(sk);
2298 				break;
2299 			case TX_NOTIFY_PENDING:
2300 				atomic_inc(&iucv->pendings);
2301 				break;
2302 			case TX_NOTIFY_DELAYED_OK:
2303 				__skb_unlink(list_skb, list);
2304 				atomic_dec(&iucv->pendings);
2305 				if (atomic_read(&iucv->pendings) <= 0)
2306 					iucv_sock_wake_msglim(sk);
2307 				kfree_skb(list_skb);
2308 				break;
2309 			case TX_NOTIFY_UNREACHABLE:
2310 			case TX_NOTIFY_DELAYED_UNREACHABLE:
2311 			case TX_NOTIFY_TPQFULL: /* not yet used */
2312 			case TX_NOTIFY_GENERALERROR:
2313 			case TX_NOTIFY_DELAYED_GENERALERROR:
2314 				__skb_unlink(list_skb, list);
2315 				kfree_skb(list_skb);
2316 				if (sk->sk_state == IUCV_CONNECTED) {
2317 					sk->sk_state = IUCV_DISCONN;
2318 					sk->sk_state_change(sk);
2319 				}
2320 				break;
2321 			}
2322 			break;
2323 		}
2324 		list_skb = nskb;
2325 		nskb = nskb->next;
2326 	}
2327 out_unlock:
2328 	spin_unlock_irqrestore(&list->lock, flags);
2329 
2330 	if (sk->sk_state == IUCV_CLOSING) {
2331 		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
2332 			sk->sk_state = IUCV_CLOSED;
2333 			sk->sk_state_change(sk);
2334 		}
2335 	}
2336 
2337 }
2338 
2339 /*
2340  * afiucv_netdev_event: handle netdev notifier chain events
2341  */
2342 static int afiucv_netdev_event(struct notifier_block *this,
2343 			       unsigned long event, void *ptr)
2344 {
2345 	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2346 	struct sock *sk;
2347 	struct iucv_sock *iucv;
2348 
2349 	switch (event) {
2350 	case NETDEV_REBOOT:
2351 	case NETDEV_GOING_DOWN:
2352 		sk_for_each(sk, &iucv_sk_list.head) {
2353 			iucv = iucv_sk(sk);
2354 			if ((iucv->hs_dev == event_dev) &&
2355 			    (sk->sk_state == IUCV_CONNECTED)) {
2356 				if (event == NETDEV_GOING_DOWN)
2357 					iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
2358 				sk->sk_state = IUCV_DISCONN;
2359 				sk->sk_state_change(sk);
2360 			}
2361 		}
2362 		break;
2363 	case NETDEV_DOWN:
2364 	case NETDEV_UNREGISTER:
2365 	default:
2366 		break;
2367 	}
2368 	return NOTIFY_DONE;
2369 }
2370 
/* netdev notifier used when running without the VM IUCV base module
 * (HiperSockets-only operation); see afiucv_init() */
static struct notifier_block afiucv_netdev_notifier = {
	.notifier_call = afiucv_netdev_event,
};
2374 
/* socket-layer operations for AF_IUCV sockets; unsupported operations
 * (ioctl, mmap, socketpair) map to the generic sock_no_* stubs */
static const struct proto_ops iucv_sock_ops = {
	.family		= PF_IUCV,
	.owner		= THIS_MODULE,
	.release	= iucv_sock_release,
	.bind		= iucv_sock_bind,
	.connect	= iucv_sock_connect,
	.listen		= iucv_sock_listen,
	.accept		= iucv_sock_accept,
	.getname	= iucv_sock_getname,
	.sendmsg	= iucv_sock_sendmsg,
	.recvmsg	= iucv_sock_recvmsg,
	.poll		= iucv_sock_poll,
	.ioctl		= sock_no_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= iucv_sock_shutdown,
	.setsockopt	= iucv_sock_setsockopt,
	.getsockopt	= iucv_sock_getsockopt,
};
2394 
/* AF_IUCV address-family registration; iucv_sock_create() handles
 * socket(AF_IUCV, ...) calls */
static const struct net_proto_family iucv_sock_family_ops = {
	.family	= AF_IUCV,
	.owner	= THIS_MODULE,
	.create	= iucv_sock_create,
};
2400 
/* L2 packet handler for the HiperSockets transport: frames with
 * ethertype ETH_P_AF_IUCV are delivered to afiucv_hs_rcv() */
static struct packet_type iucv_packet_type = {
	.type = cpu_to_be16(ETH_P_AF_IUCV),
	.func = afiucv_hs_rcv,
};
2405 
/**
 * afiucv_iucv_init() - set up the VM IUCV transport
 *
 * Registers the af_iucv handler with the IUCV base layer and creates a
 * dummy driver/device pair on the IUCV bus (used as an anchor in the
 * driver model).  On failure, previously acquired resources are
 * released in reverse order via the goto chain.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int afiucv_iucv_init(void)
{
	int err;

	err = pr_iucv->iucv_register(&af_iucv_handler, 0);
	if (err)
		goto out;
	/* establish dummy device */
	af_iucv_driver.bus = pr_iucv->bus;
	err = driver_register(&af_iucv_driver);
	if (err)
		goto out_iucv;
	af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!af_iucv_dev) {
		err = -ENOMEM;
		goto out_driver;
	}
	dev_set_name(af_iucv_dev, "af_iucv");
	af_iucv_dev->bus = pr_iucv->bus;
	af_iucv_dev->parent = pr_iucv->root;
	/* release callback frees the kzalloc'ed device on final put */
	af_iucv_dev->release = (void (*)(struct device *))kfree;
	af_iucv_dev->driver = &af_iucv_driver;
	err = device_register(af_iucv_dev);
	if (err)
		goto out_iucv_dev;
	return 0;

out_iucv_dev:
	/* after a failed device_register() the device must be released
	 * with put_device(), never freed directly */
	put_device(af_iucv_dev);
out_driver:
	driver_unregister(&af_iucv_driver);
out_iucv:
	pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out:
	return err;
}
2442 
2443 static int __init afiucv_init(void)
2444 {
2445 	int err;
2446 
2447 	if (MACHINE_IS_VM) {
2448 		cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
2449 		if (unlikely(err)) {
2450 			WARN_ON(err);
2451 			err = -EPROTONOSUPPORT;
2452 			goto out;
2453 		}
2454 
2455 		pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
2456 		if (!pr_iucv) {
2457 			printk(KERN_WARNING "iucv_if lookup failed\n");
2458 			memset(&iucv_userid, 0, sizeof(iucv_userid));
2459 		}
2460 	} else {
2461 		memset(&iucv_userid, 0, sizeof(iucv_userid));
2462 		pr_iucv = NULL;
2463 	}
2464 
2465 	err = proto_register(&iucv_proto, 0);
2466 	if (err)
2467 		goto out;
2468 	err = sock_register(&iucv_sock_family_ops);
2469 	if (err)
2470 		goto out_proto;
2471 
2472 	if (pr_iucv) {
2473 		err = afiucv_iucv_init();
2474 		if (err)
2475 			goto out_sock;
2476 	} else
2477 		register_netdevice_notifier(&afiucv_netdev_notifier);
2478 	dev_add_pack(&iucv_packet_type);
2479 	return 0;
2480 
2481 out_sock:
2482 	sock_unregister(PF_IUCV);
2483 out_proto:
2484 	proto_unregister(&iucv_proto);
2485 out:
2486 	if (pr_iucv)
2487 		symbol_put(iucv_if);
2488 	return err;
2489 }
2490 
/**
 * afiucv_exit() - module teardown
 *
 * Undo afiucv_init() in reverse order.  Which transport-specific
 * resources exist depends on pr_iucv, mirroring the branch taken at
 * init time: VM IUCV dummy device/driver and handler, or the netdev
 * notifier for HiperSockets-only operation.
 */
static void __exit afiucv_exit(void)
{
	if (pr_iucv) {
		device_unregister(af_iucv_dev);
		driver_unregister(&af_iucv_driver);
		pr_iucv->iucv_unregister(&af_iucv_handler, 0);
		symbol_put(iucv_if);
	} else
		unregister_netdevice_notifier(&afiucv_netdev_notifier);
	dev_remove_pack(&iucv_packet_type);
	sock_unregister(PF_IUCV);
	proto_unregister(&iucv_proto);
}
2504 
/* module entry points and metadata */
module_init(afiucv_init);
module_exit(afiucv_exit);

MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_IUCV);
2513