/*
 * File: pep.c
 *
 * Phonet pipe protocol end point socket
 *
 * Copyright (C) 2008 Nokia Corporation.
 *
 * Author: Rémi Denis-Courmont <remi.denis-courmont@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <asm/ioctls.h>

#include <linux/phonet.h>
#include <net/phonet/phonet.h>
#include <net/phonet/pep.h>
#include <net/phonet/gprs.h>

/* sk_state values:
 * TCP_CLOSE		sock not in use yet
 * TCP_CLOSE_WAIT	disconnected pipe
 * TCP_LISTEN		listening pipe endpoint
 * TCP_SYN_RECV		connected pipe in disabled state
 * TCP_ESTABLISHED	connected pipe in enabled state
 *
 * pep_sock locking:
 *  - sk_state, ackq, hlist: sock lock needed
 *  - listener: read only
 *  - pipe_handle: read only
 */
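
/* Rough map of the transitions implemented below (see pipe_do_rcv()
 * and pep_connreq_rcv()):
 *  - PNS_PEP_CONNECT_REQ on a listening sock creates a child sock in
 *    TCP_SYN_RECV, parked on the listener's ackq until accept().
 *  - PNS_PIPE_ENABLED_IND, or PNS_PIPE_CREATED/RESET_IND when
 *    init_enable is set, moves a connected sock to TCP_ESTABLISHED
 *    and grants RX credits.
 *  - PNS_PIPE_DISABLED_IND moves it back to TCP_SYN_RECV.
 *  - PNS_PEP_DISCONNECT_REQ moves it to TCP_CLOSE_WAIT.
 */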

#define CREDITS_MAX	10
#define CREDITS_THR	7
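
/* RX credits are replenished by pipe_grant_credits(): with multi-credit
 * flow control the peer is topped up to CREDITS_MAX, but only once at
 * least CREDITS_THR credits can be granted in a single status
 * indication; with one-credit flow control a single credit is granted
 * per indication.
 */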

static const struct sockaddr_pn pipe_srv = {
	.spn_family = AF_PHONET,
	.spn_resource = 0xD9, /* pipe service */
};

#define pep_sb_size(s) (((s) + 5) & ~3) /* 2-byte header, 32-bit aligned */
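
/* A TLV sub-block starts with a 1-byte type and a 1-byte length that
 * counts the whole sub-block, 2-byte header included (see pep_get_sb()
 * below). pep_sb_size() computes that length rounded up to 32-bit
 * alignment, e.g. pep_sb_size(5) == 8 as used in pep_accept_conn().
 */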

/* Get the next TLV sub-block. */
static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
					void *buf)
{
	void *data = NULL;
	struct {
		u8 sb_type;
		u8 sb_len;
	} *ph, h;
	int buflen = *plen;

	ph = skb_header_pointer(skb, 0, 2, &h);
	if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len))
		return NULL;
	ph->sb_len -= 2;
	*ptype = ph->sb_type;
	*plen = ph->sb_len;

	if (buflen > ph->sb_len)
		buflen = ph->sb_len;
	data = skb_header_pointer(skb, 2, buflen, buf);
	__skb_pull(skb, 2 + ph->sb_len);
	return data;
}

static int pep_reply(struct sock *sk, struct sk_buff *oskb,
			u8 code, const void *data, int len, gfp_t priority)
{
	const struct pnpipehdr *oph = pnp_hdr(oskb);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PNPIPE_HEADER);
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, data, len);
	__skb_push(skb, sizeof(*ph));
	skb_reset_transport_header(skb);
	ph = pnp_hdr(skb);
	ph->utid = oph->utid;
	ph->message_id = oph->message_id + 1; /* REQ -> RESP */
	ph->pipe_handle = oph->pipe_handle;
	ph->error_code = code;

	return pn_skb_send(sk, skb, &pipe_srv);
}

#define PAD 0x00
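
/* pep_accept_conn() answers PNS_PEP_CONNECT_REQ with a no-error reply
 * carrying two sub-blocks (PN_PIPE_SB_REQUIRED_FC_TX and
 * PN_PIPE_SB_PREFERRED_FC_RX) listing the three flow control methods
 * this end supports, seemingly in order of preference.
 */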
static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
{
	static const u8 data[20] = {
		PAD, PAD, PAD, 2 /* sub-blocks */,
		PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD,
			PN_MULTI_CREDIT_FLOW_CONTROL,
			PN_ONE_CREDIT_FLOW_CONTROL,
			PN_LEGACY_FLOW_CONTROL,
			PAD,
		PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD,
			PN_MULTI_CREDIT_FLOW_CONTROL,
			PN_ONE_CREDIT_FLOW_CONTROL,
			PN_LEGACY_FLOW_CONTROL,
			PAD,
	};

	might_sleep();
	return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
				GFP_KERNEL);
}

static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code)
{
	static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
	WARN_ON(code == PN_PIPE_NO_ERROR);
	return pep_reply(sk, skb, code, data, sizeof(data), GFP_ATOMIC);
}

/* Control requests are not sent by the pipe service and have a specific
 * message format. */
static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
				gfp_t priority)
{
	const struct pnpipehdr *oph = pnp_hdr(oskb);
	struct sk_buff *skb;
	struct pnpipehdr *ph;
	struct sockaddr_pn dst;

	skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PHONET_HEADER);
	ph = (struct pnpipehdr *)skb_put(skb, sizeof(*ph) + 4);

	ph->utid = oph->utid;
	ph->message_id = PNS_PEP_CTRL_RESP;
	ph->pipe_handle = oph->pipe_handle;
	ph->data[0] = oph->data[1]; /* CTRL id */
	ph->data[1] = oph->data[0]; /* PEP type */
	ph->data[2] = code; /* error code, at an unusual offset */
	ph->data[3] = PAD;
	ph->data[4] = PAD;

	pn_skb_get_src_sockaddr(oskb, &dst);
	return pn_skb_send(sk, skb, &dst);
}

static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PNPIPE_HEADER + 4);
	__skb_push(skb, sizeof(*ph) + 4);
	skb_reset_transport_header(skb);
	ph = pnp_hdr(skb);
	ph->utid = 0;
	ph->message_id = PNS_PEP_STATUS_IND;
	ph->pipe_handle = pn->pipe_handle;
	ph->pep_type = PN_PEP_TYPE_COMMON;
	ph->data[1] = type;
	ph->data[2] = PAD;
	ph->data[3] = PAD;
	ph->data[4] = status;

	return pn_skb_send(sk, skb, &pipe_srv);
}

/* Send our RX flow control information to the sender.
 * Socket must be locked. */
static void pipe_grant_credits(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	BUG_ON(sk->sk_state != TCP_ESTABLISHED);

	switch (pn->rx_fc) {
	case PN_LEGACY_FLOW_CONTROL: /* TODO */
		break;
	case PN_ONE_CREDIT_FLOW_CONTROL:
		pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
				PEP_IND_READY, GFP_ATOMIC);
		pn->rx_credits = 1;
		break;
	case PN_MULTI_CREDIT_FLOW_CONTROL:
		if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
			break;
		if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
					CREDITS_MAX - pn->rx_credits,
					GFP_ATOMIC) == 0)
			pn->rx_credits = CREDITS_MAX;
		break;
	}
}

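/* pipe_rcv_status() handles an incoming PNS_PEP_STATUS_IND from the
 * peer: legacy BUSY/READY indications toggle tx_credits between 0 and
 * 1, a one-credit READY grants a single credit, and a multi-credit
 * grant adds the advertised number of credits; writers are woken up
 * whenever credits become available.
 */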
static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	int wake = 0;

	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
		return -EINVAL;

	if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
		LIMIT_NETDEBUG(KERN_DEBUG "Phonet unknown PEP type: %u\n",
				(unsigned)hdr->data[0]);
		return -EOPNOTSUPP;
	}

	switch (hdr->data[1]) {
	case PN_PEP_IND_FLOW_CONTROL:
		switch (pn->tx_fc) {
		case PN_LEGACY_FLOW_CONTROL:
			switch (hdr->data[4]) {
			case PEP_IND_BUSY:
				atomic_set(&pn->tx_credits, 0);
				break;
			case PEP_IND_READY:
				atomic_set(&pn->tx_credits, wake = 1);
				break;
			}
			break;
		case PN_ONE_CREDIT_FLOW_CONTROL:
			if (hdr->data[4] == PEP_IND_READY)
				atomic_set(&pn->tx_credits, wake = 1);
			break;
		}
		break;

	case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
		if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
			break;
		atomic_add(wake = hdr->data[4], &pn->tx_credits);
		break;

	default:
		LIMIT_NETDEBUG(KERN_DEBUG "Phonet unknown PEP indication: %u\n",
				(unsigned)hdr->data[1]);
		return -EOPNOTSUPP;
	}
	if (wake)
		sk->sk_write_space(sk);
	return 0;
}

static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	u8 n_sb = hdr->data[0];

	pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	__skb_pull(skb, sizeof(*hdr));
	while (n_sb > 0) {
		u8 type, buf[2], len = sizeof(buf);
		u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			return -EINVAL;
		switch (type) {
		case PN_PIPE_SB_NEGOTIATED_FC:
			if (len < 2 || (data[0] | data[1]) > 3)
				break;
			pn->tx_fc = data[0] & 3;
			pn->rx_fc = data[1] & 3;
			break;
		}
		n_sb--;
	}
	return 0;
}

/* Queue an skb to a connected sock.
 * Socket lock must be held. */
static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	struct sk_buff_head *queue;
	int err = 0;

	BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);

	switch (hdr->message_id) {
	case PNS_PEP_CONNECT_REQ:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
		break;

	case PNS_PEP_DISCONNECT_REQ:
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		sk->sk_state = TCP_CLOSE_WAIT;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		break;

	case PNS_PEP_ENABLE_REQ:
		/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_RESET_REQ:
		switch (hdr->state_after_reset) {
		case PN_PIPE_DISABLE:
			pn->init_enable = 0;
			break;
		case PN_PIPE_ENABLE:
			pn->init_enable = 1;
			break;
		default: /* not allowed to send an error here!? */
			err = -EINVAL;
			goto out;
		}
		/* fall through */
	case PNS_PEP_DISABLE_REQ:
		atomic_set(&pn->tx_credits, 0);
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_CTRL_REQ:
		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
			atomic_inc(&sk->sk_drops);
			break;
		}
		__skb_pull(skb, 4);
		queue = &pn->ctrlreq_queue;
		goto queue;

	case PNS_PIPE_DATA:
		__skb_pull(skb, 3); /* Pipe data header */
		if (!pn_flow_safe(pn->rx_fc)) {
			err = sock_queue_rcv_skb(sk, skb);
			if (!err)
				return 0;
			break;
		}

		if (pn->rx_credits == 0) {
			atomic_inc(&sk->sk_drops);
			err = -ENOBUFS;
			break;
		}
		pn->rx_credits--;
		queue = &sk->sk_receive_queue;
		goto queue;

	case PNS_PEP_STATUS_IND:
		pipe_rcv_status(sk, skb);
		break;

	case PNS_PIPE_REDIRECTED_IND:
		err = pipe_rcv_created(sk, skb);
		break;

	case PNS_PIPE_CREATED_IND:
		err = pipe_rcv_created(sk, skb);
		if (err)
			break;
		/* fall through */
	case PNS_PIPE_RESET_IND:
		if (!pn->init_enable)
			break;
		/* fall through */
	case PNS_PIPE_ENABLED_IND:
		if (!pn_flow_safe(pn->tx_fc)) {
			atomic_set(&pn->tx_credits, 1);
			sk->sk_write_space(sk);
		}
		if (sk->sk_state == TCP_ESTABLISHED)
			break; /* Nothing to do */
		sk->sk_state = TCP_ESTABLISHED;
		pipe_grant_credits(sk);
		break;

	case PNS_PIPE_DISABLED_IND:
		sk->sk_state = TCP_SYN_RECV;
		pn->rx_credits = 0;
		break;

	default:
		LIMIT_NETDEBUG(KERN_DEBUG "Phonet unknown PEP message: %u\n",
				hdr->message_id);
		err = -EINVAL;
	}
out:
	kfree_skb(skb);
	return err;

queue:
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	err = skb->len;
	skb_queue_tail(queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, err);
	return 0;
}

/* Destroy connected sock. */
static void pipe_destruct(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&pn->ctrlreq_queue);
}

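/* pep_connreq_rcv() runs on a listening sock: it validates the
 * PNS_PEP_CONNECT_REQ, parses its sub-blocks, allocates a child sock in
 * TCP_SYN_RECV with a clone of the request queued on it, and parks the
 * child on the listener's ackq until pep_sock_accept() picks it up.
 */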
static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *newsk;
	struct pep_sock *newpn, *pn = pep_sk(sk);
	struct pnpipehdr *hdr;
	struct sockaddr_pn dst;
	u16 peer_type;
	u8 pipe_handle, enabled, n_sb;

	if (!pskb_pull(skb, sizeof(*hdr) + 4))
		return -EINVAL;

	hdr = pnp_hdr(skb);
	pipe_handle = hdr->pipe_handle;
	switch (hdr->state_after_connect) {
	case PN_PIPE_DISABLE:
		enabled = 0;
		break;
	case PN_PIPE_ENABLE:
		enabled = 1;
		break;
	default:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM);
		return -EINVAL;
	}
	peer_type = hdr->other_pep_type << 8;

	if (unlikely(sk->sk_state != TCP_LISTEN) || sk_acceptq_is_full(sk)) {
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
		return -ENOBUFS;
	}

	/* Parse sub-blocks (options) */
	n_sb = hdr->data[4];
	while (n_sb > 0) {
		u8 type, buf[1], len = sizeof(buf);
		const u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			return -EINVAL;
		switch (type) {
		case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
			if (len < 1)
				return -EINVAL;
			peer_type = (peer_type & 0xff00) | data[0];
			break;
		}
		n_sb--;
	}

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	/* Create a new to-be-accepted sock */
	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_ATOMIC, sk->sk_prot);
	if (!newsk) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	sock_init_data(NULL, newsk);
	newsk->sk_state = TCP_SYN_RECV;
	newsk->sk_backlog_rcv = pipe_do_rcv;
	newsk->sk_protocol = sk->sk_protocol;
	newsk->sk_destruct = pipe_destruct;

	newpn = pep_sk(newsk);
	pn_skb_get_dst_sockaddr(skb, &dst);
	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
	newpn->pn_sk.resource = pn->pn_sk.resource;
	skb_queue_head_init(&newpn->ctrlreq_queue);
	newpn->pipe_handle = pipe_handle;
	atomic_set(&newpn->tx_credits, 0);
	newpn->peer_type = peer_type;
	newpn->rx_credits = 0;
	newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	newpn->init_enable = enabled;

	BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
	skb_queue_head(&newsk->sk_receive_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, 0);

	sk_acceptq_added(sk);
	sk_add_node(newsk, &pn->ackq);
	return 0;
}

/* Listening sock must be locked */
static struct sock *pep_find_pipe(const struct hlist_head *hlist,
					const struct sockaddr_pn *dst,
					u8 pipe_handle)
{
	struct hlist_node *node;
	struct sock *sknode;
	u16 dobj = pn_sockaddr_get_object(dst);

	sk_for_each(sknode, node, hlist) {
		struct pep_sock *pnnode = pep_sk(sknode);

		/* Ports match, but addresses might not: */
		if (pnnode->pn_sk.sobject != dobj)
			continue;
		if (pnnode->pipe_handle != pipe_handle)
			continue;
		if (sknode->sk_state == TCP_CLOSE_WAIT)
			continue;

		sock_hold(sknode);
		return sknode;
	}
	return NULL;
}

/*
 * Deliver an skb to a listening sock.
 * Socket lock must be held.
 * We then queue the skb to the right connected sock (if any).
 */
static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sock *sknode;
	struct pnpipehdr *hdr;
	struct sockaddr_pn dst;
	int err = NET_RX_SUCCESS;
	u8 pipe_handle;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		goto drop;

	hdr = pnp_hdr(skb);
	pipe_handle = hdr->pipe_handle;
	if (pipe_handle == PN_PIPE_INVALID_HANDLE)
		goto drop;

	pn_skb_get_dst_sockaddr(skb, &dst);

	/* Look for an existing pipe handle */
	sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
	if (sknode)
		return sk_receive_skb(sknode, skb, 1);

	/* Look for a pipe handle pending accept */
	sknode = pep_find_pipe(&pn->ackq, &dst, pipe_handle);
	if (sknode) {
		sock_put(sknode);
		if (net_ratelimit())
			printk(KERN_WARNING "Phonet unconnected PEP ignored\n");
		err = NET_RX_DROP;
		goto drop;
	}

	switch (hdr->message_id) {
	case PNS_PEP_CONNECT_REQ:
		err = pep_connreq_rcv(sk, skb);
		break;

	case PNS_PEP_DISCONNECT_REQ:
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_CTRL_REQ:
		pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
		break;

	case PNS_PEP_RESET_REQ:
	case PNS_PEP_ENABLE_REQ:
	case PNS_PEP_DISABLE_REQ:
		/* invalid handle is not even allowed here! */
	default:
		err = NET_RX_DROP;
	}
drop:
	kfree_skb(skb);
	return err;
}

/* associated socket ceases to exist */
static void pep_sock_close(struct sock *sk, long timeout)
{
	struct pep_sock *pn = pep_sk(sk);
	int ifindex = 0;

	sk_common_release(sk);

	lock_sock(sk);
	if (sk->sk_state == TCP_LISTEN) {
		/* Destroy the listen queue */
		struct sock *sknode;
		struct hlist_node *p, *n;

		sk_for_each_safe(sknode, p, n, &pn->ackq)
			sk_del_node_init(sknode);
		sk->sk_state = TCP_CLOSE;
	}
	ifindex = pn->ifindex;
	pn->ifindex = 0;
	release_sock(sk);

	if (ifindex)
		gprs_detach(sk);
}

static int pep_wait_connreq(struct sock *sk, int noblock)
{
	struct task_struct *tsk = current;
	struct pep_sock *pn = pep_sk(sk);
	long timeo = sock_rcvtimeo(sk, noblock);

	for (;;) {
		DEFINE_WAIT(wait);

		if (sk->sk_state != TCP_LISTEN)
			return -EINVAL;
		if (!hlist_empty(&pn->ackq))
			break;
		if (!timeo)
			return -EWOULDBLOCK;
		if (signal_pending(tsk))
			return sock_intr_errno(timeo);

		prepare_to_wait_exclusive(&sk->sk_socket->wait, &wait,
						TASK_INTERRUPTIBLE);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		finish_wait(&sk->sk_socket->wait, &wait);
	}

	return 0;
}

static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sock *newsk = NULL;
	struct sk_buff *oskb;
	int err;

	lock_sock(sk);
	err = pep_wait_connreq(sk, flags & O_NONBLOCK);
	if (err)
		goto out;

	newsk = __sk_head(&pn->ackq);

	oskb = skb_dequeue(&newsk->sk_receive_queue);
	err = pep_accept_conn(newsk, oskb);
	if (err) {
		skb_queue_head(&newsk->sk_receive_queue, oskb);
		newsk = NULL;
		goto out;
	}

	sock_hold(sk);
	pep_sk(newsk)->listener = sk;

	sock_hold(newsk);
	sk_del_node_init(newsk);
	sk_acceptq_removed(sk);
	sk_add_node(newsk, &pn->hlist);
	__sock_put(newsk);

out:
	release_sock(sk);
	*errp = err;
	return newsk;
}

static int pep_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct pep_sock *pn = pep_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if (sock_flag(sk, SOCK_URGINLINE) &&
		    !skb_queue_empty(&pn->ctrlreq_queue))
			answ = skb_peek(&pn->ctrlreq_queue)->len;
		else if (!skb_queue_empty(&sk->sk_receive_queue))
			answ = skb_peek(&sk->sk_receive_queue)->len;
		else
			answ = 0;
		release_sock(sk);
		return put_user(answ, (int __user *)arg);
	}

	return -ENOIOCTLCMD;
}

static int pep_init(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	INIT_HLIST_HEAD(&pn->ackq);
	INIT_HLIST_HEAD(&pn->hlist);
	skb_queue_head_init(&pn->ctrlreq_queue);
	pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
	return 0;
}

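/* PNPIPE_ENCAP is the only pipe-level socket option handled here:
 * setting it to PNPIPE_ENCAP_IP (CAP_NET_ADMIN is required to change
 * it) attaches a GPRS network device via gprs_attach() and records its
 * ifindex, while clearing it detaches the device again.
 */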
static int pep_setsockopt(struct sock *sk, int level, int optname,
				char __user *optval, unsigned int optlen)
{
	struct pep_sock *pn = pep_sk(sk);
	int val = 0, err = 0;

	if (level != SOL_PNPIPE)
		return -ENOPROTOOPT;
	if (optlen >= sizeof(int)) {
		if (get_user(val, (int __user *) optval))
			return -EFAULT;
	}

	lock_sock(sk);
	switch (optname) {
	case PNPIPE_ENCAP:
		if (val && val != PNPIPE_ENCAP_IP) {
			err = -EINVAL;
			break;
		}
		if (!pn->ifindex == !val)
			break; /* Nothing to do! */
		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			break;
		}
		if (val) {
			release_sock(sk);
			err = gprs_attach(sk);
			if (err > 0) {
				pn->ifindex = err;
				err = 0;
			}
		} else {
			pn->ifindex = 0;
			release_sock(sk);
			gprs_detach(sk);
			err = 0;
		}
		goto out_norel;
	default:
		err = -ENOPROTOOPT;
	}
	release_sock(sk);

out_norel:
	return err;
}

static int pep_getsockopt(struct sock *sk, int level, int optname,
				char __user *optval, int __user *optlen)
{
	struct pep_sock *pn = pep_sk(sk);
	int len, val;

	if (level != SOL_PNPIPE)
		return -ENOPROTOOPT;
	if (get_user(len, optlen))
		return -EFAULT;

	switch (optname) {
	case PNPIPE_ENCAP:
		val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
		break;
	case PNPIPE_IFINDEX:
		val = pn->ifindex;
		break;
	default:
		return -ENOPROTOOPT;
	}

	len = min_t(unsigned int, sizeof(int), len);
	if (put_user(len, optlen))
		return -EFAULT;
	if (put_user(val, (int __user *) optval))
		return -EFAULT;
	return 0;
}

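/* pipe_skb_send() consumes one TX credit (when the negotiated flow
 * control is credit based), prepends the 3-byte PNS_PIPE_DATA header
 * and hands the packet to the pipe service. With no credit left the
 * skb is freed and -ENOBUFS returned.
 */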
static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;

	if (pn_flow_safe(pn->tx_fc) &&
	    !atomic_add_unless(&pn->tx_credits, -1, 0)) {
		kfree_skb(skb);
		return -ENOBUFS;
	}

	skb_push(skb, 3);
	skb_reset_transport_header(skb);
	ph = pnp_hdr(skb);
	ph->utid = 0;
	ph->message_id = PNS_PIPE_DATA;
	ph->pipe_handle = pn->pipe_handle;

	return pn_skb_send(sk, skb, &pipe_srv);
}

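/* The sendmsg path below copies the user data up front, then waits for
 * the pipe to reach TCP_ESTABLISHED and for at least one TX credit
 * before handing the skb to pipe_skb_send(); MSG_EOR is mandatory since
 * each message maps to exactly one PNS_PIPE_DATA packet.
 */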
static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
			struct msghdr *msg, size_t len)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sk_buff *skb;
	long timeo;
	int flags = msg->msg_flags;
	int err, done;

	if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR))
		return -EOPNOTSUPP;

	skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
					flags & MSG_DONTWAIT, &err);
	if (!skb)
		return -ENOBUFS;

	skb_reserve(skb, MAX_PHONET_HEADER + 3);
	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (err < 0)
		goto outfree;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
		err = -ENOTCONN;
		goto out;
	}
	if (sk->sk_state != TCP_ESTABLISHED) {
		/* Wait until the pipe gets to enabled state */
disabled:
		err = sk_stream_wait_connect(sk, &timeo);
		if (err)
			goto out;

		if (sk->sk_state == TCP_CLOSE_WAIT) {
			err = -ECONNRESET;
			goto out;
		}
	}
	BUG_ON(sk->sk_state != TCP_ESTABLISHED);

	/* Wait until flow control allows TX */
	done = atomic_read(&pn->tx_credits);
	while (!done) {
		DEFINE_WAIT(wait);

		if (!timeo) {
			err = -EAGAIN;
			goto out;
		}
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		prepare_to_wait(&sk->sk_socket->wait, &wait,
				TASK_INTERRUPTIBLE);
		done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits));
		finish_wait(&sk->sk_socket->wait, &wait);

		if (sk->sk_state != TCP_ESTABLISHED)
			goto disabled;
	}

	err = pipe_skb_send(sk, skb);
	if (err >= 0)
		err = len; /* success! */
	skb = NULL;
out:
	release_sock(sk);
outfree:
	kfree_skb(skb);
	return err;
}

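/* pep_writeable(), pep_write() and pep_read() are not static: they are
 * meant for the GPRS/IP encapsulation glue hooked up via gprs_attach()
 * (presumably net/phonet/gprs.c, cf. the gprs.h include above).
 */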
int pep_writeable(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);

	return atomic_read(&pn->tx_credits);
}

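/* pep_write() wraps the outgoing packet in a freshly allocated head skb
 * (so the 3-byte pipe header can be pushed without touching the
 * original data), chains the packet on its frag_list, and flattens any
 * nested fragment list before calling pipe_skb_send().
 */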
int pep_write(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *rskb, *fs;
	int flen = 0;

	rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
	if (!rskb) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	skb_shinfo(rskb)->frag_list = skb;
	rskb->len += skb->len;
	rskb->data_len += rskb->len;
	rskb->truesize += rskb->len;

	/* Avoid nested fragments */
	skb_walk_frags(skb, fs)
		flen += fs->len;
	skb->next = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);
	skb->len -= flen;
	skb->data_len -= flen;
	skb->truesize -= flen;

	skb_reserve(rskb, MAX_PHONET_HEADER + 3);
	return pipe_skb_send(sk, rskb);
}

struct sk_buff *pep_read(struct sock *sk)
{
	struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);

	if (sk->sk_state == TCP_ESTABLISHED)
		pipe_grant_credits(sk);
	return skb;
}

static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
			struct msghdr *msg, size_t len, int noblock,
			int flags, int *addr_len)
{
	struct sk_buff *skb;
	int err;

	if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
		return -ENOTCONN;

	if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) {
		/* Dequeue and acknowledge control request */
		struct pep_sock *pn = pep_sk(sk);

		skb = skb_dequeue(&pn->ctrlreq_queue);
		if (skb) {
			pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
						GFP_KERNEL);
			msg->msg_flags |= MSG_OOB;
			goto copy;
		}
		if (flags & MSG_OOB)
			return -EINVAL;
	}

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	lock_sock(sk);
	if (skb == NULL) {
		if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
			err = -ECONNRESET;
		release_sock(sk);
		return err;
	}

	if (sk->sk_state == TCP_ESTABLISHED)
		pipe_grant_credits(sk);
	release_sock(sk);
copy:
	msg->msg_flags |= MSG_EOR;
	if (skb->len > len)
		msg->msg_flags |= MSG_TRUNC;
	else
		len = skb->len;

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
	if (!err)
		err = (flags & MSG_TRUNC) ? skb->len : len;

	skb_free_datagram(sk, skb);
	return err;
}

static void pep_sock_unhash(struct sock *sk)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sock *skparent = NULL;

	lock_sock(sk);
	if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
		skparent = pn->listener;
		sk_del_node_init(sk);
		release_sock(sk);

		sk = skparent;
		pn = pep_sk(skparent);
		lock_sock(sk);
	}
	/* Unhash a listening sock only when it is closed
	 * and all of its active connected pipes are closed. */
	if (hlist_empty(&pn->hlist))
		pn_sock_unhash(&pn->pn_sk.sk);
	release_sock(sk);

	if (skparent)
		sock_put(skparent);
}

static struct proto pep_proto = {
	.close		= pep_sock_close,
	.accept		= pep_sock_accept,
	.ioctl		= pep_ioctl,
	.init		= pep_init,
	.setsockopt	= pep_setsockopt,
	.getsockopt	= pep_getsockopt,
	.sendmsg	= pep_sendmsg,
	.recvmsg	= pep_recvmsg,
	.backlog_rcv	= pep_do_rcv,
	.hash		= pn_sock_hash,
	.unhash		= pep_sock_unhash,
	.get_port	= pn_sock_get_port,
	.obj_size	= sizeof(struct pep_sock),
	.owner		= THIS_MODULE,
	.name		= "PNPIPE",
};

static struct phonet_protocol pep_pn_proto = {
	.ops		= &phonet_stream_ops,
	.prot		= &pep_proto,
	.sock_type	= SOCK_SEQPACKET,
};

static int __init pep_register(void)
{
	return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
}

static void __exit pep_unregister(void)
{
	phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto);
}

module_init(pep_register);
module_exit(pep_unregister);
MODULE_AUTHOR("Remi Denis-Courmont, Nokia");
MODULE_DESCRIPTION("Phonet pipe protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE);
1079