xref: /openbmc/linux/net/rose/af_rose.c (revision 360823a09426347ea8f232b0b0b5156d0aed0302)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
5  * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
6  * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
7  * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
8  */
9 
10 #include <linux/capability.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/init.h>
14 #include <linux/errno.h>
15 #include <linux/types.h>
16 #include <linux/socket.h>
17 #include <linux/in.h>
18 #include <linux/slab.h>
19 #include <linux/kernel.h>
20 #include <linux/sched/signal.h>
21 #include <linux/spinlock.h>
22 #include <linux/timer.h>
23 #include <linux/string.h>
24 #include <linux/sockios.h>
25 #include <linux/net.h>
26 #include <linux/stat.h>
27 #include <net/net_namespace.h>
28 #include <net/ax25.h>
29 #include <linux/inet.h>
30 #include <linux/netdevice.h>
31 #include <linux/if_arp.h>
32 #include <linux/skbuff.h>
33 #include <net/sock.h>
34 #include <linux/uaccess.h>
35 #include <linux/fcntl.h>
36 #include <linux/termios.h>
37 #include <linux/mm.h>
38 #include <linux/interrupt.h>
39 #include <linux/notifier.h>
40 #include <net/rose.h>
41 #include <linux/proc_fs.h>
42 #include <linux/seq_file.h>
43 #include <net/tcp_states.h>
44 #include <net/ip.h>
45 #include <net/arp.h>
46 
/* Number of virtual ROSE devices to create (NOTE(review): presumably a
 * module parameter registered elsewhere in this file — confirm). */
static int rose_ndevs = 10;

/* Protocol timer and policy defaults, tunable via sysctl. */
int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
int sysctl_rose_call_request_timeout    = ROSE_DEFAULT_T1;
int sysctl_rose_reset_request_timeout   = ROSE_DEFAULT_T2;
int sysctl_rose_clear_request_timeout   = ROSE_DEFAULT_T3;
int sysctl_rose_no_activity_timeout     = ROSE_DEFAULT_IDLE;
int sysctl_rose_ack_hold_back_timeout   = ROSE_DEFAULT_HB;
int sysctl_rose_routing_control         = ROSE_DEFAULT_ROUTING;
int sysctl_rose_link_fail_timeout       = ROSE_DEFAULT_FAIL_TIMEOUT;
int sysctl_rose_maximum_vcs             = ROSE_DEFAULT_MAXVC;
int sysctl_rose_window_size             = ROSE_DEFAULT_WINDOW_SIZE;

/* Global list of all ROSE sockets, protected by rose_list_lock. */
static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);

static const struct proto_ops rose_proto_ops;

/* Local callsign used when this node originates ROSE traffic. */
ax25_address rose_callsign;
66 
67 /*
68  * ROSE network devices are virtual network devices encapsulating ROSE
69  * frames into AX.25 which will be sent through an AX.25 device, so form a
70  * special "super class" of normal net devices; split their locks off into a
71  * separate class since they always nest.
72  */
73 static struct lock_class_key rose_netdev_xmit_lock_key;
74 static struct lock_class_key rose_netdev_addr_lock_key;
75 
/* Per-TX-queue callback for netdev_for_each_tx_queue(): move the queue's
 * xmit lock into the ROSE-specific lockdep class (see comment above). */
static void rose_set_lockdep_one(struct net_device *dev,
				 struct netdev_queue *txq,
				 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
}
82 
/* Reclassify a ROSE netdev's addr-list lock and every TX-queue lock so
 * lockdep treats them as distinct from ordinary netdev locks. */
static void rose_set_lockdep_key(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
}
88 
89 /*
90  *	Convert a ROSE address into text.
91  */
rose2asc(char * buf,const rose_address * addr)92 char *rose2asc(char *buf, const rose_address *addr)
93 {
94 	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
95 	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
96 	    addr->rose_addr[4] == 0x00) {
97 		strcpy(buf, "*");
98 	} else {
99 		sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
100 						addr->rose_addr[1] & 0xFF,
101 						addr->rose_addr[2] & 0xFF,
102 						addr->rose_addr[3] & 0xFF,
103 						addr->rose_addr[4] & 0xFF);
104 	}
105 
106 	return buf;
107 }
108 
109 /*
110  *	Compare two ROSE addresses, 0 == equal.
111  */
rosecmp(const rose_address * addr1,const rose_address * addr2)112 int rosecmp(const rose_address *addr1, const rose_address *addr2)
113 {
114 	int i;
115 
116 	for (i = 0; i < 5; i++)
117 		if (addr1->rose_addr[i] != addr2->rose_addr[i])
118 			return 1;
119 
120 	return 0;
121 }
122 
123 /*
124  *	Compare two ROSE addresses for only mask digits, 0 == equal.
125  */
rosecmpm(const rose_address * addr1,const rose_address * addr2,unsigned short mask)126 int rosecmpm(const rose_address *addr1, const rose_address *addr2,
127 	     unsigned short mask)
128 {
129 	unsigned int i, j;
130 
131 	if (mask > 10)
132 		return 1;
133 
134 	for (i = 0; i < mask; i++) {
135 		j = i / 2;
136 
137 		if ((i % 2) != 0) {
138 			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
139 				return 1;
140 		} else {
141 			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
142 				return 1;
143 		}
144 	}
145 
146 	return 0;
147 }
148 
149 /*
150  *	Socket removal during an interrupt is now safe.
151  */
/* Unlink sk from the global bound-socket list. The _bh lock keeps
 * softirq/timer users of rose_list out while we modify it. */
static void rose_remove_socket(struct sock *sk)
{
	spin_lock_bh(&rose_list_lock);
	sk_del_node_init(sk);
	spin_unlock_bh(&rose_list_lock);
}
158 
159 /*
160  *	Kill all bound sockets on a broken link layer connection to a
161  *	particular neighbour.
162  */
void rose_kill_by_neigh(struct rose_neigh *neigh)
{
	struct sock *s;

	spin_lock_bh(&rose_list_lock);
	sk_for_each(s, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		/* Disconnect every socket still routed via the dead
		 * neighbour, drop its use count and sever the pointer so
		 * nothing touches the neighbour afterwards. */
		if (rose->neighbour == neigh) {
			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
			rose->neighbour->use--;
			rose->neighbour = NULL;
		}
	}
	spin_unlock_bh(&rose_list_lock);
}
179 
180 /*
181  *	Kill all bound sockets on a dropped device.
182  */
rose_kill_by_device(struct net_device * dev)183 static void rose_kill_by_device(struct net_device *dev)
184 {
185 	struct sock *sk, *array[16];
186 	struct rose_sock *rose;
187 	bool rescan;
188 	int i, cnt;
189 
190 start:
191 	rescan = false;
192 	cnt = 0;
193 	spin_lock_bh(&rose_list_lock);
194 	sk_for_each(sk, &rose_list) {
195 		rose = rose_sk(sk);
196 		if (rose->device == dev) {
197 			if (cnt == ARRAY_SIZE(array)) {
198 				rescan = true;
199 				break;
200 			}
201 			sock_hold(sk);
202 			array[cnt++] = sk;
203 		}
204 	}
205 	spin_unlock_bh(&rose_list_lock);
206 
207 	for (i = 0; i < cnt; i++) {
208 		sk = array[cnt];
209 		rose = rose_sk(sk);
210 		lock_sock(sk);
211 		spin_lock_bh(&rose_list_lock);
212 		if (rose->device == dev) {
213 			rose_disconnect(sk, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
214 			if (rose->neighbour)
215 				rose->neighbour->use--;
216 			netdev_put(rose->device, &rose->dev_tracker);
217 			rose->device = NULL;
218 		}
219 		spin_unlock_bh(&rose_list_lock);
220 		release_sock(sk);
221 		sock_put(sk);
222 		cond_resched();
223 	}
224 	if (rescan)
225 		goto start;
226 }
227 
228 /*
229  *	Handle device status changes.
230  */
rose_device_event(struct notifier_block * this,unsigned long event,void * ptr)231 static int rose_device_event(struct notifier_block *this,
232 			     unsigned long event, void *ptr)
233 {
234 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
235 
236 	if (!net_eq(dev_net(dev), &init_net))
237 		return NOTIFY_DONE;
238 
239 	if (event != NETDEV_DOWN)
240 		return NOTIFY_DONE;
241 
242 	switch (dev->type) {
243 	case ARPHRD_ROSE:
244 		rose_kill_by_device(dev);
245 		break;
246 	case ARPHRD_AX25:
247 		rose_link_device_down(dev);
248 		rose_rt_device_down(dev);
249 		break;
250 	}
251 
252 	return NOTIFY_DONE;
253 }
254 
255 /*
256  *	Add a socket to the bound sockets list.
257  */
/* Add sk to the global bound-socket list under rose_list_lock. */
static void rose_insert_socket(struct sock *sk)
{

	spin_lock_bh(&rose_list_lock);
	sk_add_node(sk, &rose_list);
	spin_unlock_bh(&rose_list_lock);
}
265 
266 /*
267  *	Find a socket that wants to accept the Call Request we just
268  *	received.
269  */
static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
{
	struct sock *s;

	spin_lock_bh(&rose_list_lock);
	/* First pass: listener bound to this exact address and callsign
	 * with no digipeaters. */
	sk_for_each(s, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, call) &&
		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
			goto found;
	}

	/* Second pass: fall back to a listener on this address with the
	 * null (wildcard) callsign. */
	sk_for_each(s, &rose_list) {
		struct rose_sock *rose = rose_sk(s);

		if (!rosecmp(&rose->source_addr, addr) &&
		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
		    s->sk_state == TCP_LISTEN)
			goto found;
	}
	s = NULL;
found:
	spin_unlock_bh(&rose_list_lock);
	return s;
}
297 
298 /*
299  *	Find a connected ROSE socket given my LCI and device.
300  */
rose_find_socket(unsigned int lci,struct rose_neigh * neigh)301 struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
302 {
303 	struct sock *s;
304 
305 	spin_lock_bh(&rose_list_lock);
306 	sk_for_each(s, &rose_list) {
307 		struct rose_sock *rose = rose_sk(s);
308 
309 		if (rose->lci == lci && rose->neighbour == neigh)
310 			goto found;
311 	}
312 	s = NULL;
313 found:
314 	spin_unlock_bh(&rose_list_lock);
315 	return s;
316 }
317 
318 /*
319  *	Find a unique LCI for a given device.
320  */
rose_new_lci(struct rose_neigh * neigh)321 unsigned int rose_new_lci(struct rose_neigh *neigh)
322 {
323 	int lci;
324 
325 	if (neigh->dce_mode) {
326 		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
327 			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
328 				return lci;
329 	} else {
330 		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
331 			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
332 				return lci;
333 	}
334 
335 	return 0;
336 }
337 
338 /*
339  *	Deferred destroy.
340  */
341 void rose_destroy_socket(struct sock *);
342 
343 /*
344  *	Handler for deferred kills.
345  */
/* Timer callback armed by rose_destroy_socket() when buffers were still
 * outstanding: retry the destroy once the grace period expires. */
static void rose_destroy_timer(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);

	rose_destroy_socket(sk);
}
352 
353 /*
354  *	This is called from user mode and the timers. Thus it protects itself
355  *	against interrupt users but doesn't worry about being called during
356  *	work.  Once it is removed from the queue no interrupt or bottom half
357  *	will touch it and we are (fairly 8-) ) safe.
358  */
void rose_destroy_socket(struct sock *sk)
{
	struct sk_buff *skb;

	/* Take the socket off the global list and stop all of its timers
	 * before touching its queues. */
	rose_remove_socket(sk);
	rose_stop_heartbeat(sk);
	rose_stop_idletimer(sk);
	rose_stop_timer(sk);

	rose_clear_queues(sk);		/* Flush the queues */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (skb->sk != sk) {	/* A pending connection */
			/* Queue the unaccepted socket for death */
			sock_set_flag(skb->sk, SOCK_DEAD);
			rose_start_heartbeat(skb->sk);
			rose_sk(skb->sk)->state = ROSE_STATE_0;
		}

		kfree_skb(skb);
	}

	if (sk_has_allocations(sk)) {
		/* Defer: outstanding buffers — retry in 10 seconds via
		 * rose_destroy_timer(). */
		timer_setup(&sk->sk_timer, rose_destroy_timer, 0);
		sk->sk_timer.expires  = jiffies + 10 * HZ;
		add_timer(&sk->sk_timer);
	} else
		sock_put(sk);
}
389 
390 /*
391  *	Handling for system calls applied via the various interfaces to a
392  *	ROSE socket object.
393  */
394 
static int rose_setsockopt(struct socket *sock, int level, int optname,
		sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	unsigned int opt;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (optlen < sizeof(opt))
		return -EINVAL;

	if (copy_from_sockptr(&opt, optval, sizeof(opt)))
		return -EFAULT;

	switch (optname) {
	case ROSE_DEFER:
		rose->defer = !!opt;
		return 0;

	/* The four timer options take seconds from userspace; reject
	 * zero and anything that would overflow the jiffies scaling. */
	case ROSE_T1:
	case ROSE_T2:
	case ROSE_T3:
	case ROSE_HOLDBACK:
		if (opt < 1 || opt > UINT_MAX / HZ)
			return -EINVAL;
		opt *= HZ;
		if (optname == ROSE_T1)
			rose->t1 = opt;
		else if (optname == ROSE_T2)
			rose->t2 = opt;
		else if (optname == ROSE_T3)
			rose->t3 = opt;
		else
			rose->hb = opt;
		return 0;

	case ROSE_IDLE:
		/* Minutes from userspace; unlike the timers above, zero
		 * is accepted here. */
		if (opt > UINT_MAX / (60 * HZ))
			return -EINVAL;
		rose->idle = opt * 60 * HZ;
		return 0;

	case ROSE_QBITINCL:
		rose->qbitincl = !!opt;
		return 0;

	default:
		return -ENOPROTOOPT;
	}
}
454 
static int rose_getsockopt(struct socket *sock, int level, int optname,
	char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	int val = 0;
	int len;

	if (level != SOL_ROSE)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	/* Timers are stored in jiffies but reported back in the units
	 * they were set in (seconds, or minutes for ROSE_IDLE). */
	switch (optname) {
	case ROSE_DEFER:
		val = rose->defer;
		break;
	case ROSE_T1:
		val = rose->t1 / HZ;
		break;
	case ROSE_T2:
		val = rose->t2 / HZ;
		break;
	case ROSE_T3:
		val = rose->t3 / HZ;
		break;
	case ROSE_HOLDBACK:
		val = rose->hb / HZ;
		break;
	case ROSE_IDLE:
		val = rose->idle / (60 * HZ);
		break;
	case ROSE_QBITINCL:
		val = rose->qbitincl;
		break;
	default:
		return -ENOPROTOOPT;
	}

	/* Never copy more than the int we hold, nor more than asked. */
	len = min_t(unsigned int, len, sizeof(int));

	if (put_user(len, optlen))
		return -EFAULT;

	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
}
512 
rose_listen(struct socket * sock,int backlog)513 static int rose_listen(struct socket *sock, int backlog)
514 {
515 	struct sock *sk = sock->sk;
516 
517 	lock_sock(sk);
518 	if (sock->state != SS_UNCONNECTED) {
519 		release_sock(sk);
520 		return -EINVAL;
521 	}
522 
523 	if (sk->sk_state != TCP_LISTEN) {
524 		struct rose_sock *rose = rose_sk(sk);
525 
526 		rose->dest_ndigis = 0;
527 		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
528 		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
529 		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
530 		sk->sk_max_ack_backlog = backlog;
531 		sk->sk_state           = TCP_LISTEN;
532 		release_sock(sk);
533 		return 0;
534 	}
535 	release_sock(sk);
536 
537 	return -EOPNOTSUPP;
538 }
539 
/* Protocol descriptor: obj_size sizes the per-socket allocation made by
 * sk_alloc() so a struct rose_sock fits. */
static struct proto rose_proto = {
	.name	  = "ROSE",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct rose_sock),
};
545 
static int rose_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk;
	struct rose_sock *rose;

	/* ROSE only exists in the initial network namespace. */
	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	/* Only SOCK_SEQPACKET with the default protocol is supported. */
	if (sock->type != SOCK_SEQPACKET || protocol != 0)
		return -ESOCKTNOSUPPORT;

	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	rose = rose_sk(sk);

	sock_init_data(sock, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen    = 0;
#endif

	sock->ops    = &rose_proto_ops;
	sk->sk_protocol = protocol;

	/* Timers are initialised without callbacks here; presumably the
	 * handlers are installed by the rose timer helpers — confirm. */
	timer_setup(&rose->timer, NULL, 0);
	timer_setup(&rose->idletimer, NULL, 0);

	/* Seed per-socket timer values from the sysctl defaults. */
	rose->t1   = msecs_to_jiffies(sysctl_rose_call_request_timeout);
	rose->t2   = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
	rose->t3   = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
	rose->hb   = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);

	rose->state = ROSE_STATE_0;

	return 0;
}
588 
/* Clone osk for an incoming call: allocate a new sock inheriting type,
 * buffer sizes, flags, timers and device from the listener. Returns the
 * new sock, or NULL on failure. */
static struct sock *rose_make_new(struct sock *osk)
{
	struct sock *sk;
	struct rose_sock *rose, *orose;

	if (osk->sk_type != SOCK_SEQPACKET)
		return NULL;

	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto, 0);
	if (sk == NULL)
		return NULL;

	rose = rose_sk(sk);

	/* No struct socket yet; it is grafted on in rose_accept(). */
	sock_init_data(NULL, sk);

	skb_queue_head_init(&rose->ack_queue);
#ifdef M_BIT
	skb_queue_head_init(&rose->frag_queue);
	rose->fraglen  = 0;
#endif

	sk->sk_type     = osk->sk_type;
	sk->sk_priority = osk->sk_priority;
	sk->sk_protocol = osk->sk_protocol;
	sk->sk_rcvbuf   = osk->sk_rcvbuf;
	sk->sk_sndbuf   = osk->sk_sndbuf;
	sk->sk_state    = TCP_ESTABLISHED;
	sock_copy_flags(sk, osk);

	timer_setup(&rose->timer, NULL, 0);
	timer_setup(&rose->idletimer, NULL, 0);

	/* Inherit the listener's timer settings and bound device. */
	orose		= rose_sk(osk);
	rose->t1	= orose->t1;
	rose->t2	= orose->t2;
	rose->t3	= orose->t3;
	rose->hb	= orose->hb;
	rose->idle	= orose->idle;
	rose->defer	= orose->defer;
	rose->device	= orose->device;
	if (rose->device)
		netdev_hold(rose->device, &rose->dev_tracker, GFP_ATOMIC);
	rose->qbitincl	= orose->qbitincl;

	return sk;
}
636 
static int rose_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose;

	if (sk == NULL) return 0;

	/* Hold sk across the teardown; the final sock_put() is below. */
	sock_hold(sk);
	sock_orphan(sk);
	lock_sock(sk);
	rose = rose_sk(sk);

	switch (rose->state) {
	case ROSE_STATE_0:
		/* Not connected: disconnect (sock unlocked around the
		 * call) and destroy immediately. */
		release_sock(sk);
		rose_disconnect(sk, 0, -1, -1);
		lock_sock(sk);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_2:
		/* Clear request already outstanding: drop our neighbour
		 * reference and destroy. */
		rose->neighbour->use--;
		release_sock(sk);
		rose_disconnect(sk, 0, -1, -1);
		lock_sock(sk);
		rose_destroy_socket(sk);
		break;

	case ROSE_STATE_1:
	case ROSE_STATE_3:
	case ROSE_STATE_4:
	case ROSE_STATE_5:
		/* Active call: send a CLEAR REQUEST and let the T3 timer
		 * drive the socket to its final destruction. */
		rose_clear_queues(sk);
		rose_stop_idletimer(sk);
		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
		rose_start_t3timer(sk);
		rose->state  = ROSE_STATE_2;
		sk->sk_state    = TCP_CLOSE;
		sk->sk_shutdown |= SEND_SHUTDOWN;
		sk->sk_state_change(sk);
		sock_set_flag(sk, SOCK_DEAD);
		sock_set_flag(sk, SOCK_DESTROY);
		break;

	default:
		break;
	}

	/* Drop the device reference under the list lock so concurrent
	 * list walkers never see a half-released device pointer. */
	spin_lock_bh(&rose_list_lock);
	netdev_put(rose->device, &rose->dev_tracker);
	rose->device = NULL;
	spin_unlock_bh(&rose_list_lock);
	sock->sk = NULL;
	release_sock(sk);
	sock_put(sk);

	return 0;
}
695 
static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	struct net_device *dev;
	ax25_address *source;
	ax25_uid_assoc *user;
	int err = -EINVAL;
	int n;

	/* Two address formats: the short form carries at most one digi,
	 * the full form up to ROSE_MAX_DIGIS. */
	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	lock_sock(sk);

	/* Already bound (ZAPPED cleared) — refuse a second bind. */
	if (!sock_flag(sk, SOCK_ZAPPED))
		goto out_release;

	err = -EADDRNOTAVAIL;
	/* rose_dev_get() returns the device holding a reference; we keep
	 * it for the lifetime of the binding (tracker allocated below). */
	dev = rose_dev_get(&addr->srose_addr);
	if (!dev)
		goto out_release;

	source = &addr->srose_call;

	/* Prefer the callsign registered for this uid; otherwise the
	 * caller-supplied one, gated by the uid policy capability. */
	user = ax25_findbyuid(current_euid());
	if (user) {
		rose->source_call = user->call;
		ax25_uid_put(user);
	} else {
		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
			dev_put(dev);
			err = -EACCES;
			goto out_release;
		}
		rose->source_call   = *source;
	}

	rose->source_addr   = addr->srose_addr;
	rose->device        = dev;
	netdev_tracker_alloc(rose->device, &rose->dev_tracker, GFP_KERNEL);
	rose->source_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->source_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->source_ndigis == 1) {
			rose->source_digis[0] = addr->srose_digi;
		}
	}

	rose_insert_socket(sk);

	/* Binding complete. */
	sock_reset_flag(sk, SOCK_ZAPPED);
	err = 0;
out_release:
	release_sock(sk);
	return err;
}
767 
static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
	unsigned char cause, diagnostic;
	ax25_uid_assoc *user;
	int n, err = 0;

	/* Same two address formats as rose_bind(). */
	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
		return -EINVAL;

	if (addr->srose_family != AF_ROSE)
		return -EINVAL;

	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
		return -EINVAL;

	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
		return -EINVAL;

	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
		/* Connect completed during a ERESTARTSYS event */
		sock->state = SS_CONNECTED;
		goto out_release;
	}

	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
		/* A previous connect attempt died while we were away. */
		sock->state = SS_UNCONNECTED;
		err = -ECONNREFUSED;
		goto out_release;
	}

	if (sk->sk_state == TCP_ESTABLISHED) {
		/* No reconnect on a seqpacket socket */
		err = -EISCONN;
		goto out_release;
	}

	sk->sk_state   = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	/* Route lookup: find the neighbour to reach the destination. */
	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
					 &diagnostic, 0);
	if (!rose->neighbour) {
		err = -ENETUNREACH;
		goto out_release;
	}

	rose->lci = rose_new_lci(rose->neighbour);
	if (!rose->lci) {
		err = -ENETUNREACH;
		goto out_release;
	}

	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this may or may not work */
		struct net_device *dev;

		sock_reset_flag(sk, SOCK_ZAPPED);

		/* Autobind to the first ROSE device and the callsign
		 * registered for the calling uid. */
		dev = rose_dev_first();
		if (!dev) {
			err = -ENETUNREACH;
			goto out_release;
		}

		user = ax25_findbyuid(current_euid());
		if (!user) {
			err = -EINVAL;
			dev_put(dev);
			goto out_release;
		}

		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
		rose->source_call = user->call;
		rose->device      = dev;
		netdev_tracker_alloc(rose->device, &rose->dev_tracker,
				     GFP_KERNEL);
		ax25_uid_put(user);

		rose_insert_socket(sk);		/* Finish the bind */
	}
	rose->dest_addr   = addr->srose_addr;
	rose->dest_call   = addr->srose_call;
	/* Per-call tag mixing the socket pointer and LCI (not a secure
	 * random value). */
	rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
	rose->dest_ndigis = addr->srose_ndigis;

	if (addr_len == sizeof(struct full_sockaddr_rose)) {
		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
		for (n = 0 ; n < addr->srose_ndigis ; n++)
			rose->dest_digis[n] = full_addr->srose_digis[n];
	} else {
		if (rose->dest_ndigis == 1) {
			rose->dest_digis[0] = addr->srose_digi;
		}
	}

	/* Move to connecting socket, start sending Connect Requests */
	sock->state   = SS_CONNECTING;
	sk->sk_state     = TCP_SYN_SENT;

	rose->state = ROSE_STATE_1;

	rose->neighbour->use++;

	rose_write_internal(sk, ROSE_CALL_REQUEST);
	rose_start_heartbeat(sk);
	rose_start_t1timer(sk);

	/* Now the loop */
	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
		err = -EINPROGRESS;
		goto out_release;
	}

	/*
	 * A Connect Ack with Choke or timeout or failed routing will go to
	 * closed.
	 */
	if (sk->sk_state == TCP_SYN_SENT) {
		DEFINE_WAIT(wait);

		/* Sleep (dropping the socket lock) until the call either
		 * completes or a signal arrives. */
		for (;;) {
			prepare_to_wait(sk_sleep(sk), &wait,
					TASK_INTERRUPTIBLE);
			if (sk->sk_state != TCP_SYN_SENT)
				break;
			if (!signal_pending(current)) {
				release_sock(sk);
				schedule();
				lock_sock(sk);
				continue;
			}
			err = -ERESTARTSYS;
			break;
		}
		finish_wait(sk_sleep(sk), &wait);

		if (err)
			goto out_release;
	}

	if (sk->sk_state != TCP_ESTABLISHED) {
		sock->state = SS_UNCONNECTED;
		err = sock_error(sk);	/* Always set at this point */
		goto out_release;
	}

	sock->state = SS_CONNECTED;

out_release:
	release_sock(sk);

	return err;
}
929 
static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
		       bool kern)
{
	struct sk_buff *skb;
	struct sock *newsk;
	DEFINE_WAIT(wait);
	struct sock *sk;
	int err = 0;

	if ((sk = sock->sk) == NULL)
		return -EINVAL;

	lock_sock(sk);
	if (sk->sk_type != SOCK_SEQPACKET) {
		err = -EOPNOTSUPP;
		goto out_release;
	}

	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto out_release;
	}

	/*
	 *	The write queue this time is holding sockets ready to use
	 *	hooked into the SABM we saved
	 */
	/* Block (unless O_NONBLOCK) until rose_rx_call_request() queues a
	 * pending connection skb on the receive queue. */
	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb)
			break;

		if (flags & O_NONBLOCK) {
			err = -EWOULDBLOCK;
			break;
		}
		if (!signal_pending(current)) {
			release_sock(sk);
			schedule();
			lock_sock(sk);
			continue;
		}
		err = -ERESTARTSYS;
		break;
	}
	finish_wait(sk_sleep(sk), &wait);
	if (err)
		goto out_release;

	/* The queued skb carries the pre-built child sock; graft it onto
	 * the caller's new struct socket. */
	newsk = skb->sk;
	sock_graft(newsk, newsock);

	/* Now attach up the new socket */
	skb->sk = NULL;
	kfree_skb(skb);
	sk_acceptq_removed(sk);

out_release:
	release_sock(sk);

	return err;
}
994 
rose_getname(struct socket * sock,struct sockaddr * uaddr,int peer)995 static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
996 	int peer)
997 {
998 	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
999 	struct sock *sk = sock->sk;
1000 	struct rose_sock *rose = rose_sk(sk);
1001 	int n;
1002 
1003 	memset(srose, 0, sizeof(*srose));
1004 	if (peer != 0) {
1005 		if (sk->sk_state != TCP_ESTABLISHED)
1006 			return -ENOTCONN;
1007 		srose->srose_family = AF_ROSE;
1008 		srose->srose_addr   = rose->dest_addr;
1009 		srose->srose_call   = rose->dest_call;
1010 		srose->srose_ndigis = rose->dest_ndigis;
1011 		for (n = 0; n < rose->dest_ndigis; n++)
1012 			srose->srose_digis[n] = rose->dest_digis[n];
1013 	} else {
1014 		srose->srose_family = AF_ROSE;
1015 		srose->srose_addr   = rose->source_addr;
1016 		srose->srose_call   = rose->source_call;
1017 		srose->srose_ndigis = rose->source_ndigis;
1018 		for (n = 0; n < rose->source_ndigis; n++)
1019 			srose->srose_digis[n] = rose->source_digis[n];
1020 	}
1021 
1022 	return sizeof(struct full_sockaddr_rose);
1023 }
1024 
/* Handle an incoming ROSE Call Request: parse its facilities, find a
 * matching listener, clone a child socket and queue it for accept().
 * Returns 1 if the skb was consumed into a listener's queue, 0 if the
 * call was refused (a clear request has then been transmitted). */
int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
{
	struct sock *sk;
	struct sock *make;
	struct rose_sock *make_rose;
	struct rose_facilities_struct facilities;
	int n;

	skb->sk = NULL;		/* Initially we don't know who it's for */

	/*
	 *	skb->data points to the rose frame start
	 */
	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));

	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
				   &facilities)) {
		/* Malformed facilities: refuse the call. */
		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
		return 0;
	}

	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);

	/*
	 * We can't accept the Call Request.
	 */
	if (sk == NULL || sk_acceptq_is_full(sk) ||
	    (make = rose_make_new(sk)) == NULL) {
		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
		return 0;
	}

	/* Bind the pending-connection skb to the child sock so accept()
	 * (and rose_destroy_socket()) can find it. */
	skb->sk     = make;
	make->sk_state = TCP_ESTABLISHED;
	make_rose = rose_sk(make);

	/* Populate the child from the parsed call facilities. */
	make_rose->lci           = lci;
	make_rose->dest_addr     = facilities.dest_addr;
	make_rose->dest_call     = facilities.dest_call;
	make_rose->dest_ndigis   = facilities.dest_ndigis;
	for (n = 0 ; n < facilities.dest_ndigis ; n++)
		make_rose->dest_digis[n] = facilities.dest_digis[n];
	make_rose->source_addr   = facilities.source_addr;
	make_rose->source_call   = facilities.source_call;
	make_rose->source_ndigis = facilities.source_ndigis;
	for (n = 0 ; n < facilities.source_ndigis ; n++)
		make_rose->source_digis[n] = facilities.source_digis[n];
	make_rose->neighbour     = neigh;
	make_rose->device        = dev;
	/* Caller got a reference for us. */
	netdev_tracker_alloc(make_rose->device, &make_rose->dev_tracker,
			     GFP_ATOMIC);
	make_rose->facilities    = facilities;

	make_rose->neighbour->use++;

	if (rose_sk(sk)->defer) {
		/* Listener uses deferred accept: hold in STATE_5 and let
		 * userspace decide before the call is accepted. */
		make_rose->state = ROSE_STATE_5;
	} else {
		rose_write_internal(make, ROSE_CALL_ACCEPTED);
		make_rose->state = ROSE_STATE_3;
		rose_start_idletimer(make);
	}

	/* Reset flow-control state for the fresh virtual circuit. */
	make_rose->condition = 0x00;
	make_rose->vs        = 0;
	make_rose->va        = 0;
	make_rose->vr        = 0;
	make_rose->vl        = 0;
	sk_acceptq_added(sk);

	rose_insert_socket(make);

	skb_queue_head(&sk->sk_receive_queue, skb);

	rose_start_heartbeat(make);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 1;
}
1108 
/*
 * rose_sendmsg - send user data on a connected ROSE socket.
 *
 * Validates the optional destination in msg->msg_name against the
 * connected peer, copies the payload into a fresh sk_buff, prepends the
 * three-byte ROSE network header and queues the packet on the socket
 * write queue for transmission via rose_kick().
 *
 * Returns the number of payload bytes accepted (len) on success, or a
 * negative errno.
 */
static int rose_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_rose *, usrose, msg->msg_name);
	int err;
	struct full_sockaddr_rose srose;
	struct sk_buff *skb;
	unsigned char *asmptr;
	int n, size, qbit = 0;

	/* Only these message flags are meaningful here. */
	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
		return -EINVAL;

	/* ZAPPED means the socket has no usable local binding. */
	if (sock_flag(sk, SOCK_ZAPPED))
		return -EADDRNOTAVAIL;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		send_sig(SIGPIPE, current, 0);
		return -EPIPE;
	}

	if (rose->neighbour == NULL || rose->device == NULL)
		return -ENETUNREACH;

	if (usrose != NULL) {
		/*
		 * An explicit destination was supplied: ROSE is connection
		 * oriented, so it must match the connected peer exactly
		 * (address, callsign and every digipeater) or we return
		 * -EISCONN.
		 */
		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
			return -EINVAL;
		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
		memcpy(&srose, usrose, msg->msg_namelen);
		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
			return -EISCONN;
		if (srose.srose_ndigis != rose->dest_ndigis)
			return -EISCONN;
		if (srose.srose_ndigis == rose->dest_ndigis) {
			for (n = 0 ; n < srose.srose_ndigis ; n++)
				if (ax25cmp(&rose->dest_digis[n],
					    &srose.srose_digis[n]))
					return -EISCONN;
		}
		if (srose.srose_family != AF_ROSE)
			return -EINVAL;
	} else {
		/* No address given: the socket must already be connected. */
		if (sk->sk_state != TCP_ESTABLISHED)
			return -ENOTCONN;

		srose.srose_family = AF_ROSE;
		srose.srose_addr   = rose->dest_addr;
		srose.srose_call   = rose->dest_call;
		srose.srose_ndigis = rose->dest_ndigis;
		for (n = 0 ; n < rose->dest_ndigis ; n++)
			srose.srose_digis[n] = rose->dest_digis[n];
	}

	/* Build a packet */
	/* Sanity check the packet size */
	if (len > 65535)
		return -EMSGSIZE;

	/* Reserve room for the AX.25/BPQ link headers and the ROSE header. */
	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;

	/* May block unless MSG_DONTWAIT; err is filled on failure. */
	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
		return err;

	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);

	/*
	 *	Put the data on the end
	 */

	skb_reset_transport_header(skb);
	skb_put(skb, len);

	err = memcpy_from_msg(skb_transport_header(skb), msg, len);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	/*
	 *	If the Q BIT Include socket option is in force, the first
	 *	byte of the user data is the logical value of the Q Bit.
	 */
	if (rose->qbitincl) {
		qbit = skb->data[0];
		skb_pull(skb, 1);
	}

	/*
	 *	Push down the ROSE header
	 */
	asmptr = skb_push(skb, ROSE_MIN_LEN);

	/* Build a ROSE Network header */
	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
	asmptr[1] = (rose->lci >> 0) & 0xFF;
	asmptr[2] = ROSE_DATA;

	if (qbit)
		asmptr[0] |= ROSE_Q_BIT;

	/*
	 * Re-check the connection state: it may have been torn down while
	 * we slept in sock_alloc_send_skb() above.
	 */
	if (sk->sk_state != TCP_ESTABLISHED) {
		kfree_skb(skb);
		return -ENOTCONN;
	}

#ifdef M_BIT
	/*
	 * NOTE(review): this M-bit fragmentation path is compiled out
	 * (M_BIT is not defined anywhere here) and references sk_buff
	 * fields (skb->free, skb->arp) that no longer exist; it is
	 * retained as historical dead code.
	 */
#define ROSE_PACLEN (256-ROSE_MIN_LEN)
	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
		unsigned char header[ROSE_MIN_LEN];
		struct sk_buff *skbn;
		int frontlen;
		int lg;

		/* Save a copy of the Header */
		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
		skb_pull(skb, ROSE_MIN_LEN);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
				kfree_skb(skb);
				return err;
			}

			skbn->sk   = sk;
			skbn->free = 1;
			skbn->arp  = 1;

			skb_reserve(skbn, frontlen);

			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;

			/* Copy the user data */
			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
			skb_pull(skb, lg);

			/* Duplicate the Header */
			skb_push(skbn, ROSE_MIN_LEN);
			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);

			if (skb->len > 0)
				skbn->data[2] |= M_BIT;

			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
		}

		skb->free = 1;
		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
	}
#else
	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
#endif

	/* Trigger the state machine to actually transmit queued frames. */
	rose_kick(sk);

	return len;
}
1271 
1272 
/*
 * rose_recvmsg - receive one queued ROSE data packet.
 *
 * Dequeues the next sk_buff, strips the three-byte ROSE network header
 * (optionally re-inserting the Q-bit value as the first data byte when
 * the qbitincl socket option is set), copies up to @size bytes to the
 * user and, if requested, fills in the peer address in msg->msg_name.
 *
 * Returns the number of bytes copied, or a negative errno.
 */
static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			int flags)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	size_t copied;
	unsigned char *asmptr;
	struct sk_buff *skb;
	int n, er, qbit;

	/*
	 * This works for seqpacket too. The receiver has ordered the queue for
	 * us! We do one quick check first though
	 */
	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	/* Now we can treat all alike */
	skb = skb_recv_datagram(sk, flags, &er);
	if (!skb)
		return er;

	/* Extract the Q bit from the first header byte before stripping. */
	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;

	skb_pull(skb, ROSE_MIN_LEN);

	/* With qbitincl, the Q-bit value is delivered as the first byte. */
	if (rose->qbitincl) {
		asmptr  = skb_push(skb, 1);
		*asmptr = qbit;
	}

	skb_reset_transport_header(skb);
	copied     = skb->len;

	/* Truncate to the user buffer and flag it, SOCK_SEQPACKET style. */
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	skb_copy_datagram_msg(skb, 0, msg, copied);

	if (msg->msg_name) {
		struct sockaddr_rose *srose;
		DECLARE_SOCKADDR(struct full_sockaddr_rose *, full_srose,
				 msg->msg_name);

		/* Zero first so no kernel stack bytes leak to userspace. */
		memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
		srose = msg->msg_name;
		srose->srose_family = AF_ROSE;
		srose->srose_addr   = rose->dest_addr;
		srose->srose_call   = rose->dest_call;
		srose->srose_ndigis = rose->dest_ndigis;
		/* Digipeater list only fits in the full-size sockaddr. */
		for (n = 0 ; n < rose->dest_ndigis ; n++)
			full_srose->srose_digis[n] = rose->dest_digis[n];
		msg->msg_namelen = sizeof(struct full_sockaddr_rose);
	}

	skb_free_datagram(sk, skb);

	return copied;
}
1334 
1335 
/*
 * rose_ioctl - socket ioctl handler for AF_ROSE.
 *
 * Handles the generic queue-length queries (TIOCOUTQ/TIOCINQ), rejects
 * the interface-address ioctls, forwards routing-table manipulation to
 * rose_rt_ioctl() (CAP_NET_ADMIN required), and implements the
 * ROSE-specific cause/callsign/accept ioctls.
 *
 * Returns 0 or a negative errno; -ENOIOCTLCMD lets the socket core try
 * default handling.
 */
static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct rose_sock *rose = rose_sk(sk);
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	case TIOCOUTQ: {
		long amount;

		/* Free space left in the send buffer, clamped at zero. */
		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		/* NOTE(review): amount is long but is written through an
		 * unsigned int __user pointer — long-standing quirk. */
		return put_user(amount, (unsigned int __user *) argp);
	}

	case TIOCINQ: {
		struct sk_buff *skb;
		long amount = 0L;

		/* Length of the next pending datagram, not the whole queue. */
		spin_lock_irq(&sk->sk_receive_queue.lock);
		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
			amount = skb->len;
		spin_unlock_irq(&sk->sk_receive_queue.lock);
		return put_user(amount, (unsigned int __user *) argp);
	}

	case SIOCGIFADDR:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
	case SIOCGIFMETRIC:
	case SIOCSIFMETRIC:
		/* Interface addressing is meaningless for ROSE sockets. */
		return -EINVAL;

	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCRSCLRRT:
		/* Routing-table changes are privileged. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return rose_rt_ioctl(cmd, argp);

	case SIOCRSGCAUSE: {
		/* Report the last clear cause/diagnostic for this socket. */
		struct rose_cause_struct rose_cause;
		rose_cause.cause      = rose->cause;
		rose_cause.diagnostic = rose->diagnostic;
		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
	}

	case SIOCRSSCAUSE: {
		/* Set the cause/diagnostic used in outgoing clear requests. */
		struct rose_cause_struct rose_cause;
		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
			return -EFAULT;
		rose->cause      = rose_cause.cause;
		rose->diagnostic = rose_cause.diagnostic;
		return 0;
	}

	case SIOCRSSL2CALL:
		/* Replace the global level-2 listen callsign (privileged):
		 * drop the old AX.25 listener, then register the new one. */
		if (!capable(CAP_NET_ADMIN)) return -EPERM;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			ax25_listen_release(&rose_callsign, NULL);
		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
			return -EFAULT;
		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
			return ax25_listen_register(&rose_callsign, NULL);

		return 0;

	case SIOCRSGL2CALL:
		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;

	case SIOCRSACCEPT:
		/* Accept a deferred incoming call (see rose->defer):
		 * send CALL ACCEPTED and move to the data-transfer state. */
		if (rose->state == ROSE_STATE_5) {
			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
			rose_start_idletimer(sk);
			rose->condition = 0x00;
			rose->vs        = 0;
			rose->va        = 0;
			rose->vr        = 0;
			rose->vl        = 0;
			rose->state     = ROSE_STATE_3;
		}
		return 0;

	default:
		return -ENOIOCTLCMD;
	}

	return 0;
}
1431 
1432 #ifdef CONFIG_PROC_FS
/*
 * Begin a /proc/net/rose walk: hold rose_list_lock (BH-disabled) for the
 * whole sequence; it is dropped in rose_info_stop().
 */
static void *rose_info_start(struct seq_file *seq, loff_t *pos)
	__acquires(rose_list_lock)
{
	spin_lock_bh(&rose_list_lock);
	return seq_hlist_start_head(&rose_list, *pos);
}
1439 
/* Advance to the next socket on rose_list (lock already held). */
static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_hlist_next(v, &rose_list, pos);
}
1444 
/* End of the /proc walk: release the lock taken in rose_info_start(). */
static void rose_info_stop(struct seq_file *seq, void *v)
	__releases(rose_list_lock)
{
	spin_unlock_bh(&rose_list_lock);
}
1450 
rose_info_show(struct seq_file * seq,void * v)1451 static int rose_info_show(struct seq_file *seq, void *v)
1452 {
1453 	char buf[11], rsbuf[11];
1454 
1455 	if (v == SEQ_START_TOKEN)
1456 		seq_puts(seq,
1457 			 "dest_addr  dest_call src_addr   src_call  dev   lci neigh st vs vr va   t  t1  t2  t3  hb    idle Snd-Q Rcv-Q inode\n");
1458 
1459 	else {
1460 		struct sock *s = sk_entry(v);
1461 		struct rose_sock *rose = rose_sk(s);
1462 		const char *devname, *callsign;
1463 		const struct net_device *dev = rose->device;
1464 
1465 		if (!dev)
1466 			devname = "???";
1467 		else
1468 			devname = dev->name;
1469 
1470 		seq_printf(seq, "%-10s %-9s ",
1471 			   rose2asc(rsbuf, &rose->dest_addr),
1472 			   ax2asc(buf, &rose->dest_call));
1473 
1474 		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
1475 			callsign = "??????-?";
1476 		else
1477 			callsign = ax2asc(buf, &rose->source_call);
1478 
1479 		seq_printf(seq,
1480 			   "%-10s %-9s %-5s %3.3X %05d  %d  %d  %d  %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
1481 			rose2asc(rsbuf, &rose->source_addr),
1482 			callsign,
1483 			devname,
1484 			rose->lci & 0x0FFF,
1485 			(rose->neighbour) ? rose->neighbour->number : 0,
1486 			rose->state,
1487 			rose->vs,
1488 			rose->vr,
1489 			rose->va,
1490 			ax25_display_timer(&rose->timer) / HZ,
1491 			rose->t1 / HZ,
1492 			rose->t2 / HZ,
1493 			rose->t3 / HZ,
1494 			rose->hb / HZ,
1495 			ax25_display_timer(&rose->idletimer) / (60 * HZ),
1496 			rose->idle / (60 * HZ),
1497 			sk_wmem_alloc_get(s),
1498 			sk_rmem_alloc_get(s),
1499 			s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1500 	}
1501 
1502 	return 0;
1503 }
1504 
/* seq_file iterator for /proc/net/rose (per-socket table). */
static const struct seq_operations rose_info_seqops = {
	.start = rose_info_start,
	.next = rose_info_next,
	.stop = rose_info_stop,
	.show = rose_info_show,
};
1511 #endif	/* CONFIG_PROC_FS */
1512 
/* PF_ROSE socket-family registration: socket(AF_ROSE, ...) -> rose_create. */
static const struct net_proto_family rose_family_ops = {
	.family		=	PF_ROSE,
	.create		=	rose_create,
	.owner		=	THIS_MODULE,
};
1518 
/* Per-socket operations table for AF_ROSE sockets. */
static const struct proto_ops rose_proto_ops = {
	.family		=	PF_ROSE,
	.owner		=	THIS_MODULE,
	.release	=	rose_release,
	.bind		=	rose_bind,
	.connect	=	rose_connect,
	.socketpair	=	sock_no_socketpair,
	.accept		=	rose_accept,
	.getname	=	rose_getname,
	.poll		=	datagram_poll,
	.ioctl		=	rose_ioctl,
	.gettstamp	=	sock_gettstamp,
	.listen		=	rose_listen,
	.shutdown	=	sock_no_shutdown,
	.setsockopt	=	rose_setsockopt,
	.getsockopt	=	rose_getsockopt,
	.sendmsg	=	rose_sendmsg,
	.recvmsg	=	rose_recvmsg,
	.mmap		=	sock_no_mmap,
};
1539 
/* Netdevice event notifier: rose_device_event() handles up/down events. */
static struct notifier_block rose_dev_notifier = {
	.notifier_call	=	rose_device_event,
};
1543 
/* Table of the rose%d virtual net_devices allocated in rose_proto_init(). */
static struct net_device **dev_rose;
1545 
/* AX.25 protocol hook: frames with PID AX25_P_ROSE go to rose_route_frame. */
static struct ax25_protocol rose_pid = {
	.pid	= AX25_P_ROSE,
	.func	= rose_route_frame
};
1550 
/* Callback invoked by AX.25 when an underlying link fails. */
static struct ax25_linkfail rose_linkfail_notifier = {
	.func	= rose_link_failed
};
1554 
/*
 * rose_proto_init - module initialisation.
 *
 * Registers the proto, allocates and registers rose_ndevs virtual
 * rose%d net_devices, then hooks into the socket layer, netdevice
 * notifier chain, AX.25 layer, sysctl, loopback and /proc.
 *
 * On failure the goto labels unwind in reverse order of setup.
 * Returns 0 on success or a negative errno.
 */
static int __init rose_proto_init(void)
{
	int i;
	int rc;

	/* Reject a rose_ndevs module parameter that would overflow kcalloc. */
	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
		rc = -EINVAL;
		goto out;
	}

	rc = proto_register(&rose_proto, 0);
	if (rc != 0)
		goto out;

	rose_callsign = null_ax25_address;

	dev_rose = kcalloc(rose_ndevs, sizeof(struct net_device *),
			   GFP_KERNEL);
	if (dev_rose == NULL) {
		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
		rc = -ENOMEM;
		goto out_proto_unregister;
	}

	/* Create and register the rose0..roseN-1 virtual interfaces. */
	for (i = 0; i < rose_ndevs; i++) {
		struct net_device *dev;
		char name[IFNAMSIZ];

		sprintf(name, "rose%d", i);
		dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, rose_setup);
		if (!dev) {
			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
			rc = -ENOMEM;
			goto fail;
		}
		rc = register_netdev(dev);
		if (rc) {
			printk(KERN_ERR "ROSE: netdevice registration failed\n");
			free_netdev(dev);
			goto fail;
		}
		rose_set_lockdep_key(dev);
		dev_rose[i] = dev;
	}

	sock_register(&rose_family_ops);
	register_netdevice_notifier(&rose_dev_notifier);

	ax25_register_pid(&rose_pid);
	ax25_linkfail_register(&rose_linkfail_notifier);

#ifdef CONFIG_SYSCTL
	rose_register_sysctl();
#endif
	rose_loopback_init();

	rose_add_loopback_neigh();

	proc_create_seq("rose", 0444, init_net.proc_net, &rose_info_seqops);
	proc_create_seq("rose_neigh", 0444, init_net.proc_net,
		    &rose_neigh_seqops);
	proc_create_seq("rose_nodes", 0444, init_net.proc_net,
		    &rose_node_seqops);
	proc_create_seq("rose_routes", 0444, init_net.proc_net,
		    &rose_route_seqops);
out:
	return rc;
fail:
	/* Unregister the devices created so far (i is the failing index). */
	while (--i >= 0) {
		unregister_netdev(dev_rose[i]);
		free_netdev(dev_rose[i]);
	}
	kfree(dev_rose);
out_proto_unregister:
	proto_unregister(&rose_proto);
	goto out;
}
module_init(rose_proto_init);

/* Number of rose%d virtual interfaces created at load time (default 10). */
module_param(rose_ndevs, int, 0);
MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");

MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ROSE);
1642 
/*
 * rose_exit - module teardown, mirroring rose_proto_init() in reverse:
 * /proc entries, loopback, routes, AX.25 hooks, sysctl, notifier,
 * socket family, net_devices and finally the proto itself.
 */
static void __exit rose_exit(void)
{
	int i;

	remove_proc_entry("rose", init_net.proc_net);
	remove_proc_entry("rose_neigh", init_net.proc_net);
	remove_proc_entry("rose_nodes", init_net.proc_net);
	remove_proc_entry("rose_routes", init_net.proc_net);
	rose_loopback_clear();

	rose_rt_free();

	ax25_protocol_release(AX25_P_ROSE);
	ax25_linkfail_release(&rose_linkfail_notifier);

	/* Drop the level-2 listener if one was registered via SIOCRSSL2CALL. */
	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
		ax25_listen_release(&rose_callsign, NULL);

#ifdef CONFIG_SYSCTL
	rose_unregister_sysctl();
#endif
	unregister_netdevice_notifier(&rose_dev_notifier);

	sock_unregister(PF_ROSE);

	/* Tear down every rose%d device that init managed to create. */
	for (i = 0; i < rose_ndevs; i++) {
		struct net_device *dev = dev_rose[i];

		if (dev) {
			unregister_netdev(dev);
			free_netdev(dev);
		}
	}

	kfree(dev_rose);
	proto_unregister(&rose_proto);
}

module_exit(rose_exit);
1682