1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *
4  * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
5  * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
6  * Copyright (C) Terry Dawson VK2KTJ (terry@animats.net)
7  * Copyright (C) Tomi Manninen OH2BNS (oh2bns@sral.fi)
8  */
9 
10 #include <linux/capability.h>
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/init.h>
14 #include <linux/errno.h>
15 #include <linux/types.h>
16 #include <linux/socket.h>
17 #include <linux/in.h>
18 #include <linux/slab.h>
19 #include <linux/kernel.h>
20 #include <linux/sched/signal.h>
21 #include <linux/spinlock.h>
22 #include <linux/timer.h>
23 #include <linux/string.h>
24 #include <linux/sockios.h>
25 #include <linux/net.h>
26 #include <linux/stat.h>
27 #include <net/net_namespace.h>
28 #include <net/ax25.h>
29 #include <linux/inet.h>
30 #include <linux/netdevice.h>
31 #include <linux/if_arp.h>
32 #include <linux/skbuff.h>
33 #include <net/sock.h>
34 #include <linux/uaccess.h>
35 #include <linux/fcntl.h>
36 #include <linux/termios.h>
37 #include <linux/mm.h>
38 #include <linux/interrupt.h>
39 #include <linux/notifier.h>
40 #include <net/rose.h>
41 #include <linux/proc_fs.h>
42 #include <linux/seq_file.h>
43 #include <net/tcp_states.h>
44 #include <net/ip.h>
45 #include <net/arp.h>
46 
47 static int rose_ndevs = 10;
48 
49 int sysctl_rose_restart_request_timeout = ROSE_DEFAULT_T0;
50 int sysctl_rose_call_request_timeout    = ROSE_DEFAULT_T1;
51 int sysctl_rose_reset_request_timeout   = ROSE_DEFAULT_T2;
52 int sysctl_rose_clear_request_timeout   = ROSE_DEFAULT_T3;
53 int sysctl_rose_no_activity_timeout     = ROSE_DEFAULT_IDLE;
54 int sysctl_rose_ack_hold_back_timeout   = ROSE_DEFAULT_HB;
55 int sysctl_rose_routing_control         = ROSE_DEFAULT_ROUTING;
56 int sysctl_rose_link_fail_timeout       = ROSE_DEFAULT_FAIL_TIMEOUT;
57 int sysctl_rose_maximum_vcs             = ROSE_DEFAULT_MAXVC;
58 int sysctl_rose_window_size             = ROSE_DEFAULT_WINDOW_SIZE;
59 
60 static HLIST_HEAD(rose_list);
61 static DEFINE_SPINLOCK(rose_list_lock);
62 
63 static const struct proto_ops rose_proto_ops;
64 
65 ax25_address rose_callsign;
66 
67 /*
68  *	Convert a ROSE address into text.
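 *	An address is five octets holding ten packed digits; it is printed as
 *	ten hex digits, or as "*" for the all-zero address.  The buffer must
 *	hold at least 11 characters.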
69  */
70 char *rose2asc(char *buf, const rose_address *addr)
71 {
72 	if (addr->rose_addr[0] == 0x00 && addr->rose_addr[1] == 0x00 &&
73 	    addr->rose_addr[2] == 0x00 && addr->rose_addr[3] == 0x00 &&
74 	    addr->rose_addr[4] == 0x00) {
75 		strcpy(buf, "*");
76 	} else {
77 		sprintf(buf, "%02X%02X%02X%02X%02X", addr->rose_addr[0] & 0xFF,
78 						addr->rose_addr[1] & 0xFF,
79 						addr->rose_addr[2] & 0xFF,
80 						addr->rose_addr[3] & 0xFF,
81 						addr->rose_addr[4] & 0xFF);
82 	}
83 
84 	return buf;
85 }
86 
87 /*
88  *	Compare two ROSE addresses, 0 == equal.
89  */
90 int rosecmp(rose_address *addr1, rose_address *addr2)
91 {
92 	int i;
93 
94 	for (i = 0; i < 5; i++)
95 		if (addr1->rose_addr[i] != addr2->rose_addr[i])
96 			return 1;
97 
98 	return 0;
99 }
100 
101 /*
102  *	Compare only the first 'mask' digits of two ROSE addresses, 0 == equal.
103  */
104 int rosecmpm(rose_address *addr1, rose_address *addr2, unsigned short mask)
105 {
106 	unsigned int i, j;
107 
108 	if (mask > 10)
109 		return 1;
110 
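	/*
	 *	Each address octet carries two digits: even-numbered digits sit
	 *	in the high nibble, odd-numbered digits in the low nibble.
	 */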
111 	for (i = 0; i < mask; i++) {
112 		j = i / 2;
113 
114 		if ((i % 2) != 0) {
115 			if ((addr1->rose_addr[j] & 0x0F) != (addr2->rose_addr[j] & 0x0F))
116 				return 1;
117 		} else {
118 			if ((addr1->rose_addr[j] & 0xF0) != (addr2->rose_addr[j] & 0xF0))
119 				return 1;
120 		}
121 	}
122 
123 	return 0;
124 }
125 
126 /*
127  *	Socket removal during an interrupt is now safe.
128  */
129 static void rose_remove_socket(struct sock *sk)
130 {
131 	spin_lock_bh(&rose_list_lock);
132 	sk_del_node_init(sk);
133 	spin_unlock_bh(&rose_list_lock);
134 }
135 
136 /*
137  *	Kill all bound sockets on a broken link layer connection to a
138  *	particular neighbour.
139  */
140 void rose_kill_by_neigh(struct rose_neigh *neigh)
141 {
142 	struct sock *s;
143 
144 	spin_lock_bh(&rose_list_lock);
145 	sk_for_each(s, &rose_list) {
146 		struct rose_sock *rose = rose_sk(s);
147 
148 		if (rose->neighbour == neigh) {
149 			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
150 			rose->neighbour->use--;
151 			rose->neighbour = NULL;
152 		}
153 	}
154 	spin_unlock_bh(&rose_list_lock);
155 }
156 
157 /*
158  *	Kill all bound sockets on a dropped device.
159  */
160 static void rose_kill_by_device(struct net_device *dev)
161 {
162 	struct sock *s;
163 
164 	spin_lock_bh(&rose_list_lock);
165 	sk_for_each(s, &rose_list) {
166 		struct rose_sock *rose = rose_sk(s);
167 
168 		if (rose->device == dev) {
169 			rose_disconnect(s, ENETUNREACH, ROSE_OUT_OF_ORDER, 0);
170 			if (rose->neighbour)
171 				rose->neighbour->use--;
172 			rose->device = NULL;
173 		}
174 	}
175 	spin_unlock_bh(&rose_list_lock);
176 }
177 
178 /*
179  *	Handle device status changes.
180  */
181 static int rose_device_event(struct notifier_block *this,
182 			     unsigned long event, void *ptr)
183 {
184 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
185 
186 	if (!net_eq(dev_net(dev), &init_net))
187 		return NOTIFY_DONE;
188 
189 	if (event != NETDEV_DOWN)
190 		return NOTIFY_DONE;
191 
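	/*
	 *	A ROSE device going down kills its bound sockets; an AX.25 device
	 *	going down takes out the ROSE links and routes that ran over it.
	 */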
192 	switch (dev->type) {
193 	case ARPHRD_ROSE:
194 		rose_kill_by_device(dev);
195 		break;
196 	case ARPHRD_AX25:
197 		rose_link_device_down(dev);
198 		rose_rt_device_down(dev);
199 		break;
200 	}
201 
202 	return NOTIFY_DONE;
203 }
204 
205 /*
206  *	Add a socket to the bound sockets list.
207  */
208 static void rose_insert_socket(struct sock *sk)
209 {
210 
211 	spin_lock_bh(&rose_list_lock);
212 	sk_add_node(sk, &rose_list);
213 	spin_unlock_bh(&rose_list_lock);
214 }
215 
216 /*
217  *	Find a socket that wants to accept the Call Request we just
218  *	received.
219  */
220 static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
221 {
222 	struct sock *s;
223 
224 	spin_lock_bh(&rose_list_lock);
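	/* First pass: a listener bound to this exact callsign and no digipeaters. */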
225 	sk_for_each(s, &rose_list) {
226 		struct rose_sock *rose = rose_sk(s);
227 
228 		if (!rosecmp(&rose->source_addr, addr) &&
229 		    !ax25cmp(&rose->source_call, call) &&
230 		    !rose->source_ndigis && s->sk_state == TCP_LISTEN)
231 			goto found;
232 	}
233 
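	/* Second pass: fall back to a listener bound to the null (wildcard) callsign. */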
234 	sk_for_each(s, &rose_list) {
235 		struct rose_sock *rose = rose_sk(s);
236 
237 		if (!rosecmp(&rose->source_addr, addr) &&
238 		    !ax25cmp(&rose->source_call, &null_ax25_address) &&
239 		    s->sk_state == TCP_LISTEN)
240 			goto found;
241 	}
242 	s = NULL;
243 found:
244 	spin_unlock_bh(&rose_list_lock);
245 	return s;
246 }
247 
248 /*
249  *	Find a connected ROSE socket given its LCI and neighbour.
250  */
251 struct sock *rose_find_socket(unsigned int lci, struct rose_neigh *neigh)
252 {
253 	struct sock *s;
254 
255 	spin_lock_bh(&rose_list_lock);
256 	sk_for_each(s, &rose_list) {
257 		struct rose_sock *rose = rose_sk(s);
258 
259 		if (rose->lci == lci && rose->neighbour == neigh)
260 			goto found;
261 	}
262 	s = NULL;
263 found:
264 	spin_unlock_bh(&rose_list_lock);
265 	return s;
266 }
267 
268 /*
269  *	Find a unique LCI for a given neighbour.
270  */
271 unsigned int rose_new_lci(struct rose_neigh *neigh)
272 {
273 	int lci;
274 
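	/*
	 *	The DCE end searches upwards from 1 and the DTE end downwards from
	 *	the maximum, so the two sides of a link allocate LCIs from opposite
	 *	ends of the range and collisions are unlikely.
	 */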
275 	if (neigh->dce_mode) {
276 		for (lci = 1; lci <= sysctl_rose_maximum_vcs; lci++)
277 			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
278 				return lci;
279 	} else {
280 		for (lci = sysctl_rose_maximum_vcs; lci > 0; lci--)
281 			if (rose_find_socket(lci, neigh) == NULL && rose_route_free_lci(lci, neigh) == NULL)
282 				return lci;
283 	}
284 
285 	return 0;
286 }
287 
288 /*
289  *	Deferred destroy.
290  */
291 void rose_destroy_socket(struct sock *);
292 
293 /*
294  *	Handler for deferred kills.
295  */
296 static void rose_destroy_timer(struct timer_list *t)
297 {
298 	struct sock *sk = from_timer(sk, t, sk_timer);
299 
300 	rose_destroy_socket(sk);
301 }
302 
303 /*
304  *	This is called from user mode and from the timers. Thus it protects
305  *	itself against interrupt users but doesn't worry about being called
306  *	during work.  Once the socket is removed from the list, no interrupt
307  *	or bottom half will touch it and we are (fairly) safe.
308  */
309 void rose_destroy_socket(struct sock *sk)
310 {
311 	struct sk_buff *skb;
312 
313 	rose_remove_socket(sk);
314 	rose_stop_heartbeat(sk);
315 	rose_stop_idletimer(sk);
316 	rose_stop_timer(sk);
317 
318 	rose_clear_queues(sk);		/* Flush the queues */
319 
320 	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
321 		if (skb->sk != sk) {	/* A pending connection */
322 			/* Queue the unaccepted socket for death */
323 			sock_set_flag(skb->sk, SOCK_DEAD);
324 			rose_start_heartbeat(skb->sk);
325 			rose_sk(skb->sk)->state = ROSE_STATE_0;
326 		}
327 
328 		kfree_skb(skb);
329 	}
330 
331 	if (sk_has_allocations(sk)) {
332 		/* Defer: outstanding buffers */
333 		timer_setup(&sk->sk_timer, rose_destroy_timer, 0);
334 		sk->sk_timer.expires  = jiffies + 10 * HZ;
335 		add_timer(&sk->sk_timer);
336 	} else
337 		sock_put(sk);
338 }
339 
340 /*
341  *	Handling for system calls applied via the various interfaces to a
342  *	ROSE socket object.
343  */
344 
345 static int rose_setsockopt(struct socket *sock, int level, int optname,
346 	char __user *optval, unsigned int optlen)
347 {
348 	struct sock *sk = sock->sk;
349 	struct rose_sock *rose = rose_sk(sk);
350 	int opt;
351 
352 	if (level != SOL_ROSE)
353 		return -ENOPROTOOPT;
354 
355 	if (optlen < sizeof(int))
356 		return -EINVAL;
357 
358 	if (get_user(opt, (int __user *)optval))
359 		return -EFAULT;
360 
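	/*
	 *	Timer values are supplied in seconds (ROSE_IDLE in minutes) and
	 *	are stored internally in jiffies.
	 */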
361 	switch (optname) {
362 	case ROSE_DEFER:
363 		rose->defer = opt ? 1 : 0;
364 		return 0;
365 
366 	case ROSE_T1:
367 		if (opt < 1)
368 			return -EINVAL;
369 		rose->t1 = opt * HZ;
370 		return 0;
371 
372 	case ROSE_T2:
373 		if (opt < 1)
374 			return -EINVAL;
375 		rose->t2 = opt * HZ;
376 		return 0;
377 
378 	case ROSE_T3:
379 		if (opt < 1)
380 			return -EINVAL;
381 		rose->t3 = opt * HZ;
382 		return 0;
383 
384 	case ROSE_HOLDBACK:
385 		if (opt < 1)
386 			return -EINVAL;
387 		rose->hb = opt * HZ;
388 		return 0;
389 
390 	case ROSE_IDLE:
391 		if (opt < 0)
392 			return -EINVAL;
393 		rose->idle = opt * 60 * HZ;
394 		return 0;
395 
396 	case ROSE_QBITINCL:
397 		rose->qbitincl = opt ? 1 : 0;
398 		return 0;
399 
400 	default:
401 		return -ENOPROTOOPT;
402 	}
403 }
404 
405 static int rose_getsockopt(struct socket *sock, int level, int optname,
406 	char __user *optval, int __user *optlen)
407 {
408 	struct sock *sk = sock->sk;
409 	struct rose_sock *rose = rose_sk(sk);
410 	int val = 0;
411 	int len;
412 
413 	if (level != SOL_ROSE)
414 		return -ENOPROTOOPT;
415 
416 	if (get_user(len, optlen))
417 		return -EFAULT;
418 
419 	if (len < 0)
420 		return -EINVAL;
421 
422 	switch (optname) {
423 	case ROSE_DEFER:
424 		val = rose->defer;
425 		break;
426 
427 	case ROSE_T1:
428 		val = rose->t1 / HZ;
429 		break;
430 
431 	case ROSE_T2:
432 		val = rose->t2 / HZ;
433 		break;
434 
435 	case ROSE_T3:
436 		val = rose->t3 / HZ;
437 		break;
438 
439 	case ROSE_HOLDBACK:
440 		val = rose->hb / HZ;
441 		break;
442 
443 	case ROSE_IDLE:
444 		val = rose->idle / (60 * HZ);
445 		break;
446 
447 	case ROSE_QBITINCL:
448 		val = rose->qbitincl;
449 		break;
450 
451 	default:
452 		return -ENOPROTOOPT;
453 	}
454 
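	/* Every ROSE option is a plain int, so never copy back more than that. */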
455 	len = min_t(unsigned int, len, sizeof(int));
456 
457 	if (put_user(len, optlen))
458 		return -EFAULT;
459 
460 	return copy_to_user(optval, &val, len) ? -EFAULT : 0;
461 }
462 
463 static int rose_listen(struct socket *sock, int backlog)
464 {
465 	struct sock *sk = sock->sk;
466 
467 	if (sk->sk_state != TCP_LISTEN) {
468 		struct rose_sock *rose = rose_sk(sk);
469 
470 		rose->dest_ndigis = 0;
471 		memset(&rose->dest_addr, 0, ROSE_ADDR_LEN);
472 		memset(&rose->dest_call, 0, AX25_ADDR_LEN);
473 		memset(rose->dest_digis, 0, AX25_ADDR_LEN * ROSE_MAX_DIGIS);
474 		sk->sk_max_ack_backlog = backlog;
475 		sk->sk_state           = TCP_LISTEN;
476 		return 0;
477 	}
478 
479 	return -EOPNOTSUPP;
480 }
481 
482 static struct proto rose_proto = {
483 	.name	  = "ROSE",
484 	.owner	  = THIS_MODULE,
485 	.obj_size = sizeof(struct rose_sock),
486 };
487 
488 static int rose_create(struct net *net, struct socket *sock, int protocol,
489 		       int kern)
490 {
491 	struct sock *sk;
492 	struct rose_sock *rose;
493 
494 	if (!net_eq(net, &init_net))
495 		return -EAFNOSUPPORT;
496 
497 	if (sock->type != SOCK_SEQPACKET || protocol != 0)
498 		return -ESOCKTNOSUPPORT;
499 
500 	sk = sk_alloc(net, PF_ROSE, GFP_ATOMIC, &rose_proto, kern);
501 	if (sk == NULL)
502 		return -ENOMEM;
503 
504 	rose = rose_sk(sk);
505 
506 	sock_init_data(sock, sk);
507 
508 	skb_queue_head_init(&rose->ack_queue);
509 #ifdef M_BIT
510 	skb_queue_head_init(&rose->frag_queue);
511 	rose->fraglen    = 0;
512 #endif
513 
514 	sock->ops    = &rose_proto_ops;
515 	sk->sk_protocol = protocol;
516 
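	/*
	 *	The expiry handlers are installed when the timers are actually
	 *	started (rose_start_t1timer() and friends); only the timer
	 *	structures are initialised here.
	 */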
517 	timer_setup(&rose->timer, NULL, 0);
518 	timer_setup(&rose->idletimer, NULL, 0);
519 
520 	rose->t1   = msecs_to_jiffies(sysctl_rose_call_request_timeout);
521 	rose->t2   = msecs_to_jiffies(sysctl_rose_reset_request_timeout);
522 	rose->t3   = msecs_to_jiffies(sysctl_rose_clear_request_timeout);
523 	rose->hb   = msecs_to_jiffies(sysctl_rose_ack_hold_back_timeout);
524 	rose->idle = msecs_to_jiffies(sysctl_rose_no_activity_timeout);
525 
526 	rose->state = ROSE_STATE_0;
527 
528 	return 0;
529 }
530 
531 static struct sock *rose_make_new(struct sock *osk)
532 {
533 	struct sock *sk;
534 	struct rose_sock *rose, *orose;
535 
536 	if (osk->sk_type != SOCK_SEQPACKET)
537 		return NULL;
538 
539 	sk = sk_alloc(sock_net(osk), PF_ROSE, GFP_ATOMIC, &rose_proto, 0);
540 	if (sk == NULL)
541 		return NULL;
542 
543 	rose = rose_sk(sk);
544 
545 	sock_init_data(NULL, sk);
546 
547 	skb_queue_head_init(&rose->ack_queue);
548 #ifdef M_BIT
549 	skb_queue_head_init(&rose->frag_queue);
550 	rose->fraglen  = 0;
551 #endif
552 
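	/* Inherit the listening socket's limits and ROSE parameters. */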
553 	sk->sk_type     = osk->sk_type;
554 	sk->sk_priority = osk->sk_priority;
555 	sk->sk_protocol = osk->sk_protocol;
556 	sk->sk_rcvbuf   = osk->sk_rcvbuf;
557 	sk->sk_sndbuf   = osk->sk_sndbuf;
558 	sk->sk_state    = TCP_ESTABLISHED;
559 	sock_copy_flags(sk, osk);
560 
561 	timer_setup(&rose->timer, NULL, 0);
562 	timer_setup(&rose->idletimer, NULL, 0);
563 
564 	orose		= rose_sk(osk);
565 	rose->t1	= orose->t1;
566 	rose->t2	= orose->t2;
567 	rose->t3	= orose->t3;
568 	rose->hb	= orose->hb;
569 	rose->idle	= orose->idle;
570 	rose->defer	= orose->defer;
571 	rose->device	= orose->device;
572 	rose->qbitincl	= orose->qbitincl;
573 
574 	return sk;
575 }
576 
577 static int rose_release(struct socket *sock)
578 {
579 	struct sock *sk = sock->sk;
580 	struct rose_sock *rose;
581 
582 	if (sk == NULL) return 0;
583 
584 	sock_hold(sk);
585 	sock_orphan(sk);
586 	lock_sock(sk);
587 	rose = rose_sk(sk);
588 
589 	switch (rose->state) {
590 	case ROSE_STATE_0:
591 		release_sock(sk);
592 		rose_disconnect(sk, 0, -1, -1);
593 		lock_sock(sk);
594 		rose_destroy_socket(sk);
595 		break;
596 
597 	case ROSE_STATE_2:
598 		rose->neighbour->use--;
599 		release_sock(sk);
600 		rose_disconnect(sk, 0, -1, -1);
601 		lock_sock(sk);
602 		rose_destroy_socket(sk);
603 		break;
604 
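	/*
	 *	An active call: send a Clear Request and linger in state 2 with
	 *	SOCK_DESTROY set until the clearance completes or the t3 timer
	 *	expires.
	 */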
605 	case ROSE_STATE_1:
606 	case ROSE_STATE_3:
607 	case ROSE_STATE_4:
608 	case ROSE_STATE_5:
609 		rose_clear_queues(sk);
610 		rose_stop_idletimer(sk);
611 		rose_write_internal(sk, ROSE_CLEAR_REQUEST);
612 		rose_start_t3timer(sk);
613 		rose->state  = ROSE_STATE_2;
614 		sk->sk_state    = TCP_CLOSE;
615 		sk->sk_shutdown |= SEND_SHUTDOWN;
616 		sk->sk_state_change(sk);
617 		sock_set_flag(sk, SOCK_DEAD);
618 		sock_set_flag(sk, SOCK_DESTROY);
619 		break;
620 
621 	default:
622 		break;
623 	}
624 
625 	sock->sk = NULL;
626 	release_sock(sk);
627 	sock_put(sk);
628 
629 	return 0;
630 }
631 
632 static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
633 {
634 	struct sock *sk = sock->sk;
635 	struct rose_sock *rose = rose_sk(sk);
636 	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
637 	struct net_device *dev;
638 	ax25_address *source;
639 	ax25_uid_assoc *user;
640 	int n;
641 
642 	if (!sock_flag(sk, SOCK_ZAPPED))
643 		return -EINVAL;
644 
645 	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
646 		return -EINVAL;
647 
648 	if (addr->srose_family != AF_ROSE)
649 		return -EINVAL;
650 
651 	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
652 		return -EINVAL;
653 
654 	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
655 		return -EINVAL;
656 
657 	if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
658 		return -EADDRNOTAVAIL;
659 
660 	source = &addr->srose_call;
661 
662 	user = ax25_findbyuid(current_euid());
663 	if (user) {
664 		rose->source_call = user->call;
665 		ax25_uid_put(user);
666 	} else {
667 		if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) {
668 			dev_put(dev);
669 			return -EACCES;
670 		}
671 		rose->source_call   = *source;
672 	}
673 
674 	rose->source_addr   = addr->srose_addr;
675 	rose->device        = dev;
676 	rose->source_ndigis = addr->srose_ndigis;
677 
678 	if (addr_len == sizeof(struct full_sockaddr_rose)) {
679 		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
680 		for (n = 0 ; n < addr->srose_ndigis ; n++)
681 			rose->source_digis[n] = full_addr->srose_digis[n];
682 	} else {
683 		if (rose->source_ndigis == 1) {
684 			rose->source_digis[0] = addr->srose_digi;
685 		}
686 	}
687 
688 	rose_insert_socket(sk);
689 
690 	sock_reset_flag(sk, SOCK_ZAPPED);
691 
692 	return 0;
693 }
694 
695 static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags)
696 {
697 	struct sock *sk = sock->sk;
698 	struct rose_sock *rose = rose_sk(sk);
699 	struct sockaddr_rose *addr = (struct sockaddr_rose *)uaddr;
700 	unsigned char cause, diagnostic;
701 	struct net_device *dev;
702 	ax25_uid_assoc *user;
703 	int n, err = 0;
704 
705 	if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose))
706 		return -EINVAL;
707 
708 	if (addr->srose_family != AF_ROSE)
709 		return -EINVAL;
710 
711 	if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
712 		return -EINVAL;
713 
714 	if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
715 		return -EINVAL;
716 
717 	/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
718 	if ((rose->source_ndigis + addr->srose_ndigis) > ROSE_MAX_DIGIS)
719 		return -EINVAL;
720 
721 	lock_sock(sk);
722 
723 	if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
724 		/* Connect completed during an ERESTARTSYS event */
725 		sock->state = SS_CONNECTED;
726 		goto out_release;
727 	}
728 
729 	if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) {
730 		sock->state = SS_UNCONNECTED;
731 		err = -ECONNREFUSED;
732 		goto out_release;
733 	}
734 
735 	if (sk->sk_state == TCP_ESTABLISHED) {
736 		/* No reconnect on a seqpacket socket */
737 		err = -EISCONN;
738 		goto out_release;
739 	}
740 
741 	sk->sk_state   = TCP_CLOSE;
742 	sock->state = SS_UNCONNECTED;
743 
744 	rose->neighbour = rose_get_neigh(&addr->srose_addr, &cause,
745 					 &diagnostic, 0);
746 	if (!rose->neighbour) {
747 		err = -ENETUNREACH;
748 		goto out_release;
749 	}
750 
751 	rose->lci = rose_new_lci(rose->neighbour);
752 	if (!rose->lci) {
753 		err = -ENETUNREACH;
754 		goto out_release;
755 	}
756 
757 	if (sock_flag(sk, SOCK_ZAPPED)) {	/* Must bind first - autobinding in this case may or may not work */
758 		sock_reset_flag(sk, SOCK_ZAPPED);
759 
760 		if ((dev = rose_dev_first()) == NULL) {
761 			err = -ENETUNREACH;
762 			goto out_release;
763 		}
764 
765 		user = ax25_findbyuid(current_euid());
766 		if (!user) {
767 			err = -EINVAL;
768 			goto out_release;
769 		}
770 
771 		memcpy(&rose->source_addr, dev->dev_addr, ROSE_ADDR_LEN);
772 		rose->source_call = user->call;
773 		rose->device      = dev;
774 		ax25_uid_put(user);
775 
776 		rose_insert_socket(sk);		/* Finish the bind */
777 	}
778 	rose->dest_addr   = addr->srose_addr;
779 	rose->dest_call   = addr->srose_call;
780 	rose->rand        = ((long)rose & 0xFFFF) + rose->lci;
781 	rose->dest_ndigis = addr->srose_ndigis;
782 
783 	if (addr_len == sizeof(struct full_sockaddr_rose)) {
784 		struct full_sockaddr_rose *full_addr = (struct full_sockaddr_rose *)uaddr;
785 		for (n = 0 ; n < addr->srose_ndigis ; n++)
786 			rose->dest_digis[n] = full_addr->srose_digis[n];
787 	} else {
788 		if (rose->dest_ndigis == 1) {
789 			rose->dest_digis[0] = addr->srose_digi;
790 		}
791 	}
792 
793 	/* Move to connecting socket, start sending Connect Requests */
794 	sock->state   = SS_CONNECTING;
795 	sk->sk_state     = TCP_SYN_SENT;
796 
797 	rose->state = ROSE_STATE_1;
798 
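	/* Account for this virtual call against the chosen neighbour. */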
799 	rose->neighbour->use++;
800 
801 	rose_write_internal(sk, ROSE_CALL_REQUEST);
802 	rose_start_heartbeat(sk);
803 	rose_start_t1timer(sk);
804 
805 	/* Now wait for the call to complete, unless this is a non-blocking connect */
806 	if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) {
807 		err = -EINPROGRESS;
808 		goto out_release;
809 	}
810 
811 	/*
812 	 * A Clear Request, a timer expiry or a routing failure will move the
813 	 * socket to closed.
814 	 */
815 	if (sk->sk_state == TCP_SYN_SENT) {
816 		DEFINE_WAIT(wait);
817 
818 		for (;;) {
819 			prepare_to_wait(sk_sleep(sk), &wait,
820 					TASK_INTERRUPTIBLE);
821 			if (sk->sk_state != TCP_SYN_SENT)
822 				break;
823 			if (!signal_pending(current)) {
824 				release_sock(sk);
825 				schedule();
826 				lock_sock(sk);
827 				continue;
828 			}
829 			err = -ERESTARTSYS;
830 			break;
831 		}
832 		finish_wait(sk_sleep(sk), &wait);
833 
834 		if (err)
835 			goto out_release;
836 	}
837 
838 	if (sk->sk_state != TCP_ESTABLISHED) {
839 		sock->state = SS_UNCONNECTED;
840 		err = sock_error(sk);	/* Always set at this point */
841 		goto out_release;
842 	}
843 
844 	sock->state = SS_CONNECTED;
845 
846 out_release:
847 	release_sock(sk);
848 
849 	return err;
850 }
851 
852 static int rose_accept(struct socket *sock, struct socket *newsock, int flags,
853 		       bool kern)
854 {
855 	struct sk_buff *skb;
856 	struct sock *newsk;
857 	DEFINE_WAIT(wait);
858 	struct sock *sk;
859 	int err = 0;
860 
861 	if ((sk = sock->sk) == NULL)
862 		return -EINVAL;
863 
864 	lock_sock(sk);
865 	if (sk->sk_type != SOCK_SEQPACKET) {
866 		err = -EOPNOTSUPP;
867 		goto out_release;
868 	}
869 
870 	if (sk->sk_state != TCP_LISTEN) {
871 		err = -EINVAL;
872 		goto out_release;
873 	}
874 
875 	/*
876 	 *	The receive queue this time is holding sockets ready to use
877 	 *	hooked into the Call Request we saved
878 	 */
879 	for (;;) {
880 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
881 
882 		skb = skb_dequeue(&sk->sk_receive_queue);
883 		if (skb)
884 			break;
885 
886 		if (flags & O_NONBLOCK) {
887 			err = -EWOULDBLOCK;
888 			break;
889 		}
890 		if (!signal_pending(current)) {
891 			release_sock(sk);
892 			schedule();
893 			lock_sock(sk);
894 			continue;
895 		}
896 		err = -ERESTARTSYS;
897 		break;
898 	}
899 	finish_wait(sk_sleep(sk), &wait);
900 	if (err)
901 		goto out_release;
902 
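	/* The pending socket was fully set up by rose_rx_call_request(). */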
903 	newsk = skb->sk;
904 	sock_graft(newsk, newsock);
905 
906 	/* Now attach up the new socket */
907 	skb->sk = NULL;
908 	kfree_skb(skb);
909 	sk_acceptq_removed(sk);
910 
911 out_release:
912 	release_sock(sk);
913 
914 	return err;
915 }
916 
917 static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
918 	int peer)
919 {
920 	struct full_sockaddr_rose *srose = (struct full_sockaddr_rose *)uaddr;
921 	struct sock *sk = sock->sk;
922 	struct rose_sock *rose = rose_sk(sk);
923 	int n;
924 
925 	memset(srose, 0, sizeof(*srose));
926 	if (peer != 0) {
927 		if (sk->sk_state != TCP_ESTABLISHED)
928 			return -ENOTCONN;
929 		srose->srose_family = AF_ROSE;
930 		srose->srose_addr   = rose->dest_addr;
931 		srose->srose_call   = rose->dest_call;
932 		srose->srose_ndigis = rose->dest_ndigis;
933 		for (n = 0; n < rose->dest_ndigis; n++)
934 			srose->srose_digis[n] = rose->dest_digis[n];
935 	} else {
936 		srose->srose_family = AF_ROSE;
937 		srose->srose_addr   = rose->source_addr;
938 		srose->srose_call   = rose->source_call;
939 		srose->srose_ndigis = rose->source_ndigis;
940 		for (n = 0; n < rose->source_ndigis; n++)
941 			srose->srose_digis[n] = rose->source_digis[n];
942 	}
943 
944 	return sizeof(struct full_sockaddr_rose);
945 }
946 
947 int rose_rx_call_request(struct sk_buff *skb, struct net_device *dev, struct rose_neigh *neigh, unsigned int lci)
948 {
949 	struct sock *sk;
950 	struct sock *make;
951 	struct rose_sock *make_rose;
952 	struct rose_facilities_struct facilities;
953 	int n;
954 
955 	skb->sk = NULL;		/* Initially we don't know who it's for */
956 
957 	/*
958 	 *	skb->data points to the rose frame start
959 	 */
960 	memset(&facilities, 0x00, sizeof(struct rose_facilities_struct));
961 
962 	if (!rose_parse_facilities(skb->data + ROSE_CALL_REQ_FACILITIES_OFF,
963 				   skb->len - ROSE_CALL_REQ_FACILITIES_OFF,
964 				   &facilities)) {
965 		rose_transmit_clear_request(neigh, lci, ROSE_INVALID_FACILITY, 76);
966 		return 0;
967 	}
968 
969 	sk = rose_find_listener(&facilities.source_addr, &facilities.source_call);
970 
971 	/*
972 	 * We can't accept the Call Request.
973 	 */
974 	if (sk == NULL || sk_acceptq_is_full(sk) ||
975 	    (make = rose_make_new(sk)) == NULL) {
976 		rose_transmit_clear_request(neigh, lci, ROSE_NETWORK_CONGESTION, 120);
977 		return 0;
978 	}
979 
980 	skb->sk     = make;
981 	make->sk_state = TCP_ESTABLISHED;
982 	make_rose = rose_sk(make);
983 
984 	make_rose->lci           = lci;
985 	make_rose->dest_addr     = facilities.dest_addr;
986 	make_rose->dest_call     = facilities.dest_call;
987 	make_rose->dest_ndigis   = facilities.dest_ndigis;
988 	for (n = 0 ; n < facilities.dest_ndigis ; n++)
989 		make_rose->dest_digis[n] = facilities.dest_digis[n];
990 	make_rose->source_addr   = facilities.source_addr;
991 	make_rose->source_call   = facilities.source_call;
992 	make_rose->source_ndigis = facilities.source_ndigis;
993 	for (n = 0 ; n < facilities.source_ndigis ; n++)
994 		make_rose->source_digis[n] = facilities.source_digis[n];
995 	make_rose->neighbour     = neigh;
996 	make_rose->device        = dev;
997 	make_rose->facilities    = facilities;
998 
999 	make_rose->neighbour->use++;
1000 
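	/*
	 *	With ROSE_DEFER set on the listener the call is left unanswered in
	 *	state 5 until userspace accepts it with SIOCRSACCEPT.
	 */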
1001 	if (rose_sk(sk)->defer) {
1002 		make_rose->state = ROSE_STATE_5;
1003 	} else {
1004 		rose_write_internal(make, ROSE_CALL_ACCEPTED);
1005 		make_rose->state = ROSE_STATE_3;
1006 		rose_start_idletimer(make);
1007 	}
1008 
1009 	make_rose->condition = 0x00;
1010 	make_rose->vs        = 0;
1011 	make_rose->va        = 0;
1012 	make_rose->vr        = 0;
1013 	make_rose->vl        = 0;
1014 	sk_acceptq_added(sk);
1015 
1016 	rose_insert_socket(make);
1017 
1018 	skb_queue_head(&sk->sk_receive_queue, skb);
1019 
1020 	rose_start_heartbeat(make);
1021 
1022 	if (!sock_flag(sk, SOCK_DEAD))
1023 		sk->sk_data_ready(sk);
1024 
1025 	return 1;
1026 }
1027 
1028 static int rose_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1029 {
1030 	struct sock *sk = sock->sk;
1031 	struct rose_sock *rose = rose_sk(sk);
1032 	DECLARE_SOCKADDR(struct sockaddr_rose *, usrose, msg->msg_name);
1033 	int err;
1034 	struct full_sockaddr_rose srose;
1035 	struct sk_buff *skb;
1036 	unsigned char *asmptr;
1037 	int n, size, qbit = 0;
1038 
1039 	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_CMSG_COMPAT))
1040 		return -EINVAL;
1041 
1042 	if (sock_flag(sk, SOCK_ZAPPED))
1043 		return -EADDRNOTAVAIL;
1044 
1045 	if (sk->sk_shutdown & SEND_SHUTDOWN) {
1046 		send_sig(SIGPIPE, current, 0);
1047 		return -EPIPE;
1048 	}
1049 
1050 	if (rose->neighbour == NULL || rose->device == NULL)
1051 		return -ENETUNREACH;
1052 
1053 	if (usrose != NULL) {
1054 		if (msg->msg_namelen != sizeof(struct sockaddr_rose) && msg->msg_namelen != sizeof(struct full_sockaddr_rose))
1055 			return -EINVAL;
1056 		memset(&srose, 0, sizeof(struct full_sockaddr_rose));
1057 		memcpy(&srose, usrose, msg->msg_namelen);
1058 		if (rosecmp(&rose->dest_addr, &srose.srose_addr) != 0 ||
1059 		    ax25cmp(&rose->dest_call, &srose.srose_call) != 0)
1060 			return -EISCONN;
1061 		if (srose.srose_ndigis != rose->dest_ndigis)
1062 			return -EISCONN;
1063 		if (srose.srose_ndigis == rose->dest_ndigis) {
1064 			for (n = 0 ; n < srose.srose_ndigis ; n++)
1065 				if (ax25cmp(&rose->dest_digis[n],
1066 					    &srose.srose_digis[n]))
1067 					return -EISCONN;
1068 		}
1069 		if (srose.srose_family != AF_ROSE)
1070 			return -EINVAL;
1071 	} else {
1072 		if (sk->sk_state != TCP_ESTABLISHED)
1073 			return -ENOTCONN;
1074 
1075 		srose.srose_family = AF_ROSE;
1076 		srose.srose_addr   = rose->dest_addr;
1077 		srose.srose_call   = rose->dest_call;
1078 		srose.srose_ndigis = rose->dest_ndigis;
1079 		for (n = 0 ; n < rose->dest_ndigis ; n++)
1080 			srose.srose_digis[n] = rose->dest_digis[n];
1081 	}
1082 
1083 	/* Build a packet */
1084 	/* Sanity check the packet size */
1085 	if (len > 65535)
1086 		return -EMSGSIZE;
1087 
1088 	size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
1089 
1090 	if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
1091 		return err;
1092 
1093 	skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN);
1094 
1095 	/*
1096 	 *	Put the data on the end
1097 	 */
1098 
1099 	skb_reset_transport_header(skb);
1100 	skb_put(skb, len);
1101 
1102 	err = memcpy_from_msg(skb_transport_header(skb), msg, len);
1103 	if (err) {
1104 		kfree_skb(skb);
1105 		return err;
1106 	}
1107 
1108 	/*
1109 	 *	If the Q BIT Include socket option is in force, the first
1110 	 *	byte of the user data is the logical value of the Q Bit.
1111 	 */
1112 	if (rose->qbitincl) {
1113 		qbit = skb->data[0];
1114 		skb_pull(skb, 1);
1115 	}
1116 
1117 	/*
1118 	 *	Push down the ROSE header
1119 	 */
1120 	asmptr = skb_push(skb, ROSE_MIN_LEN);
1121 
1122 	/* Build a ROSE Network header */
1123 	asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1124 	asmptr[1] = (rose->lci >> 0) & 0xFF;
1125 	asmptr[2] = ROSE_DATA;
1126 
1127 	if (qbit)
1128 		asmptr[0] |= ROSE_Q_BIT;
1129 
1130 	if (sk->sk_state != TCP_ESTABLISHED) {
1131 		kfree_skb(skb);
1132 		return -ENOTCONN;
1133 	}
1134 
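	/*
	 *	Optional M-bit fragmentation (built only when M_BIT is defined):
	 *	split the data into ROSE_PACLEN sized packets and set the M (more
	 *	data) bit on every fragment except the last.
	 */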
1135 #ifdef M_BIT
1136 #define ROSE_PACLEN (256-ROSE_MIN_LEN)
1137 	if (skb->len - ROSE_MIN_LEN > ROSE_PACLEN) {
1138 		unsigned char header[ROSE_MIN_LEN];
1139 		struct sk_buff *skbn;
1140 		int frontlen;
1141 		int lg;
1142 
1143 		/* Save a copy of the Header */
1144 		skb_copy_from_linear_data(skb, header, ROSE_MIN_LEN);
1145 		skb_pull(skb, ROSE_MIN_LEN);
1146 
1147 		frontlen = skb_headroom(skb);
1148 
1149 		while (skb->len > 0) {
1150 			if ((skbn = sock_alloc_send_skb(sk, frontlen + ROSE_PACLEN, 0, &err)) == NULL) {
1151 				kfree_skb(skb);
1152 				return err;
1153 			}
1154 
1155 			skbn->sk   = sk;
1156 			skbn->free = 1;
1157 			skbn->arp  = 1;
1158 
1159 			skb_reserve(skbn, frontlen);
1160 
1161 			lg = (ROSE_PACLEN > skb->len) ? skb->len : ROSE_PACLEN;
1162 
1163 			/* Copy the user data */
1164 			skb_copy_from_linear_data(skb, skb_put(skbn, lg), lg);
1165 			skb_pull(skb, lg);
1166 
1167 			/* Duplicate the Header */
1168 			skb_push(skbn, ROSE_MIN_LEN);
1169 			skb_copy_to_linear_data(skbn, header, ROSE_MIN_LEN);
1170 
1171 			if (skb->len > 0)
1172 				skbn->data[2] |= M_BIT;
1173 
1174 			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
1175 		}
1176 
1177 		skb->free = 1;
1178 		kfree_skb(skb);
1179 	} else {
1180 		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
1181 	}
1182 #else
1183 	skb_queue_tail(&sk->sk_write_queue, skb);	/* Shove it onto the queue */
1184 #endif
1185 
1186 	rose_kick(sk);
1187 
1188 	return len;
1189 }
1190 
1191 
1192 static int rose_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
1193 			int flags)
1194 {
1195 	struct sock *sk = sock->sk;
1196 	struct rose_sock *rose = rose_sk(sk);
1197 	size_t copied;
1198 	unsigned char *asmptr;
1199 	struct sk_buff *skb;
1200 	int n, er, qbit;
1201 
1202 	/*
1203 	 * This works for seqpacket too. The receiver has ordered the queue for
1204 	 * us! We do one quick check first though
1205 	 */
1206 	if (sk->sk_state != TCP_ESTABLISHED)
1207 		return -ENOTCONN;
1208 
1209 	/* Now we can treat all alike */
1210 	if ((skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &er)) == NULL)
1211 		return er;
1212 
1213 	qbit = (skb->data[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
1214 
1215 	skb_pull(skb, ROSE_MIN_LEN);
1216 
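	/* With ROSE_QBITINCL, pass the Q bit to the user as a leading data byte. */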
1217 	if (rose->qbitincl) {
1218 		asmptr  = skb_push(skb, 1);
1219 		*asmptr = qbit;
1220 	}
1221 
1222 	skb_reset_transport_header(skb);
1223 	copied     = skb->len;
1224 
1225 	if (copied > size) {
1226 		copied = size;
1227 		msg->msg_flags |= MSG_TRUNC;
1228 	}
1229 
1230 	skb_copy_datagram_msg(skb, 0, msg, copied);
1231 
1232 	if (msg->msg_name) {
1233 		struct sockaddr_rose *srose;
1234 		DECLARE_SOCKADDR(struct full_sockaddr_rose *, full_srose,
1235 				 msg->msg_name);
1236 
1237 		memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
1238 		srose = msg->msg_name;
1239 		srose->srose_family = AF_ROSE;
1240 		srose->srose_addr   = rose->dest_addr;
1241 		srose->srose_call   = rose->dest_call;
1242 		srose->srose_ndigis = rose->dest_ndigis;
1243 		for (n = 0 ; n < rose->dest_ndigis ; n++)
1244 			full_srose->srose_digis[n] = rose->dest_digis[n];
1245 		msg->msg_namelen = sizeof(struct full_sockaddr_rose);
1246 	}
1247 
1248 	skb_free_datagram(sk, skb);
1249 
1250 	return copied;
1251 }
1252 
1253 
1254 static int rose_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1255 {
1256 	struct sock *sk = sock->sk;
1257 	struct rose_sock *rose = rose_sk(sk);
1258 	void __user *argp = (void __user *)arg;
1259 
1260 	switch (cmd) {
1261 	case TIOCOUTQ: {
1262 		long amount;
1263 
1264 		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
1265 		if (amount < 0)
1266 			amount = 0;
1267 		return put_user(amount, (unsigned int __user *) argp);
1268 	}
1269 
1270 	case TIOCINQ: {
1271 		struct sk_buff *skb;
1272 		long amount = 0L;
1273 		/* These two are safe on a single CPU system as only user tasks fiddle here */
1274 		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
1275 			amount = skb->len;
1276 		return put_user(amount, (unsigned int __user *) argp);
1277 	}
1278 
1279 	case SIOCGIFADDR:
1280 	case SIOCSIFADDR:
1281 	case SIOCGIFDSTADDR:
1282 	case SIOCSIFDSTADDR:
1283 	case SIOCGIFBRDADDR:
1284 	case SIOCSIFBRDADDR:
1285 	case SIOCGIFNETMASK:
1286 	case SIOCSIFNETMASK:
1287 	case SIOCGIFMETRIC:
1288 	case SIOCSIFMETRIC:
1289 		return -EINVAL;
1290 
1291 	case SIOCADDRT:
1292 	case SIOCDELRT:
1293 	case SIOCRSCLRRT:
1294 		if (!capable(CAP_NET_ADMIN))
1295 			return -EPERM;
1296 		return rose_rt_ioctl(cmd, argp);
1297 
1298 	case SIOCRSGCAUSE: {
1299 		struct rose_cause_struct rose_cause;
1300 		rose_cause.cause      = rose->cause;
1301 		rose_cause.diagnostic = rose->diagnostic;
1302 		return copy_to_user(argp, &rose_cause, sizeof(struct rose_cause_struct)) ? -EFAULT : 0;
1303 	}
1304 
1305 	case SIOCRSSCAUSE: {
1306 		struct rose_cause_struct rose_cause;
1307 		if (copy_from_user(&rose_cause, argp, sizeof(struct rose_cause_struct)))
1308 			return -EFAULT;
1309 		rose->cause      = rose_cause.cause;
1310 		rose->diagnostic = rose_cause.diagnostic;
1311 		return 0;
1312 	}
1313 
1314 	case SIOCRSSL2CALL:
1315 		if (!capable(CAP_NET_ADMIN)) return -EPERM;
1316 		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1317 			ax25_listen_release(&rose_callsign, NULL);
1318 		if (copy_from_user(&rose_callsign, argp, sizeof(ax25_address)))
1319 			return -EFAULT;
1320 		if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1321 			return ax25_listen_register(&rose_callsign, NULL);
1322 
1323 		return 0;
1324 
1325 	case SIOCRSGL2CALL:
1326 		return copy_to_user(argp, &rose_callsign, sizeof(ax25_address)) ? -EFAULT : 0;
1327 
1328 	case SIOCRSACCEPT:
1329 		if (rose->state == ROSE_STATE_5) {
1330 			rose_write_internal(sk, ROSE_CALL_ACCEPTED);
1331 			rose_start_idletimer(sk);
1332 			rose->condition = 0x00;
1333 			rose->vs        = 0;
1334 			rose->va        = 0;
1335 			rose->vr        = 0;
1336 			rose->vl        = 0;
1337 			rose->state     = ROSE_STATE_3;
1338 		}
1339 		return 0;
1340 
1341 	default:
1342 		return -ENOIOCTLCMD;
1343 	}
1344 
1345 	return 0;
1346 }
1347 
1348 #ifdef CONFIG_PROC_FS
1349 static void *rose_info_start(struct seq_file *seq, loff_t *pos)
1350 	__acquires(rose_list_lock)
1351 {
1352 	spin_lock_bh(&rose_list_lock);
1353 	return seq_hlist_start_head(&rose_list, *pos);
1354 }
1355 
1356 static void *rose_info_next(struct seq_file *seq, void *v, loff_t *pos)
1357 {
1358 	return seq_hlist_next(v, &rose_list, pos);
1359 }
1360 
1361 static void rose_info_stop(struct seq_file *seq, void *v)
1362 	__releases(rose_list_lock)
1363 {
1364 	spin_unlock_bh(&rose_list_lock);
1365 }
1366 
1367 static int rose_info_show(struct seq_file *seq, void *v)
1368 {
1369 	char buf[11], rsbuf[11];
1370 
1371 	if (v == SEQ_START_TOKEN)
1372 		seq_puts(seq,
1373 			 "dest_addr  dest_call src_addr   src_call  dev   lci neigh st vs vr va   t  t1  t2  t3  hb    idle Snd-Q Rcv-Q inode\n");
1374 
1375 	else {
1376 		struct sock *s = sk_entry(v);
1377 		struct rose_sock *rose = rose_sk(s);
1378 		const char *devname, *callsign;
1379 		const struct net_device *dev = rose->device;
1380 
1381 		if (!dev)
1382 			devname = "???";
1383 		else
1384 			devname = dev->name;
1385 
1386 		seq_printf(seq, "%-10s %-9s ",
1387 			   rose2asc(rsbuf, &rose->dest_addr),
1388 			   ax2asc(buf, &rose->dest_call));
1389 
1390 		if (ax25cmp(&rose->source_call, &null_ax25_address) == 0)
1391 			callsign = "??????-?";
1392 		else
1393 			callsign = ax2asc(buf, &rose->source_call);
1394 
1395 		seq_printf(seq,
1396 			   "%-10s %-9s %-5s %3.3X %05d  %d  %d  %d  %d %3lu %3lu %3lu %3lu %3lu %3lu/%03lu %5d %5d %ld\n",
1397 			rose2asc(rsbuf, &rose->source_addr),
1398 			callsign,
1399 			devname,
1400 			rose->lci & 0x0FFF,
1401 			(rose->neighbour) ? rose->neighbour->number : 0,
1402 			rose->state,
1403 			rose->vs,
1404 			rose->vr,
1405 			rose->va,
1406 			ax25_display_timer(&rose->timer) / HZ,
1407 			rose->t1 / HZ,
1408 			rose->t2 / HZ,
1409 			rose->t3 / HZ,
1410 			rose->hb / HZ,
1411 			ax25_display_timer(&rose->idletimer) / (60 * HZ),
1412 			rose->idle / (60 * HZ),
1413 			sk_wmem_alloc_get(s),
1414 			sk_rmem_alloc_get(s),
1415 			s->sk_socket ? SOCK_INODE(s->sk_socket)->i_ino : 0L);
1416 	}
1417 
1418 	return 0;
1419 }
1420 
1421 static const struct seq_operations rose_info_seqops = {
1422 	.start = rose_info_start,
1423 	.next = rose_info_next,
1424 	.stop = rose_info_stop,
1425 	.show = rose_info_show,
1426 };
1427 #endif	/* CONFIG_PROC_FS */
1428 
1429 static const struct net_proto_family rose_family_ops = {
1430 	.family		=	PF_ROSE,
1431 	.create		=	rose_create,
1432 	.owner		=	THIS_MODULE,
1433 };
1434 
1435 static const struct proto_ops rose_proto_ops = {
1436 	.family		=	PF_ROSE,
1437 	.owner		=	THIS_MODULE,
1438 	.release	=	rose_release,
1439 	.bind		=	rose_bind,
1440 	.connect	=	rose_connect,
1441 	.socketpair	=	sock_no_socketpair,
1442 	.accept		=	rose_accept,
1443 	.getname	=	rose_getname,
1444 	.poll		=	datagram_poll,
1445 	.ioctl		=	rose_ioctl,
1446 	.gettstamp	=	sock_gettstamp,
1447 	.listen		=	rose_listen,
1448 	.shutdown	=	sock_no_shutdown,
1449 	.setsockopt	=	rose_setsockopt,
1450 	.getsockopt	=	rose_getsockopt,
1451 	.sendmsg	=	rose_sendmsg,
1452 	.recvmsg	=	rose_recvmsg,
1453 	.mmap		=	sock_no_mmap,
1454 	.sendpage	=	sock_no_sendpage,
1455 };
1456 
1457 static struct notifier_block rose_dev_notifier = {
1458 	.notifier_call	=	rose_device_event,
1459 };
1460 
1461 static struct net_device **dev_rose;
1462 
1463 static struct ax25_protocol rose_pid = {
1464 	.pid	= AX25_P_ROSE,
1465 	.func	= rose_route_frame
1466 };
1467 
1468 static struct ax25_linkfail rose_linkfail_notifier = {
1469 	.func	= rose_link_failed
1470 };
1471 
1472 static int __init rose_proto_init(void)
1473 {
1474 	int i;
1475 	int rc;
1476 
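	/* Reject a rose_ndevs so large that the device array size would overflow. */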
1477 	if (rose_ndevs > 0x7FFFFFFF/sizeof(struct net_device *)) {
1478 		printk(KERN_ERR "ROSE: rose_proto_init - rose_ndevs parameter too large\n");
1479 		rc = -EINVAL;
1480 		goto out;
1481 	}
1482 
1483 	rc = proto_register(&rose_proto, 0);
1484 	if (rc != 0)
1485 		goto out;
1486 
1487 	rose_callsign = null_ax25_address;
1488 
1489 	dev_rose = kcalloc(rose_ndevs, sizeof(struct net_device *),
1490 			   GFP_KERNEL);
1491 	if (dev_rose == NULL) {
1492 		printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n");
1493 		rc = -ENOMEM;
1494 		goto out_proto_unregister;
1495 	}
1496 
1497 	for (i = 0; i < rose_ndevs; i++) {
1498 		struct net_device *dev;
1499 		char name[IFNAMSIZ];
1500 
1501 		sprintf(name, "rose%d", i);
1502 		dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, rose_setup);
1503 		if (!dev) {
1504 			printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
1505 			rc = -ENOMEM;
1506 			goto fail;
1507 		}
1508 		rc = register_netdev(dev);
1509 		if (rc) {
1510 			printk(KERN_ERR "ROSE: netdevice registration failed\n");
1511 			free_netdev(dev);
1512 			goto fail;
1513 		}
1514 		dev_rose[i] = dev;
1515 	}
1516 
1517 	sock_register(&rose_family_ops);
1518 	register_netdevice_notifier(&rose_dev_notifier);
1519 
1520 	ax25_register_pid(&rose_pid);
1521 	ax25_linkfail_register(&rose_linkfail_notifier);
1522 
1523 #ifdef CONFIG_SYSCTL
1524 	rose_register_sysctl();
1525 #endif
1526 	rose_loopback_init();
1527 
1528 	rose_add_loopback_neigh();
1529 
1530 	proc_create_seq("rose", 0444, init_net.proc_net, &rose_info_seqops);
1531 	proc_create_seq("rose_neigh", 0444, init_net.proc_net,
1532 		    &rose_neigh_seqops);
1533 	proc_create_seq("rose_nodes", 0444, init_net.proc_net,
1534 		    &rose_node_seqops);
1535 	proc_create_seq("rose_routes", 0444, init_net.proc_net,
1536 		    &rose_route_seqops);
1537 out:
1538 	return rc;
1539 fail:
1540 	while (--i >= 0) {
1541 		unregister_netdev(dev_rose[i]);
1542 		free_netdev(dev_rose[i]);
1543 	}
1544 	kfree(dev_rose);
1545 out_proto_unregister:
1546 	proto_unregister(&rose_proto);
1547 	goto out;
1548 }
1549 module_init(rose_proto_init);
1550 
1551 module_param(rose_ndevs, int, 0);
1552 MODULE_PARM_DESC(rose_ndevs, "number of ROSE devices");
1553 
1554 MODULE_AUTHOR("Jonathan Naylor G4KLX <g4klx@g4klx.demon.co.uk>");
1555 MODULE_DESCRIPTION("The amateur radio ROSE network layer protocol");
1556 MODULE_LICENSE("GPL");
1557 MODULE_ALIAS_NETPROTO(PF_ROSE);
1558 
1559 static void __exit rose_exit(void)
1560 {
1561 	int i;
1562 
1563 	remove_proc_entry("rose", init_net.proc_net);
1564 	remove_proc_entry("rose_neigh", init_net.proc_net);
1565 	remove_proc_entry("rose_nodes", init_net.proc_net);
1566 	remove_proc_entry("rose_routes", init_net.proc_net);
1567 	rose_loopback_clear();
1568 
1569 	rose_rt_free();
1570 
1571 	ax25_protocol_release(AX25_P_ROSE);
1572 	ax25_linkfail_release(&rose_linkfail_notifier);
1573 
1574 	if (ax25cmp(&rose_callsign, &null_ax25_address) != 0)
1575 		ax25_listen_release(&rose_callsign, NULL);
1576 
1577 #ifdef CONFIG_SYSCTL
1578 	rose_unregister_sysctl();
1579 #endif
1580 	unregister_netdevice_notifier(&rose_dev_notifier);
1581 
1582 	sock_unregister(PF_ROSE);
1583 
1584 	for (i = 0; i < rose_ndevs; i++) {
1585 		struct net_device *dev = dev_rose[i];
1586 
1587 		if (dev) {
1588 			unregister_netdev(dev);
1589 			free_netdev(dev);
1590 		}
1591 	}
1592 
1593 	kfree(dev_rose);
1594 	proto_unregister(&rose_proto);
1595 }
1596 
1597 module_exit(rose_exit);
1598