xref: /openbmc/linux/net/bluetooth/af_bluetooth.c (revision 92a2c6b2)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth address family and sockets. */

#include <linux/module.h>
#include <linux/debugfs.h>
#include <asm/ioctls.h>

#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>

#include "selftest.h"

#define VERSION "2.20"

/* Bluetooth sockets */
#define BT_MAX_PROTO	8
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
static DEFINE_RWLOCK(bt_proto_lock);

static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
static const char *const bt_key_strings[BT_MAX_PROTO] = {
	"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
	"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
	"sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
	"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"slock-AF_BLUETOOTH-BTPROTO_HCI",
	"slock-AF_BLUETOOTH-BTPROTO_SCO",
	"slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"slock-AF_BLUETOOTH-BTPROTO_BNEP",
	"slock-AF_BLUETOOTH-BTPROTO_CMTP",
	"slock-AF_BLUETOOTH-BTPROTO_HIDP",
	"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

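/* Re-key the socket's lockdep classes with the per-protocol names above so
 * that lockdep can tell the nested Bluetooth protocols apart.
 */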
void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
	BUG_ON(!sk);
	BUG_ON(sock_owned_by_user(sk));

	sock_lock_init_class_and_name(sk,
			bt_slock_key_strings[proto], &bt_slock_key[proto],
				bt_key_strings[proto], &bt_lock_key[proto]);
}
EXPORT_SYMBOL(bt_sock_reclassify_lock);

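/* Register the net_proto_family handler for a Bluetooth protocol number;
 * fails with -EEXIST if the slot is already taken.
 */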
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
	int err = 0;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	write_lock(&bt_proto_lock);

	if (bt_proto[proto])
		err = -EEXIST;
	else
		bt_proto[proto] = ops;

	write_unlock(&bt_proto_lock);

	return err;
}
EXPORT_SYMBOL(bt_sock_register);

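/* Drop the handler registered for a Bluetooth protocol number. */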
void bt_sock_unregister(int proto)
{
	if (proto < 0 || proto >= BT_MAX_PROTO)
		return;

	write_lock(&bt_proto_lock);
	bt_proto[proto] = NULL;
	write_unlock(&bt_proto_lock);
}
EXPORT_SYMBOL(bt_sock_unregister);

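/* PF_BLUETOOTH socket creation: look up the handler registered for the
 * protocol (loading its module on demand via the bt-proto-%d alias) and
 * delegate socket creation to it.
 */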
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	int err;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		err = bt_proto[proto]->create(net, sock, proto, kern);
		if (!err)
			bt_sock_reclassify_lock(sock->sk, proto);
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}

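/* Add a socket to a protocol's socket list under its write lock;
 * bt_sock_unlink() below removes it again.
 */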
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_link);

void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_del_node_init(sk);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_unlink);

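/* Queue a child socket on its parent's accept queue and account for it in
 * the parent's ack backlog.
 */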
void bt_accept_enqueue(struct sock *parent, struct sock *sk)
{
	BT_DBG("parent %p, sk %p", parent, sk);

	sock_hold(sk);
	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
	bt_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}
EXPORT_SYMBOL(bt_accept_enqueue);

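/* Remove a child socket from its parent's accept queue and drop the
 * reference taken by bt_accept_enqueue().
 */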
void bt_accept_unlink(struct sock *sk)
{
	BT_DBG("sk %p state %d", sk, sk->sk_state);

	list_del_init(&bt_sk(sk)->accept_q);
	bt_sk(sk)->parent->sk_ack_backlog--;
	bt_sk(sk)->parent = NULL;
	sock_put(sk);
}
EXPORT_SYMBOL(bt_accept_unlink);

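/* Take the next acceptable child off the parent's accept queue, grafting it
 * onto newsock when one is supplied; closed children are dropped on the way.
 */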
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct list_head *p, *n;
	struct sock *sk;

	BT_DBG("parent %p", parent);

	list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
		sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);

		lock_sock(sk);

		/* FIXME: Is this check still needed? */
		if (sk->sk_state == BT_CLOSED) {
			release_sock(sk);
			bt_accept_unlink(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECTED || !newsock ||
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
			bt_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}

	return NULL;
}
EXPORT_SYMBOL(bt_accept_dequeue);

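/* Datagram-style recvmsg shared by the Bluetooth protocols: one skb per
 * call, truncating (and flagging MSG_TRUNC) when the buffer is too small.
 */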
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	size_t copied;
	int err;

	BT_DBG("sock %p sk %p len %zu", sock, sk, len);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		return err;
	}

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (err == 0) {
		sock_recv_ts_and_drops(msg, sk, skb);

		if (bt_sk(sk)->skb_msg_name)
			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
						&msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
EXPORT_SYMBOL(bt_sock_recvmsg);

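/* Wait for data on the receive queue, giving up the socket lock while
 * sleeping; returns the remaining timeout.
 */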
static long bt_sock_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;

		if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
			break;

		if (signal_pending(current) || !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return timeo;
}

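/* Stream-style recvmsg: copy skbs (and their paged fragments) until the
 * request is filled, waiting for more data while the SO_RCVLOWAT target
 * has not been met.
 */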
int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int err = 0;
	size_t target, copied = 0;
	long timeo;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	BT_DBG("sk %p size %zu", sk, size);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo  = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb;
		int chunk;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			if (copied >= target)
				break;

			err = sock_error(sk);
			if (err)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			err = -EAGAIN;
			if (!timeo)
				break;

			timeo = bt_sock_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			continue;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (skb_copy_datagram_msg(skb, 0, msg, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size   -= chunk;

		sock_recv_ts_and_drops(msg, sk, skb);

		if (!(flags & MSG_PEEK)) {
			int skb_len = skb_headlen(skb);

			if (chunk <= skb_len) {
				__skb_pull(skb, chunk);
			} else {
				struct sk_buff *frag;

				__skb_pull(skb, skb_len);
				chunk -= skb_len;

				skb_walk_frags(skb, frag) {
					if (chunk <= frag->len) {
						/* Pulling partial data */
						skb->len -= chunk;
						skb->data_len -= chunk;
						__skb_pull(frag, chunk);
						break;
					} else if (frag->len) {
						/* Pulling all frag data */
						chunk -= frag->len;
						skb->len -= frag->len;
						skb->data_len -= frag->len;
						__skb_pull(frag, frag->len);
					}
				}
			}

			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

out:
	release_sock(sk);
	return copied ? : err;
}
EXPORT_SYMBOL(bt_sock_stream_recvmsg);

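/* Report POLLIN on a listening socket once a child on its accept queue is
 * ready to be accepted (connected, or awaiting setup with deferred setup
 * enabled).
 */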
static inline unsigned int bt_accept_poll(struct sock *parent)
{
	struct list_head *p, *n;
	struct sock *sk;

	list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
		sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
		if (sk->sk_state == BT_CONNECTED ||
		    (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
		     sk->sk_state == BT_CONNECT2))
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

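/* poll() for Bluetooth sockets: listening sockets report readiness of their
 * accept queue, everything else is derived from the receive queue, shutdown
 * bits, socket state and the BT_SK_SUSPEND flag.
 */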
unsigned int bt_sock_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == BT_LISTEN)
		return bt_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == BT_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == BT_CONNECT ||
			sk->sk_state == BT_CONNECT2 ||
			sk->sk_state == BT_CONFIG)
		return mask;

	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
EXPORT_SYMBOL(bt_sock_poll);

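/* Common socket ioctls: queued output bytes (TIOCOUTQ), length of the next
 * pending packet (TIOCINQ) and packet timestamps; anything else is left to
 * the protocol via -ENOIOCTLCMD.
 */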
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	long amount;
	int err;

	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

	switch (cmd) {
	case TIOCOUTQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *) arg);
		break;

	case TIOCINQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		release_sock(sk);
		err = put_user(amount, (int __user *) arg);
		break;

	case SIOCGSTAMP:
		err = sock_get_timestamp(sk, (struct timeval __user *) arg);
		break;

	case SIOCGSTAMPNS:
		err = sock_get_timestampns(sk, (struct timespec __user *) arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
EXPORT_SYMBOL(bt_sock_ioctl);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("sk %p", sk);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (sk->sk_state != state) {
		if (!timeo) {
			err = -EINPROGRESS;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo;
	int err = 0;

	BT_DBG("sk %p", sk);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
EXPORT_SYMBOL(bt_sock_wait_ready);

#ifdef CONFIG_PROC_FS
struct bt_seq_state {
	struct bt_sock_list *l;
};

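/* seq_file iterator over a protocol's socket list; the list's read lock is
 * held from bt_seq_start() until bt_seq_stop().
 */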
static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq->private->l->lock)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	read_lock(&l->lock);
	return seq_hlist_start_head(&l->head, *pos);
}

static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	return seq_hlist_next(v, &l->head, pos);
}

static void bt_seq_stop(struct seq_file *seq, void *v)
	__releases(seq->private->l->lock)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	read_unlock(&l->lock);
}

static int bt_seq_show(struct seq_file *seq, void *v)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Parent");

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	} else {
		struct sock *sk = sk_entry(v);
		struct bt_sock *bt = bt_sk(sk);

		seq_printf(seq,
			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
			   sk,
			   atomic_read(&sk->sk_refcnt),
			   sk_rmem_alloc_get(sk),
			   sk_wmem_alloc_get(sk),
			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
			   sock_i_ino(sk),
			   bt->parent ? sock_i_ino(bt->parent) : 0LU);

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations bt_seq_ops = {
	.start = bt_seq_start,
	.next  = bt_seq_next,
	.stop  = bt_seq_stop,
	.show  = bt_seq_show,
};

static int bt_seq_open(struct inode *inode, struct file *file)
{
	struct bt_sock_list *sk_list;
	struct bt_seq_state *s;

	sk_list = PDE_DATA(inode);
	s = __seq_open_private(file, &bt_seq_ops,
			       sizeof(struct bt_seq_state));
	if (!s)
		return -ENOMEM;

	s->l = sk_list;
	return 0;
}

static const struct file_operations bt_fops = {
	.open = bt_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private
};

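/* Create the per-protocol /proc/net entry backed by the iterator above,
 * optionally extended by a protocol-specific show callback.
 */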
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	sk_list->custom_seq_show = seq_show;

	if (!proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list))
		return -ENOMEM;
	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
	remove_proc_entry(name, net->proc_net);
}
#else
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
}
#endif
EXPORT_SYMBOL(bt_procfs_init);
EXPORT_SYMBOL(bt_procfs_cleanup);

static struct net_proto_family bt_sock_family_ops = {
	.owner	= THIS_MODULE,
	.family	= PF_BLUETOOTH,
	.create	= bt_sock_create,
};

struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);

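/* Module init: run the self tests, set up debugfs and sysfs, register the
 * PF_BLUETOOTH family and bring up the HCI, L2CAP and SCO socket layers,
 * unwinding on failure.
 */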
static int __init bt_init(void)
{
	struct sk_buff *skb;
	int err;

	BUILD_BUG_ON(sizeof(struct bt_skb_cb) > sizeof(skb->cb));

	BT_INFO("Core ver %s", VERSION);

	err = bt_selftest();
	if (err < 0)
		return err;

	bt_debugfs = debugfs_create_dir("bluetooth", NULL);

	err = bt_sysfs_init();
	if (err < 0)
		return err;

	err = sock_register(&bt_sock_family_ops);
	if (err < 0) {
		bt_sysfs_cleanup();
		return err;
	}

	BT_INFO("HCI device and connection manager initialized");

	err = hci_sock_init();
	if (err < 0)
		goto error;

	err = l2cap_init();
	if (err < 0)
		goto sock_err;

	err = sco_init();
	if (err < 0) {
		l2cap_exit();
		goto sock_err;
	}

	return 0;

sock_err:
	hci_sock_cleanup();

error:
	sock_unregister(PF_BLUETOOTH);
	bt_sysfs_cleanup();

	return err;
}

static void __exit bt_exit(void)
{
	sco_exit();

	l2cap_exit();

	hci_sock_cleanup();

	sock_unregister(PF_BLUETOOTH);

	bt_sysfs_cleanup();

	debugfs_remove_recursive(bt_debugfs);
}

subsys_initcall(bt_init);
module_exit(bt_exit);

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);