/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth address family and sockets. */

#include <linux/module.h>
#include <linux/debugfs.h>
#include <asm/ioctls.h>

#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>

#define VERSION "2.19"

/* Bluetooth sockets */
#define BT_MAX_PROTO	8
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
static DEFINE_RWLOCK(bt_proto_lock);

static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
static const char *const bt_key_strings[BT_MAX_PROTO] = {
	"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
	"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
	"sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
	"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"slock-AF_BLUETOOTH-BTPROTO_HCI",
	"slock-AF_BLUETOOTH-BTPROTO_SCO",
	"slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"slock-AF_BLUETOOTH-BTPROTO_BNEP",
	"slock-AF_BLUETOOTH-BTPROTO_CMTP",
	"slock-AF_BLUETOOTH-BTPROTO_HIDP",
	"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

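/* Re-key a socket's lock classes by Bluetooth protocol so that lockdep
 * can distinguish, for example, RFCOMM locks taken on top of L2CAP
 * locks from plain L2CAP locking.
 */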
void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
	BUG_ON(!sk);
	BUG_ON(sock_owned_by_user(sk));

	sock_lock_init_class_and_name(sk,
			bt_slock_key_strings[proto], &bt_slock_key[proto],
				bt_key_strings[proto], &bt_lock_key[proto]);
}
EXPORT_SYMBOL(bt_sock_reclassify_lock);

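/* Register the handler for one BTPROTO_* number. Called by the protocol
 * modules (L2CAP, RFCOMM, SCO, ...) at init time; fails with -EEXIST if
 * the slot is already taken.
 */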
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
	int err = 0;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	write_lock(&bt_proto_lock);

	if (bt_proto[proto])
		err = -EEXIST;
	else
		bt_proto[proto] = ops;

	write_unlock(&bt_proto_lock);

	return err;
}
EXPORT_SYMBOL(bt_sock_register);

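/* Drop the handler registered for a BTPROTO_* number. */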
void bt_sock_unregister(int proto)
{
	if (proto < 0 || proto >= BT_MAX_PROTO)
		return;

	write_lock(&bt_proto_lock);
	bt_proto[proto] = NULL;
	write_unlock(&bt_proto_lock);
}
EXPORT_SYMBOL(bt_sock_unregister);

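/* socket(2) backend for PF_BLUETOOTH: load the protocol module on
 * demand via the "bt-proto-%d" alias if needed, then hand creation off
 * to the registered per-protocol handler.
 */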
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	int err;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		err = bt_proto[proto]->create(net, sock, proto, kern);
		if (!err)
			bt_sock_reclassify_lock(sock->sk, proto);
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}

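/* Add a socket to / remove a socket from a protocol's global list. */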
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_link);

void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_del_node_init(sk);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_unlink);

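/* Queue a child socket on its listening parent's accept queue. */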
void bt_accept_enqueue(struct sock *parent, struct sock *sk)
{
	BT_DBG("parent %p, sk %p", parent, sk);

	sock_hold(sk);
	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
	bt_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}
EXPORT_SYMBOL(bt_accept_enqueue);

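/* Take a child socket off its parent's accept queue and drop the
 * reference acquired in bt_accept_enqueue().
 */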
void bt_accept_unlink(struct sock *sk)
{
	BT_DBG("sk %p state %d", sk, sk->sk_state);

	list_del_init(&bt_sk(sk)->accept_q);
	bt_sk(sk)->parent->sk_ack_backlog--;
	bt_sk(sk)->parent = NULL;
	sock_put(sk);
}
EXPORT_SYMBOL(bt_accept_unlink);

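/* Return the first child on the accept queue that is connected, or any
 * child when no @newsock is given or deferred setup is enabled, and
 * graft it onto @newsock if one was supplied. Children that are already
 * closed are unlinked and skipped.
 */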
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct list_head *p, *n;
	struct sock *sk;

	BT_DBG("parent %p", parent);

	list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
		sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);

		lock_sock(sk);

		/* FIXME: Is this check still needed */
		if (sk->sk_state == BT_CLOSED) {
			release_sock(sk);
			bt_accept_unlink(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECTED || !newsock ||
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
			bt_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}

	return NULL;
}
EXPORT_SYMBOL(bt_accept_dequeue);

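/* Generic datagram recvmsg() for Bluetooth sockets: receive one skb,
 * copy what fits into the caller's iovec and set MSG_TRUNC if the
 * datagram was larger than the supplied buffer.
 */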
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	size_t copied;
	int err;

	BT_DBG("sock %p sk %p len %zu", sock, sk, len);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			return 0;

		return err;
	}

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err == 0) {
		sock_recv_ts_and_drops(msg, sk, skb);

		if (bt_sk(sk)->skb_msg_name)
			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
						&msg->msg_namelen);
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
EXPORT_SYMBOL(bt_sock_recvmsg);

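/* Wait, with the socket lock dropped across the sleep, until data is
 * queued, the socket errors out or shuts down, a signal is pending, or
 * the timeout expires.
 */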
static long bt_sock_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;

		if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
			break;

		if (signal_pending(current) || !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return timeo;
}

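/* Stream-style recvmsg(): keep consuming queued skbs (head data and
 * paged fragments) until the request or the SO_RCVLOWAT target is met
 * or no more data is available; MSG_PEEK leaves everything queued.
 */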
int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int err = 0;
	size_t target, copied = 0;
	long timeo;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	BT_DBG("sk %p size %zu", sk, size);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo  = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb;
		int chunk;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			if (copied >= target)
				break;

			err = sock_error(sk);
			if (err)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			err = -EAGAIN;
			if (!timeo)
				break;

			timeo = bt_sock_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			continue;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size   -= chunk;

		sock_recv_ts_and_drops(msg, sk, skb);

		if (!(flags & MSG_PEEK)) {
			int skb_len = skb_headlen(skb);

			if (chunk <= skb_len) {
				__skb_pull(skb, chunk);
			} else {
				struct sk_buff *frag;

				__skb_pull(skb, skb_len);
				chunk -= skb_len;

				skb_walk_frags(skb, frag) {
					if (chunk <= frag->len) {
						/* Pulling partial data */
						skb->len -= chunk;
						skb->data_len -= chunk;
						__skb_pull(frag, chunk);
						break;
					} else if (frag->len) {
						/* Pulling all frag data */
						chunk -= frag->len;
						skb->len -= frag->len;
						skb->data_len -= frag->len;
						__skb_pull(frag, frag->len);
					}
				}
			}

			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

out:
	release_sock(sk);
	return copied ? : err;
}
EXPORT_SYMBOL(bt_sock_stream_recvmsg);

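/* Report readability on a listening socket if at least one child on the
 * accept queue can be accepted.
 */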
static inline unsigned int bt_accept_poll(struct sock *parent)
{
	struct list_head *p, *n;
	struct sock *sk;

	list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
		sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
		if (sk->sk_state == BT_CONNECTED ||
		    (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
		     sk->sk_state == BT_CONNECT2))
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

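/* poll() for Bluetooth sockets: build the event mask from the socket
 * state, the error and receive queues and the shutdown flags.
 */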
unsigned int bt_sock_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == BT_LISTEN)
		return bt_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == BT_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == BT_CONNECT ||
	    sk->sk_state == BT_CONNECT2 ||
	    sk->sk_state == BT_CONFIG)
		return mask;

	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
EXPORT_SYMBOL(bt_sock_poll);

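/* Common ioctl()s: pending queue sizes (TIOCOUTQ/TIOCINQ) and packet
 * timestamps (SIOCGSTAMP/SIOCGSTAMPNS); anything else is left to the
 * caller via -ENOIOCTLCMD.
 */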
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	long amount;
	int err;

	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

	switch (cmd) {
	case TIOCOUTQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *) arg);
		break;

	case TIOCINQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		release_sock(sk);
		err = put_user(amount, (int __user *) arg);
		break;

	case SIOCGSTAMP:
		err = sock_get_timestamp(sk, (struct timeval __user *) arg);
		break;

	case SIOCGSTAMPNS:
		err = sock_get_timestampns(sk, (struct timespec __user *) arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
EXPORT_SYMBOL(bt_sock_ioctl);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("sk %p", sk);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (sk->sk_state != state) {
		if (!timeo) {
			err = -EINPROGRESS;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo;
	int err = 0;

	BT_DBG("sk %p", sk);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
EXPORT_SYMBOL(bt_sock_wait_ready);

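/* seq_file plumbing for the per-protocol /proc/net entries: walk a
 * protocol's bt_sock_list under its read lock and let the protocol
 * append extra columns through custom_seq_show.
 */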
#ifdef CONFIG_PROC_FS
struct bt_seq_state {
	struct bt_sock_list *l;
};

static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq->private->l->lock)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	read_lock(&l->lock);
	return seq_hlist_start_head(&l->head, *pos);
}

static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	return seq_hlist_next(v, &l->head, pos);
}

static void bt_seq_stop(struct seq_file *seq, void *v)
	__releases(seq->private->l->lock)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	read_unlock(&l->lock);
}

static int bt_seq_show(struct seq_file *seq, void *v)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Parent");

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	} else {
		struct sock *sk = sk_entry(v);
		struct bt_sock *bt = bt_sk(sk);

		seq_printf(seq,
			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
			   sk,
			   atomic_read(&sk->sk_refcnt),
			   sk_rmem_alloc_get(sk),
			   sk_wmem_alloc_get(sk),
			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
			   sock_i_ino(sk),
			   bt->parent ? sock_i_ino(bt->parent) : 0LU);

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations bt_seq_ops = {
	.start = bt_seq_start,
	.next  = bt_seq_next,
	.stop  = bt_seq_stop,
	.show  = bt_seq_show,
};

static int bt_seq_open(struct inode *inode, struct file *file)
{
	struct bt_sock_list *sk_list;
	struct bt_seq_state *s;

	sk_list = PDE_DATA(inode);
	s = __seq_open_private(file, &bt_seq_ops,
			       sizeof(struct bt_seq_state));
	if (!s)
		return -ENOMEM;

	s->l = sk_list;
	return 0;
}

static const struct file_operations bt_fops = {
	.open = bt_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private
};

int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	sk_list->custom_seq_show = seq_show;

	if (!proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list))
		return -ENOMEM;
	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
	remove_proc_entry(name, net->proc_net);
}
#else
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
}
#endif
EXPORT_SYMBOL(bt_procfs_init);
EXPORT_SYMBOL(bt_procfs_cleanup);

static struct net_proto_family bt_sock_family_ops = {
	.owner	= THIS_MODULE,
	.family	= PF_BLUETOOTH,
	.create	= bt_sock_create,
};

struct dentry *bt_debugfs;
EXPORT_SYMBOL_GPL(bt_debugfs);

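/* Bring up the Bluetooth core: sysfs and debugfs, the PF_BLUETOOTH
 * socket family, and the protocols linked into bluetooth.ko (HCI
 * sockets, L2CAP, SCO).
 */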
static int __init bt_init(void)
{
	int err;

	BT_INFO("Core ver %s", VERSION);

	bt_debugfs = debugfs_create_dir("bluetooth", NULL);

	err = bt_sysfs_init();
	if (err < 0)
		return err;

	err = sock_register(&bt_sock_family_ops);
	if (err < 0) {
		bt_sysfs_cleanup();
		return err;
	}

	BT_INFO("HCI device and connection manager initialized");

	err = hci_sock_init();
	if (err < 0)
		goto error;

	err = l2cap_init();
	if (err < 0)
		goto sock_err;

	err = sco_init();
	if (err < 0) {
		l2cap_exit();
		goto sock_err;
	}

	return 0;

sock_err:
	hci_sock_cleanup();

error:
	sock_unregister(PF_BLUETOOTH);
	bt_sysfs_cleanup();

	return err;
}

static void __exit bt_exit(void)
{
	sco_exit();

	l2cap_exit();

	hci_sock_cleanup();

	sock_unregister(PF_BLUETOOTH);

	bt_sysfs_cleanup();

	debugfs_remove_recursive(bt_debugfs);
}

subsys_initcall(bt_init);
module_exit(bt_exit);

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);