/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth address family and sockets. */

#include <linux/module.h>
#include <asm/ioctls.h>

#include <net/bluetooth/bluetooth.h>
#include <linux/proc_fs.h>

#define VERSION "2.16"

/* Bluetooth sockets */
#define BT_MAX_PROTO	8
static const struct net_proto_family *bt_proto[BT_MAX_PROTO];
static DEFINE_RWLOCK(bt_proto_lock);

static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
static const char *const bt_key_strings[BT_MAX_PROTO] = {
	"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
	"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
	"sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"sk_lock-AF_BLUETOOTH-BTPROTO_BNEP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_CMTP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_HIDP",
	"sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
	"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
	"slock-AF_BLUETOOTH-BTPROTO_HCI",
	"slock-AF_BLUETOOTH-BTPROTO_SCO",
	"slock-AF_BLUETOOTH-BTPROTO_RFCOMM",
	"slock-AF_BLUETOOTH-BTPROTO_BNEP",
	"slock-AF_BLUETOOTH-BTPROTO_CMTP",
	"slock-AF_BLUETOOTH-BTPROTO_HIDP",
	"slock-AF_BLUETOOTH-BTPROTO_AVDTP",
};

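/* Re-key a socket's lockdep classes with the per-protocol names above, so
 * lockdep can tell, e.g., an L2CAP sk_lock apart from an RFCOMM one when
 * sockets of different Bluetooth protocols are locked in a nested fashion.
 */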
void bt_sock_reclassify_lock(struct sock *sk, int proto)
{
	BUG_ON(!sk);
	BUG_ON(sock_owned_by_user(sk));

	sock_lock_init_class_and_name(sk,
			bt_slock_key_strings[proto], &bt_slock_key[proto],
				bt_key_strings[proto], &bt_lock_key[proto]);
}
EXPORT_SYMBOL(bt_sock_reclassify_lock);

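/* Register/unregister a protocol's net_proto_family ops in the bt_proto
 * table, indexed by BTPROTO_* number and protected by bt_proto_lock.
 */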
int bt_sock_register(int proto, const struct net_proto_family *ops)
{
	int err = 0;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	write_lock(&bt_proto_lock);

	if (bt_proto[proto])
		err = -EEXIST;
	else
		bt_proto[proto] = ops;

	write_unlock(&bt_proto_lock);

	return err;
}
EXPORT_SYMBOL(bt_sock_register);

void bt_sock_unregister(int proto)
{
	if (proto < 0 || proto >= BT_MAX_PROTO)
		return;

	write_lock(&bt_proto_lock);
	bt_proto[proto] = NULL;
	write_unlock(&bt_proto_lock);
}
EXPORT_SYMBOL(bt_sock_unregister);

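/* socket(AF_BLUETOOTH, ...) entry point.  Only the initial network
 * namespace is supported.  An unregistered protocol triggers a
 * "bt-proto-%d" module request; the registered ->create() handler is then
 * called under bt_proto_lock with the owning module pinned.
 */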
static int bt_sock_create(struct net *net, struct socket *sock, int proto,
			  int kern)
{
	int err;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (proto < 0 || proto >= BT_MAX_PROTO)
		return -EINVAL;

	if (!bt_proto[proto])
		request_module("bt-proto-%d", proto);

	err = -EPROTONOSUPPORT;

	read_lock(&bt_proto_lock);

	if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) {
		err = bt_proto[proto]->create(net, sock, proto, kern);
		if (!err)
			bt_sock_reclassify_lock(sock->sk, proto);
		module_put(bt_proto[proto]->owner);
	}

	read_unlock(&bt_proto_lock);

	return err;
}

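/* Add/remove a socket on a protocol's global socket list, which feeds the
 * /proc/net seq_file output below.
 */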
void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_add_node(sk, &l->head);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_link);

void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
{
	write_lock(&l->lock);
	sk_del_node_init(sk);
	write_unlock(&l->lock);
}
EXPORT_SYMBOL(bt_sock_unlink);

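/* Queue a newly created child socket on its listening parent's accept
 * queue; the child holds a reference until it is unlinked again.
 */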
void bt_accept_enqueue(struct sock *parent, struct sock *sk)
{
	BT_DBG("parent %p, sk %p", parent, sk);

	sock_hold(sk);
	list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
	bt_sk(sk)->parent = parent;
	parent->sk_ack_backlog++;
}
EXPORT_SYMBOL(bt_accept_enqueue);

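/* Take a socket off its parent's accept queue and drop the reference
 * taken by bt_accept_enqueue().
 */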
void bt_accept_unlink(struct sock *sk)
{
	BT_DBG("sk %p state %d", sk, sk->sk_state);

	list_del_init(&bt_sk(sk)->accept_q);
	bt_sk(sk)->parent->sk_ack_backlog--;
	bt_sk(sk)->parent = NULL;
	sock_put(sk);
}
EXPORT_SYMBOL(bt_accept_unlink);

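/* Walk the parent's accept queue and return the first child that can be
 * handed out: one in BT_CONNECTED, or any child when no new socket is
 * supplied or the parent has BT_SK_DEFER_SETUP set.  Children already in
 * BT_CLOSED are dropped from the queue along the way.
 */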
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
{
	struct list_head *p, *n;
	struct sock *sk;

	BT_DBG("parent %p", parent);

	list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
		sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);

		lock_sock(sk);

		/* FIXME: Is this check still needed */
		if (sk->sk_state == BT_CLOSED) {
			release_sock(sk);
			bt_accept_unlink(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECTED || !newsock ||
		    test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
			bt_accept_unlink(sk);
			if (newsock)
				sock_graft(sk, newsock);

			release_sock(sk);
			return sk;
		}

		release_sock(sk);
	}

	return NULL;
}
EXPORT_SYMBOL(bt_accept_dequeue);

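/* Common datagram recvmsg: dequeue one skb, copy as much as fits into the
 * caller's iovec (setting MSG_TRUNC when the datagram is larger), and let
 * the protocol fill in the source address via skb_msg_name, if provided.
 */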
int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
				struct msghdr *msg, size_t len, int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	size_t copied;
	int err;

	BT_DBG("sock %p sk %p len %zu", sock, sk, len);

	if (flags & (MSG_OOB))
		return -EOPNOTSUPP;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb) {
		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			msg->msg_namelen = 0;
			return 0;
		}
		return err;
	}

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err == 0) {
		sock_recv_ts_and_drops(msg, sk, skb);

		if (bt_sk(sk)->skb_msg_name)
			bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
						&msg->msg_namelen);
		else
			msg->msg_namelen = 0;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
EXPORT_SYMBOL(bt_sock_recvmsg);

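/* Sleep (with the socket lock dropped and SOCK_ASYNC_WAITDATA set) until
 * data arrives on the receive queue, the socket errors out or shuts down,
 * a signal is pending, or the timeout expires.  Returns the remaining
 * timeout.
 */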
static long bt_sock_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!skb_queue_empty(&sk->sk_receive_queue))
			break;

		if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
			break;

		if (signal_pending(current) || !timeo)
			break;

		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return timeo;
}

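/* Stream recvmsg: keep consuming skbs (including their frag list) until
 * the low-water mark is satisfied, the timeout runs out or the queue
 * drains; a partially consumed or merely peeked skb is requeued at the
 * head of the receive queue.
 */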
int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	int err = 0;
	size_t target, copied = 0;
	long timeo;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	msg->msg_namelen = 0;

	BT_DBG("sk %p size %zu", sk, size);

	lock_sock(sk);

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo  = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		struct sk_buff *skb;
		int chunk;

		skb = skb_dequeue(&sk->sk_receive_queue);
		if (!skb) {
			if (copied >= target)
				break;

			err = sock_error(sk);
			if (err)
				break;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			err = -EAGAIN;
			if (!timeo)
				break;

			timeo = bt_sock_data_wait(sk, timeo);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				goto out;
			}
			continue;
		}

		chunk = min_t(unsigned int, skb->len, size);
		if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) {
			skb_queue_head(&sk->sk_receive_queue, skb);
			if (!copied)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size   -= chunk;

		sock_recv_ts_and_drops(msg, sk, skb);

		if (!(flags & MSG_PEEK)) {
			int skb_len = skb_headlen(skb);

			if (chunk <= skb_len) {
				__skb_pull(skb, chunk);
			} else {
				struct sk_buff *frag;

				__skb_pull(skb, skb_len);
				chunk -= skb_len;

				skb_walk_frags(skb, frag) {
					if (chunk <= frag->len) {
						/* Pulling partial data */
						skb->len -= chunk;
						skb->data_len -= chunk;
						__skb_pull(frag, chunk);
						break;
					} else if (frag->len) {
						/* Pulling all frag data */
						chunk -= frag->len;
						skb->len -= frag->len;
						skb->data_len -= frag->len;
						__skb_pull(frag, frag->len);
					}
				}
			}

			if (skb->len) {
				skb_queue_head(&sk->sk_receive_queue, skb);
				break;
			}
			kfree_skb(skb);

		} else {
			/* put message back and return */
			skb_queue_head(&sk->sk_receive_queue, skb);
			break;
		}
	} while (size);

out:
	release_sock(sk);
	return copied ? : err;
}
EXPORT_SYMBOL(bt_sock_stream_recvmsg);

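/* Poll helper for listening sockets: report readable as soon as a child
 * on the accept queue can be accepted.
 */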
static inline unsigned int bt_accept_poll(struct sock *parent)
{
	struct list_head *p, *n;
	struct sock *sk;

	list_for_each_safe(p, n, &bt_sk(parent)->accept_q) {
		sk = (struct sock *) list_entry(p, struct bt_sock, accept_q);
		if (sk->sk_state == BT_CONNECTED ||
		    (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) &&
		     sk->sk_state == BT_CONNECT2))
			return POLLIN | POLLRDNORM;
	}

	return 0;
}

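/* Common poll implementation: listening sockets delegate to
 * bt_accept_poll(); otherwise the POLL* mask is derived from pending
 * errors, shutdown bits, socket state, receive-queue contents and
 * writability.
 */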
unsigned int bt_sock_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	poll_wait(file, sk_sleep(sk), wait);

	if (sk->sk_state == BT_LISTEN)
		return bt_accept_poll(sk);

	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;

	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	if (sk->sk_state == BT_CLOSED)
		mask |= POLLHUP;

	if (sk->sk_state == BT_CONNECT ||
			sk->sk_state == BT_CONNECT2 ||
			sk->sk_state == BT_CONFIG)
		return mask;

	if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
EXPORT_SYMBOL(bt_sock_poll);

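/* Common ioctl handler: TIOCOUTQ reports free send-buffer space, TIOCINQ
 * the length of the next queued skb, and SIOCGSTAMP/SIOCGSTAMPNS the last
 * packet timestamp; anything else is left to the caller via -ENOIOCTLCMD.
 */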
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	long amount;
	int err;

	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

	switch (cmd) {
	case TIOCOUTQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *) arg);
		break;

	case TIOCINQ:
		if (sk->sk_state == BT_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		skb = skb_peek(&sk->sk_receive_queue);
		amount = skb ? skb->len : 0;
		release_sock(sk);
		err = put_user(amount, (int __user *) arg);
		break;

	case SIOCGSTAMP:
		err = sock_get_timestamp(sk, (struct timeval __user *) arg);
		break;

	case SIOCGSTAMPNS:
		err = sock_get_timestampns(sk, (struct timespec __user *) arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
EXPORT_SYMBOL(bt_sock_ioctl);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("sk %p", sk);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (sk->sk_state != state) {
		if (!timeo) {
			err = -EINPROGRESS;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
EXPORT_SYMBOL(bt_sock_wait_state);

/* This function expects the sk lock to be held when called */
int bt_sock_wait_ready(struct sock *sk, unsigned long flags)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo;
	int err = 0;

	BT_DBG("sk %p", sk);

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
EXPORT_SYMBOL(bt_sock_wait_ready);

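/* /proc/net support: each protocol can register one file that lists its
 * sockets through the seq_file operations below, with an optional
 * protocol-specific column supplied via custom_seq_show.
 */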
#ifdef CONFIG_PROC_FS
struct bt_seq_state {
	struct bt_sock_list *l;
};

static void *bt_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(seq->private->l->lock)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	read_lock(&l->lock);
	return seq_hlist_start_head(&l->head, *pos);
}

static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	return seq_hlist_next(v, &l->head, pos);
}

static void bt_seq_stop(struct seq_file *seq, void *v)
	__releases(seq->private->l->lock)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	read_unlock(&l->lock);
}

static int bt_seq_show(struct seq_file *seq, void *v)
{
	struct bt_seq_state *s = seq->private;
	struct bt_sock_list *l = s->l;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "sk               RefCnt Rmem   Wmem   User   Inode  Src Dst Parent");

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	} else {
		struct sock *sk = sk_entry(v);
		struct bt_sock *bt = bt_sk(sk);

		seq_printf(seq,
			   "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
			   sk,
			   atomic_read(&sk->sk_refcnt),
			   sk_rmem_alloc_get(sk),
			   sk_wmem_alloc_get(sk),
			   from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
			   sock_i_ino(sk),
			   bt->parent ? sock_i_ino(bt->parent) : 0LU);

		if (l->custom_seq_show) {
			seq_putc(seq, ' ');
			l->custom_seq_show(seq, v);
		}

		seq_putc(seq, '\n');
	}
	return 0;
}

static struct seq_operations bt_seq_ops = {
	.start = bt_seq_start,
	.next  = bt_seq_next,
	.stop  = bt_seq_stop,
	.show  = bt_seq_show,
};

static int bt_seq_open(struct inode *inode, struct file *file)
{
	struct bt_sock_list *sk_list;
	struct bt_seq_state *s;

	sk_list = PDE_DATA(inode);
	s = __seq_open_private(file, &bt_seq_ops,
			       sizeof(struct bt_seq_state));
	if (!s)
		return -ENOMEM;

	s->l = sk_list;
	return 0;
}

static const struct file_operations bt_fops = {
	.open = bt_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private
};

int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	sk_list->custom_seq_show = seq_show;

	if (!proc_create_data(name, 0, net->proc_net, &bt_fops, sk_list))
		return -ENOMEM;
	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
	remove_proc_entry(name, net->proc_net);
}
#else
int bt_procfs_init(struct net *net, const char *name,
		   struct bt_sock_list *sk_list,
		   int (*seq_show)(struct seq_file *, void *))
{
	return 0;
}

void bt_procfs_cleanup(struct net *net, const char *name)
{
}
#endif
EXPORT_SYMBOL(bt_procfs_init);
EXPORT_SYMBOL(bt_procfs_cleanup);

static struct net_proto_family bt_sock_family_ops = {
	.owner	= THIS_MODULE,
	.family	= PF_BLUETOOTH,
	.create	= bt_sock_create,
};

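/* Module init: bring up Bluetooth sysfs, register PF_BLUETOOTH with the
 * socket layer, then initialize the HCI, L2CAP and SCO socket front ends,
 * unwinding in reverse order on failure.
 */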
static int __init bt_init(void)
{
	int err;

	BT_INFO("Core ver %s", VERSION);

	err = bt_sysfs_init();
	if (err < 0)
		return err;

	err = sock_register(&bt_sock_family_ops);
	if (err < 0) {
		bt_sysfs_cleanup();
		return err;
	}

	BT_INFO("HCI device and connection manager initialized");

	err = hci_sock_init();
	if (err < 0)
		goto error;

	err = l2cap_init();
	if (err < 0)
		goto sock_err;

	err = sco_init();
	if (err < 0) {
		l2cap_exit();
		goto sock_err;
	}

	return 0;

sock_err:
	hci_sock_cleanup();

error:
	sock_unregister(PF_BLUETOOTH);
	bt_sysfs_cleanup();

	return err;
}

static void __exit bt_exit(void)
{
	sco_exit();

	l2cap_exit();

	hci_sock_cleanup();

	sock_unregister(PF_BLUETOOTH);

	bt_sysfs_cleanup();
}

subsys_initcall(bt_init);
module_exit(bt_exit);

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_BLUETOOTH);