xref: /openbmc/linux/include/net/bluetooth/hci_core.h (revision df2634f43f5106947f3735a0b61a6527a4b278cd)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>

/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
	__u8		ssp_mode;
};

struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};

struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;
	struct inquiry_entry	*list;
};

struct hci_conn_hash {
	struct list_head list;
	spinlock_t       lock;
	unsigned int     acl_num;
	unsigned int     sco_num;
};

struct bdaddr_list {
	struct list_head list;
	bdaddr_t bdaddr;
};

#define NUM_REASSEMBLY 4

struct hci_dev {
	struct list_head list;
	spinlock_t	lock;
	atomic_t	refcnt;

	char		name[8];
	unsigned long	flags;
	__u16		id;
	__u8		bus;
	__u8		dev_type;
	bdaddr_t	bdaddr;
	__u8		dev_name[248];
	__u8		dev_class[3];
	__u8		features[8];
	__u8		commands[64];
	__u8		ssp_mode;
	__u8		hci_ver;
	__u16		hci_rev;
	__u16		manufacturer;
	__u16		voice_setting;

	__u16		pkt_type;
	__u16		esco_type;
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned long	quirks;

	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	struct workqueue_struct	*workqueue;

	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;
	struct sk_buff		*reassembly[NUM_REASSEMBLY];

	struct mutex		req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;
	__u16			req_last_cmd;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;
	struct list_head	blacklist;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;
	void			*core_data;

	atomic_t		promisc;

	struct dentry		*debugfs;

	struct device		*parent;
	struct device		dev;

	struct rfkill		*rfkill;

	struct module		*owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
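
/*
 * Example (illustrative sketch only, not part of this header): a hypothetical
 * transport driver "foo" fills in the hci_dev callbacks above and registers
 * the device.  Everything prefixed foo_, and the parent device pointer, are
 * assumptions made for the sake of the example.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus	= HCI_USB;
 *	hdev->owner	= THIS_MODULE;
 *	hdev->driver_data = foo;		 (driver private state)
 *	hdev->open	= foo_open;		 (int foo_open(struct hci_dev *hdev))
 *	hdev->close	= foo_close;
 *	hdev->flush	= foo_flush;
 *	hdev->send	= foo_send;		 (int foo_send(struct sk_buff *skb))
 *	hdev->destruct	= foo_destruct;
 *	SET_HCIDEV_DEV(hdev, foo_parent_dev);	 (e.g. &intf->dev for a USB driver)
 *
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -ENODEV;
 *	}
 */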

struct hci_conn {
	struct list_head list;

	atomic_t	 refcnt;
	spinlock_t	 lock;

	bdaddr_t	 dst;
	__u16		 handle;
	__u16		 state;
	__u8             mode;
	__u8		 type;
	__u8		 out;
	__u8		 attempt;
	__u8		 dev_class[3];
	__u8             features[8];
	__u8             ssp_mode;
	__u16            interval;
	__u16            pkt_type;
	__u16            link_policy;
	__u32		 link_mode;
	__u8             auth_type;
	__u8             sec_level;
	__u8		 pending_sec_level;
	__u8             power_save;
	__u16            disc_timeout;
	unsigned long	 pend;

	unsigned int	 sent;

	struct sk_buff_head data_q;

	struct timer_list disc_timer;
	struct timer_list idle_timer;

	struct work_struct work_add;
	struct work_struct work_del;

	struct device	dev;
	atomic_t	devref;

	struct hci_dev	*hdev;
	void		*l2cap_data;
	void		*sco_data;
	void		*priv;

	struct hci_conn	*link;
};

extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   /* 60 seconds */

#define inquiry_cache_lock(c)		spin_lock(&c->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)

static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return c->list == NULL;
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
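
/*
 * Example (illustrative only): consulting the inquiry cache for a previously
 * discovered peer.  The core accesses the cache with the device lock held
 * (hci_dev_lock_bh(), defined further down); "bdaddr" and "dev_class" are
 * assumed local variables.
 *
 *	struct inquiry_entry *ie;
 *
 *	hci_dev_lock_bh(hdev);
 *	ie = hci_inquiry_cache_lookup(hdev, &bdaddr);
 *	if (ie && inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX)
 *		memcpy(dev_class, ie->data.dev_class, 3);
 *	hci_dev_unlock_bh(hdev);
 */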

/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
	HCI_CONN_SCO_SETUP_PEND,
};

static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
					__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
					__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
					__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->state == state)
			return c;
	}
	return NULL;
}
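
/*
 * Example (illustrative only): looking up an existing ACL connection to a
 * peer.  The connection list is walked under the device lock in the core;
 * "bdaddr" is an assumed peer address, and the reference is taken with
 * hci_conn_hold() (defined below) so the connection outlives the unlock.
 *
 *	struct hci_conn *conn;
 *
 *	hci_dev_lock_bh(hdev);
 *	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &bdaddr);
 *	if (conn)
 *		hci_conn_hold(conn);
 *	hci_dev_unlock_bh(hdev);
 */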

void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

void hci_conn_hold_device(struct hci_conn *conn);
void hci_conn_put_device(struct hci_conn *conn);

static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(conn->disc_timeout);
				if (!conn->out)
					timeo *= 2;
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}
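
/*
 * Example (illustrative only): hci_conn_hold() and hci_conn_put() must be
 * balanced.  Dropping the last reference does not free the connection; it
 * only (re)arms disc_timer, so the link is torn down disc_timeout
 * milliseconds later unless someone takes a new reference first.
 *
 *	hci_conn_hold(conn);
 *	... use conn, e.g. hci_send_acl(conn, skb, flags) ...
 *	hci_conn_put(conn);
 */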

/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}

#define hci_dev_lock(d)		spin_lock(&d->lock)
#define hci_dev_unlock(d)	spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
int hci_blacklist_clear(struct hci_dev *hdev);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

int hci_recv_frame(struct sk_buff *skb);
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count);

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))

/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)
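
/*
 * Example (illustrative only): feature gating with the lmp_*_capable()
 * helpers, which test bits of hdev->features (filled in from the controller's
 * supported-features page).  The SCO set-up path in the core follows this
 * pattern; "sco" is the SCO/eSCO hci_conn and "handle" the ACL connection
 * handle, both assumed here.
 *
 *	if (lmp_esco_capable(hdev))
 *		hci_setup_sync(sco, handle);	(Setup Synchronous Connection)
 *	else
 *		hci_add_sco(sco, handle);	(legacy Add SCO Connection)
 */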

/* ----- HCI protocols ----- */
struct hci_proto {
	char		*name;
	unsigned int	id;
	unsigned long	flags;

	void		*priv;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn);
	int (*disconn_cfm)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
};

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	register struct hci_proto *hp;
	int reason = 0x13;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	return reason;
}

static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;
	__u8 encrypt;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}

int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
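
/*
 * Example (illustrative only): how an upper protocol hooks into the core.
 * The id must be one of the HCI_PROTO_* slots defined at the top of this
 * file; the foo_* callbacks are assumptions standing in for a real protocol
 * implementation such as L2CAP or SCO.
 *
 *	static struct hci_proto foo_proto = {
 *		.name		= "FOO",
 *		.id		= HCI_PROTO_L2CAP,
 *		.connect_ind	= foo_connect_ind,
 *		.connect_cfm	= foo_connect_cfm,
 *		.disconn_ind	= foo_disconn_ind,
 *		.disconn_cfm	= foo_disconn_cfm,
 *		.recv_acldata	= foo_recv_acldata,
 *		.security_cfm	= foo_security_cfm,
 *	};
 *
 *	err = hci_register_proto(&foo_proto);	(on module init)
 *	...
 *	hci_unregister_proto(&foo_proto);	(on module exit)
 */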

/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;
	__u8 encrypt;

	hci_proto_auth_cfm(conn, status);

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	if (conn->sec_level == BT_SECURITY_SDP)
		conn->sec_level = BT_SECURITY_LOW;

	hci_proto_encrypt_cfm(conn, status, encrypt);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
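
/*
 * Example (illustrative only): a consumer that wants security and role-switch
 * notifications registers a struct hci_cb; the list is walked in the
 * hci_*_cfm() helpers above.  The foo_* handlers are assumptions.
 *
 *	static struct hci_cb foo_cb = {
 *		.name		 = "FOO",
 *		.security_cfm	 = foo_security_cfm,
 *		.role_switch_cfm = foo_role_switch_cfm,
 *	};
 *
 *	hci_register_cb(&foo_cb);
 *	...
 *	hci_unregister_cb(&foo_cb);
 */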

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
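
/*
 * Example (illustrative only): queueing an HCI command.  hci_send_cmd()
 * builds the command packet and copies the parameter block, so a stack
 * variable can be passed; HCI_OP_WRITE_SCAN_ENABLE and the SCAN_* constants
 * come from hci.h.
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */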

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);

/* Management interface */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t len);
int mgmt_index_added(u16 index);
int mgmt_index_removed(u16 index);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
	unsigned short    channel;
};

/* HCI security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* ----- HCI requests ----- */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result);

#endif /* __HCI_CORE_H */