xref: /openbmc/linux/include/net/bluetooth/hci_core.h (revision 78c99ba1)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>

/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
	__u8		ssp_mode;
};

struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};

struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;
	struct inquiry_entry	*list;
};

struct hci_conn_hash {
	struct list_head list;
	spinlock_t       lock;
	unsigned int     acl_num;
	unsigned int     sco_num;
};

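/* One hci_dev is allocated per HCI transport (USB, UART, ...).  It carries
 * the controller's identity and capabilities, the per-packet-type flow
 * control counters (acl_cnt/sco_cnt against the acl_pkts/sco_pkts limits
 * reported by the controller), the rx/tx/cmd queues and the tasklets that
 * drain them, plus the driver callbacks at the end of the structure. */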
struct hci_dev {
	struct list_head list;
	spinlock_t	lock;
	atomic_t	refcnt;

	char		name[8];
	unsigned long	flags;
	__u16		id;
	__u8		type;
	bdaddr_t	bdaddr;
	__u8		dev_name[248];
	__u8		dev_class[3];
	__u8		features[8];
	__u8		commands[64];
	__u8		ssp_mode;
	__u8		hci_ver;
	__u16		hci_rev;
	__u16		manufacturer;
	__u16		voice_setting;

	__u16		pkt_type;
	__u16		esco_type;
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned long	quirks;

	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;
	struct sk_buff		*reassembly[3];

	struct semaphore	req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;
	void			*core_data;

	atomic_t		promisc;

	struct device		*parent;
	struct device		dev;

	struct module		*owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};

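/* One hci_conn per ACL or SCO/eSCO link.  The handle comes from the
 * Connection Complete event, data_q holds outbound frames until the tx
 * tasklet has credits for them, and the refcount together with disc_timer
 * (see hci_conn_hold/hci_conn_put below) decides when the link is torn
 * down. */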
struct hci_conn {
	struct list_head list;

	atomic_t	 refcnt;
	spinlock_t	 lock;

	bdaddr_t	 dst;
	__u16		 handle;
	__u16		 state;
	__u8             mode;
	__u8		 type;
	__u8		 out;
	__u8		 attempt;
	__u8		 dev_class[3];
	__u8             features[8];
	__u8             ssp_mode;
	__u16            interval;
	__u16            pkt_type;
	__u16            link_policy;
	__u32		 link_mode;
	__u8             auth_type;
	__u8             sec_level;
	__u8             power_save;
	__u16            disc_timeout;
	unsigned long	 pend;

	unsigned int	 sent;

	struct sk_buff_head data_q;

	struct timer_list disc_timer;
	struct timer_list idle_timer;

	struct work_struct work_add;
	struct work_struct work_del;

	struct device	dev;

	struct hci_dev	*hdev;
	void		*l2cap_data;
	void		*sco_data;
	void		*priv;

	struct hci_conn	*link;
};

extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   /* 60 seconds */

#define inquiry_cache_lock(c)		spin_lock(&c->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)

static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return (c->list == NULL);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

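/* hci_inquiry_cache_lookup() finds a cached entry by remote address;
 * hci_inquiry_cache_update() refreshes the cache with data parsed from
 * inquiry result events.  Both are implemented in hci_core.c. */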
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);

/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
};

static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}

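/* Note: the lookup helpers below walk the connection list without taking
 * h->lock themselves; callers are expected to provide their own
 * serialisation (in practice the hci_dev lock / tasklet context). */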
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
					__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
					__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
					__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->state == state)
			return c;
	}
	return NULL;
}

void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);
void hci_setup_sync(struct hci_conn *conn, __u16 handle);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

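/* Connection refcounting: hci_conn_hold() takes a reference and cancels any
 * pending disconnect, hci_conn_put() drops it and, on the last put, re-arms
 * disc_timer: after disc_timeout ms for an established ACL link (doubled
 * for incoming ones), after 10 ms otherwise. */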
static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(conn->disc_timeout);
				if (!conn->out)
					timeo *= 2;
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}

/* ----- HCI tasks ----- */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->cmd_task);
}

static inline void hci_sched_rx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->rx_task);
}

static inline void hci_sched_tx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->tx_task);
}

/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}
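
/* hci_dev_hold() pins both the device refcount and the driver module; it
 * returns NULL if the module is already being unloaded.  Every successful
 * hold must be balanced by hci_dev_put(), which drops the module reference
 * and calls the driver's destruct() once the last reference goes away. */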

#define hci_dev_lock(d)		spin_lock(&d->lock)
#define hci_dev_unlock(d)	spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

/* Receive frame from HCI drivers */
static inline int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
			&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	hci_sched_rx(hdev);
	return 0;
}
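
/* Illustrative only (not part of this API): a transport driver's receive
 * path typically builds an skb per HCI packet, tags it with the packet
 * type and owning device, and hands it to hci_recv_frame(), e.g.:
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 */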

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

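/* Drivers set the sysfs parent with SET_HCIDEV_DEV() before calling
 * hci_register_dev(), so the new hci device shows up underneath its
 * transport (USB interface, platform device, ...) in sysfs. */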
#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))

/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)

/* ----- HCI protocols ----- */
struct hci_proto {
	char		*name;
	unsigned int	id;
	unsigned long	flags;

	void		*priv;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn);
	int (*disconn_cfm)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
};
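
/* The core keeps one hci_proto slot per upper protocol (indexed by
 * HCI_PROTO_L2CAP / HCI_PROTO_SCO); the hci_proto_*() helpers below fan
 * the corresponding events out to whichever protocols have registered. */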

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	register struct hci_proto *hp;
	int reason = 0x13;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		reason = hp->disconn_ind(conn);

	return reason;
}

static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_cfm)
		hp->disconn_cfm(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;
	__u8 encrypt;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->security_cfm)
		hp->security_cfm(conn, status, encrypt);
}

int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
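
/* Illustrative only: an upper protocol registers by filling in a
 * struct hci_proto and passing it to hci_register_proto(), roughly:
 *
 *	static struct hci_proto sample_proto = {
 *		.name		= "SAMPLE",
 *		.id		= HCI_PROTO_L2CAP,
 *		.connect_ind	= sample_connect_ind,
 *		.connect_cfm	= sample_connect_cfm,
 *		.disconn_ind	= sample_disconn_ind,
 *		.recv_acldata	= sample_recv_acldata,
 *	};
 *
 *	hci_register_proto(&sample_proto);
 *
 * The sample_* names are placeholders, not functions defined anywhere. */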

/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};
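
/* hci_cb entries registered through hci_register_cb() live on hci_cb_list
 * (protected by hci_cb_list_lock) and are invoked by the hci_*_cfm()
 * helpers below in addition to the registered protocols. */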

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;
	__u8 encrypt;

	hci_proto_auth_cfm(conn, status);

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return;

	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	if (conn->sec_level == BT_SECURITY_SDP)
		conn->sec_level = BT_SECURITY_LOW;

	hci_proto_encrypt_cfm(conn, status, encrypt);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
};
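
/* hci_pi() gives access to the raw HCI socket's private data: hdev is the
 * device the socket is bound to, filter selects which packet types and
 * events it receives, and cmsg_mask which ancillary data is delivered. */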

/* HCI security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};
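
/* The security filter restricts what unprivileged HCI sockets may send and
 * receive: type_mask/event_mask whitelist packet types and events, and
 * ocf_mask holds one 128-bit command bitmap per OGF up to HCI_SFLT_MAX_OGF. */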

/* ----- HCI requests ----- */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		down(&d->req_lock)
#define hci_req_unlock(d)	up(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, int result);
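
/* req_lock serialises whole HCI request sequences per device, while
 * req_wait_q/req_status/req_result carry the outcome back from the event
 * handler, which signals completion through hci_req_complete(). */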

#endif /* __HCI_CORE_H */