xref: /openbmc/linux/include/net/bluetooth/hci_core.h (revision b627b4ed)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 #ifndef __HCI_CORE_H
26 #define __HCI_CORE_H
27 
28 #include <net/bluetooth/hci.h>
29 
30 /* HCI upper protocols */
31 #define HCI_PROTO_L2CAP	0
32 #define HCI_PROTO_SCO	1
33 
34 /* HCI Core structures */
/* One device discovered during inquiry, as reported by the
 * controller's inquiry-result events.
 */
struct inquiry_data {
	bdaddr_t	bdaddr;			/* remote device address */
	__u8		pscan_rep_mode;		/* page scan repetition mode */
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];		/* class of device */
	__le16		clock_offset;		/* little endian, as on the wire */
	__s8		rssi;			/* signal strength, when reported */
	__u8		ssp_mode;		/* simple pairing support, when reported */
};
45 
/* Node in the per-device inquiry cache: the discovered device plus the
 * time its data was last refreshed (see inquiry_entry_age()).
 */
struct inquiry_entry {
	struct inquiry_entry	*next;		/* singly linked cache list */
	__u32			timestamp;	/* jiffies of last update */
	struct inquiry_data	data;
};
51 
/* Per-hdev cache of inquiry results, guarded by its own spinlock
 * (see the inquiry_cache_lock*() macros below).
 */
struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;	/* jiffies of last cache update */
	struct inquiry_entry	*list;		/* entry list head, NULL when empty */
};
57 
/* Set of active connections on one hdev with per-link-type counters.
 * Despite the name this is a plain linked list, not a hash table.
 */
struct hci_conn_hash {
	struct list_head list;		/* all hci_conn objects on the device */
	spinlock_t       lock;
	unsigned int     acl_num;	/* number of ACL links */
	unsigned int     sco_num;	/* number of non-ACL (SCO/eSCO) links */
};
64 
/* Per-controller state for one Bluetooth HCI device. */
struct hci_dev {
	struct list_head list;		/* node in global hci_dev_list */
	spinlock_t	lock;		/* see hci_dev_lock*() macros */
	atomic_t	refcnt;		/* see __hci_dev_hold()/__hci_dev_put() */

	char		name[8];
	unsigned long	flags;		/* HCI_UP, HCI_INIT, ... bits */
	__u16		id;
	__u8		type;
	bdaddr_t	bdaddr;		/* the controller's own address */
	__u8		dev_name[248];	/* local friendly name */
	__u8		dev_class[3];
	__u8		features[8];	/* LMP features, see lmp_*_capable() */
	__u8		commands[64];	/* supported-commands bitmap */
	__u8		ssp_mode;
	__u8		hci_ver;
	__u16		hci_rev;
	__u16		manufacturer;
	__u16		voice_setting;

	__u16		pkt_type;
	__u16		esco_type;
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned long	quirks;		/* driver-specific behavior tweaks */

	/* Flow control: remaining command/data credits */
	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	/* Timestamps of last transmissions per queue */
	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	/* Deferred work, kicked via hci_sched_cmd()/_rx()/_tx() */
	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;	/* last sent command, for hci_sent_cmd_data() */
	struct sk_buff		*reassembly[3];	/* fragment reassembly buffers */

	/* Synchronous request machinery (see hci_req_lock()/hci_req_complete()) */
	struct semaphore	req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;	/* HCI_REQ_DONE/PEND/CANCELED */
	__u32			req_result;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;	/* owned by the transport driver */
	void			*core_data;

	atomic_t 		promisc;

	struct device		*parent;	/* set via SET_HCIDEV_DEV() */
	struct device		dev;

	struct module 		*owner;		/* pinned by hci_dev_hold() */

	/* Transport driver callbacks */
	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);	/* invoked on last __hci_dev_put() */
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
150 
/* State of one baseband link (ACL or SCO/eSCO) on a controller. */
struct hci_conn {
	struct list_head list;		/* node in hdev->conn_hash.list */

	atomic_t	 refcnt;	/* see hci_conn_hold()/hci_conn_put() */
	spinlock_t	 lock;

	bdaddr_t	 dst;		/* remote device address */
	__u16		 handle;	/* HCI connection handle */
	__u16		 state;		/* BT_CONNECTED, ... */
	__u8             mode;
	__u8		 type;		/* ACL_LINK or a SCO link type */
	__u8		 out;		/* non-zero for locally initiated links */
	__u8		 attempt;
	__u8		 dev_class[3];
	__u8             features[8];	/* remote LMP features */
	__u8             ssp_mode;
	__u16            interval;
	__u16            pkt_type;
	__u16            link_policy;
	__u32		 link_mode;	/* HCI_LM_* bits, e.g. HCI_LM_ENCRYPT */
	__u8             auth_type;
	__u8             sec_level;	/* BT_SECURITY_* */
	__u8             power_save;
	unsigned long	 pend;		/* HCI_CONN_*_PEND bits */

	unsigned int	 sent;		/* presumably in-flight packet count — confirm in hci_core.c */

	struct sk_buff_head data_q;	/* outbound data queue */

	struct timer_list disc_timer;	/* delayed disconnect, armed by hci_conn_put() */
	struct timer_list idle_timer;

	struct work_struct work;

	struct device	dev;

	struct hci_dev	*hdev;		/* owning controller */
	void		*l2cap_data;	/* upper-protocol private state */
	void		*sco_data;
	void		*priv;

	struct hci_conn	*link;		/* NOTE(review): looks like a paired link (e.g. SCO's ACL) — confirm */
};
194 
195 extern struct hci_proto *hci_proto[];
196 extern struct list_head hci_dev_list;
197 extern struct list_head hci_cb_list;
198 extern rwlock_t hci_dev_list_lock;
199 extern rwlock_t hci_cb_list_lock;
200 
/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   /* 60 seconds */
204 
/* Lock helpers for an inquiry_cache; the argument is parenthesized so
 * the macros expand safely for any caller expression.
 */
#define inquiry_cache_lock(c)		spin_lock(&(c)->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&(c)->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&(c)->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&(c)->lock)
209 
/* Reset the inquiry cache of @hdev to the empty state. */
static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}
216 
217 static inline int inquiry_cache_empty(struct hci_dev *hdev)
218 {
219 	struct inquiry_cache *c = &hdev->inq_cache;
220 	return (c->list == NULL);
221 }
222 
223 static inline long inquiry_cache_age(struct hci_dev *hdev)
224 {
225 	struct inquiry_cache *c = &hdev->inq_cache;
226 	return jiffies - c->timestamp;
227 }
228 
/* Age of a single cached inquiry entry, in jiffies. */
static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}
233 
234 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
235 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
236 
237 /* ----- HCI Connections ----- */
/* Bit numbers for hci_conn->pend: operations issued to the controller
 * whose completion events are still outstanding.
 */
enum {
	HCI_CONN_AUTH_PEND,		/* authentication requested */
	HCI_CONN_ENCRYPT_PEND,		/* encryption change requested */
	HCI_CONN_RSWITCH_PEND,		/* role switch requested */
	HCI_CONN_MODE_CHANGE_PEND,	/* sniff/active mode change requested */
};
244 
/* Initialize the (empty) connection list and counters of @hdev. */
static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}
253 
/* Insert @c into the connection list of @hdev and bump the matching
 * per-type counter (anything that is not ACL counts as SCO).
 * NOTE(review): does not take h->lock itself — callers appear to rely
 * on external serialization; confirm against hci_core.c.
 */
static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}
263 
/* Remove @c from the connection list of @hdev and decrement the
 * matching per-type counter.  Locking as in hci_conn_hash_add().
 */
static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}
273 
274 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
275 					__u16 handle)
276 {
277 	struct hci_conn_hash *h = &hdev->conn_hash;
278 	struct list_head *p;
279 	struct hci_conn  *c;
280 
281 	list_for_each(p, &h->list) {
282 		c = list_entry(p, struct hci_conn, list);
283 		if (c->handle == handle)
284 			return c;
285 	}
286 	return NULL;
287 }
288 
289 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
290 					__u8 type, bdaddr_t *ba)
291 {
292 	struct hci_conn_hash *h = &hdev->conn_hash;
293 	struct list_head *p;
294 	struct hci_conn  *c;
295 
296 	list_for_each(p, &h->list) {
297 		c = list_entry(p, struct hci_conn, list);
298 		if (c->type == type && !bacmp(&c->dst, ba))
299 			return c;
300 	}
301 	return NULL;
302 }
303 
304 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
305 					__u8 type, __u16 state)
306 {
307 	struct hci_conn_hash *h = &hdev->conn_hash;
308 	struct list_head *p;
309 	struct hci_conn  *c;
310 
311 	list_for_each(p, &h->list) {
312 		c = list_entry(p, struct hci_conn, list);
313 		if (c->type == type && c->state == state)
314 			return c;
315 	}
316 	return NULL;
317 }
318 
319 void hci_acl_connect(struct hci_conn *conn);
320 void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
321 void hci_add_sco(struct hci_conn *conn, __u16 handle);
322 void hci_setup_sync(struct hci_conn *conn, __u16 handle);
323 
324 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
325 int hci_conn_del(struct hci_conn *conn);
326 void hci_conn_hash_flush(struct hci_dev *hdev);
327 void hci_conn_check_pending(struct hci_dev *hdev);
328 
329 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type);
330 int hci_conn_check_link_mode(struct hci_conn *conn);
331 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
332 int hci_conn_change_link_key(struct hci_conn *conn);
333 int hci_conn_switch_role(struct hci_conn *conn, __u8 role);
334 
335 void hci_conn_enter_active_mode(struct hci_conn *conn);
336 void hci_conn_enter_sniff_mode(struct hci_conn *conn);
337 
/* Take a reference on @conn and cancel any pending delayed disconnect
 * that a previous hci_conn_put() may have armed.
 */
static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}
343 
/* Drop a reference on @conn.  When the last reference goes away the
 * connection is not torn down immediately; the disconnect timer is
 * armed instead:
 *   - connected ACL link:  HCI_DISCONN_TIMEOUT ms, times 5 for
 *     incoming (!conn->out) links
 *   - anything else:       10 ms
 */
static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
				/* incoming links linger longer — presumably to
				 * let the remote end disconnect first; confirm */
				if (!conn->out)
					timeo *= 5;
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}
361 
362 /* ----- HCI tasks ----- */
/* Kick the command-queue tasklet of @hdev. */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->cmd_task);
}
367 
/* Kick the receive tasklet of @hdev. */
static inline void hci_sched_rx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->rx_task);
}
372 
/* Kick the transmit tasklet of @hdev. */
static inline void hci_sched_tx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->tx_task);
}
377 
378 /* ----- HCI Devices ----- */
/* Drop a device reference; the driver's destruct callback runs when the
 * last reference disappears.  Does not release the owner module — that
 * is hci_dev_put()'s job.
 */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}
384 
/* Drop a device reference taken with hci_dev_hold(), releasing the
 * owner module as well.
 */
static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}
390 
/* Take a device reference without pinning the owner module; returns @d
 * for call chaining.
 */
static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}
396 
397 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
398 {
399 	if (try_module_get(d->owner))
400 		return __hci_dev_hold(d);
401 	return NULL;
402 }
403 
/* Device lock helpers; the argument is parenthesized so the macros
 * expand safely for any caller expression.
 */
#define hci_dev_lock(d)		spin_lock(&(d)->lock)
#define hci_dev_unlock(d)	spin_unlock(&(d)->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&(d)->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&(d)->lock)
408 
409 struct hci_dev *hci_dev_get(int index);
410 struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
411 
412 struct hci_dev *hci_alloc_dev(void);
413 void hci_free_dev(struct hci_dev *hdev);
414 int hci_register_dev(struct hci_dev *hdev);
415 int hci_unregister_dev(struct hci_dev *hdev);
416 int hci_suspend_dev(struct hci_dev *hdev);
417 int hci_resume_dev(struct hci_dev *hdev);
418 int hci_dev_open(__u16 dev);
419 int hci_dev_close(__u16 dev);
420 int hci_dev_reset(__u16 dev);
421 int hci_dev_reset_stat(__u16 dev);
422 int hci_dev_cmd(unsigned int cmd, void __user *arg);
423 int hci_get_dev_list(void __user *arg);
424 int hci_get_dev_info(void __user *arg);
425 int hci_get_conn_list(void __user *arg);
426 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
427 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
428 int hci_inquiry(void __user *arg);
429 
430 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
431 
/* Receive frame from HCI drivers */
/*
 * Take ownership of @skb from the transport driver and queue it for the
 * RX tasklet.  The driver must have set skb->dev to its hci_dev.
 * Frames are freed and -ENXIO returned unless the device is up or still
 * initializing.
 */
static inline int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
			&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	hci_sched_rx(hdev);
	return 0;
}
453 
454 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
455 
456 int hci_register_sysfs(struct hci_dev *hdev);
457 void hci_unregister_sysfs(struct hci_dev *hdev);
458 void hci_conn_add_sysfs(struct hci_conn *conn);
459 void hci_conn_del_sysfs(struct hci_conn *conn);
460 
461 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
462 
/* ----- LMP capabilities ----- */
/* Feature-bit tests against the LMP features block (dev->features for a
 * hci_dev, conn->features for a remote).  Byte index is the LMP features
 * page byte the flag lives in.
 */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
#define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)
470 
471 /* ----- HCI protocols ----- */
/* Upper-protocol registration (L2CAP, SCO).  Registered instances live
 * in the hci_proto[] array indexed by HCI_PROTO_*; every callback is
 * optional and checked for NULL before use.
 */
struct hci_proto {
	char		*name;
	unsigned int	id;		/* HCI_PROTO_L2CAP or HCI_PROTO_SCO */
	unsigned long	flags;

	void		*priv;

	/* incoming connection request: return value is OR-ed into the
	 * accept mask (see hci_proto_connect_ind()) */
	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	/* returns the disconnect reason code to report */
	int (*disconn_ind)	(struct hci_conn *conn);
	int (*disconn_cfm)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	/* auth/encrypt completion, shared by both paths */
	int (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
};
487 
488 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
489 {
490 	register struct hci_proto *hp;
491 	int mask = 0;
492 
493 	hp = hci_proto[HCI_PROTO_L2CAP];
494 	if (hp && hp->connect_ind)
495 		mask |= hp->connect_ind(hdev, bdaddr, type);
496 
497 	hp = hci_proto[HCI_PROTO_SCO];
498 	if (hp && hp->connect_ind)
499 		mask |= hp->connect_ind(hdev, bdaddr, type);
500 
501 	return mask;
502 }
503 
504 static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
505 {
506 	register struct hci_proto *hp;
507 
508 	hp = hci_proto[HCI_PROTO_L2CAP];
509 	if (hp && hp->connect_cfm)
510 		hp->connect_cfm(conn, status);
511 
512 	hp = hci_proto[HCI_PROTO_SCO];
513 	if (hp && hp->connect_cfm)
514 		hp->connect_cfm(conn, status);
515 }
516 
517 static inline int hci_proto_disconn_ind(struct hci_conn *conn)
518 {
519 	register struct hci_proto *hp;
520 	int reason = 0x13;
521 
522 	hp = hci_proto[HCI_PROTO_L2CAP];
523 	if (hp && hp->disconn_ind)
524 		reason = hp->disconn_ind(conn);
525 
526 	hp = hci_proto[HCI_PROTO_SCO];
527 	if (hp && hp->disconn_ind)
528 		reason = hp->disconn_ind(conn);
529 
530 	return reason;
531 }
532 
533 static inline void hci_proto_disconn_cfm(struct hci_conn *conn, __u8 reason)
534 {
535 	register struct hci_proto *hp;
536 
537 	hp = hci_proto[HCI_PROTO_L2CAP];
538 	if (hp && hp->disconn_cfm)
539 		hp->disconn_cfm(conn, reason);
540 
541 	hp = hci_proto[HCI_PROTO_SCO];
542 	if (hp && hp->disconn_cfm)
543 		hp->disconn_cfm(conn, reason);
544 }
545 
546 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
547 {
548 	register struct hci_proto *hp;
549 	__u8 encrypt;
550 
551 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
552 		return;
553 
554 	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
555 
556 	hp = hci_proto[HCI_PROTO_L2CAP];
557 	if (hp && hp->security_cfm)
558 		hp->security_cfm(conn, status, encrypt);
559 
560 	hp = hci_proto[HCI_PROTO_SCO];
561 	if (hp && hp->security_cfm)
562 		hp->security_cfm(conn, status, encrypt);
563 }
564 
565 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
566 {
567 	register struct hci_proto *hp;
568 
569 	hp = hci_proto[HCI_PROTO_L2CAP];
570 	if (hp && hp->security_cfm)
571 		hp->security_cfm(conn, status, encrypt);
572 
573 	hp = hci_proto[HCI_PROTO_SCO];
574 	if (hp && hp->security_cfm)
575 		hp->security_cfm(conn, status, encrypt);
576 }
577 
578 int hci_register_proto(struct hci_proto *hproto);
579 int hci_unregister_proto(struct hci_proto *hproto);
580 
581 /* ----- HCI callbacks ----- */
/* Callback set for modules that want connection event notifications;
 * registered on hci_cb_list (guarded by hci_cb_list_lock) via
 * hci_register_cb().  All callbacks are optional.
 */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*security_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};
591 
592 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
593 {
594 	struct list_head *p;
595 	__u8 encrypt;
596 
597 	hci_proto_auth_cfm(conn, status);
598 
599 	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
600 		return;
601 
602 	encrypt = (conn->link_mode & HCI_LM_ENCRYPT) ? 0x01 : 0x00;
603 
604 	read_lock_bh(&hci_cb_list_lock);
605 	list_for_each(p, &hci_cb_list) {
606 		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
607 		if (cb->security_cfm)
608 			cb->security_cfm(conn, status, encrypt);
609 	}
610 	read_unlock_bh(&hci_cb_list_lock);
611 }
612 
613 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
614 {
615 	struct list_head *p;
616 
617 	if (conn->sec_level == BT_SECURITY_SDP)
618 		conn->sec_level = BT_SECURITY_LOW;
619 
620 	hci_proto_encrypt_cfm(conn, status, encrypt);
621 
622 	read_lock_bh(&hci_cb_list_lock);
623 	list_for_each(p, &hci_cb_list) {
624 		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
625 		if (cb->security_cfm)
626 			cb->security_cfm(conn, status, encrypt);
627 	}
628 	read_unlock_bh(&hci_cb_list_lock);
629 }
630 
631 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
632 {
633 	struct list_head *p;
634 
635 	read_lock_bh(&hci_cb_list_lock);
636 	list_for_each(p, &hci_cb_list) {
637 		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
638 		if (cb->key_change_cfm)
639 			cb->key_change_cfm(conn, status);
640 	}
641 	read_unlock_bh(&hci_cb_list_lock);
642 }
643 
644 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
645 {
646 	struct list_head *p;
647 
648 	read_lock_bh(&hci_cb_list_lock);
649 	list_for_each(p, &hci_cb_list) {
650 		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
651 		if (cb->role_switch_cfm)
652 			cb->role_switch_cfm(conn, status, role);
653 	}
654 	read_unlock_bh(&hci_cb_list_lock);
655 }
656 
657 int hci_register_cb(struct hci_cb *hcb);
658 int hci_unregister_cb(struct hci_cb *hcb);
659 
660 int hci_register_notifier(struct notifier_block *nb);
661 int hci_unregister_notifier(struct notifier_block *nb);
662 
663 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
664 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
665 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
666 
667 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
668 
669 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
670 
671 /* ----- HCI Sockets ----- */
672 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
673 
674 /* HCI info for socket */
675 #define hci_pi(sk) ((struct hci_pinfo *) sk)
676 
/* Per-socket state for raw HCI sockets; bt_sock must stay first so the
 * hci_pi() cast from a struct sock works.
 */
struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;	/* device the socket is bound to */
	struct hci_filter filter;	/* which events/packets to deliver */
	__u32             cmsg_mask;	/* requested ancillary data */
};
683 
684 /* HCI security filter */
685 #define HCI_SFLT_MAX_OGF  5
686 
/* Security filter for unprivileged HCI sockets: bitmaps of permitted
 * packet types, events, and commands (OCF bits grouped per OGF).
 */
struct hci_sec_filter {
	__u32 type_mask;			/* allowed packet types */
	__u32 event_mask[2];			/* allowed events, 64 bits */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed OCFs per OGF */
};
692 
693 /* ----- HCI requests ----- */
694 #define HCI_REQ_DONE	  0
695 #define HCI_REQ_PEND	  1
696 #define HCI_REQ_CANCELED  2
697 
/* Serialize synchronous HCI requests on a device; the argument is
 * parenthesized so the macros expand safely for any caller expression.
 */
#define hci_req_lock(d)		down(&(d)->req_lock)
#define hci_req_unlock(d)	up(&(d)->req_lock)
700 
701 void hci_req_complete(struct hci_dev *hdev, int result);
702 
703 #endif /* __HCI_CORE_H */
704