xref: /openbmc/linux/include/net/bluetooth/hci_core.h (revision 93dc544c)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4 
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 #ifndef __HCI_CORE_H
26 #define __HCI_CORE_H
27 
28 #include <net/bluetooth/hci.h>
29 
30 /* HCI upper protocols */
31 #define HCI_PROTO_L2CAP	0
32 #define HCI_PROTO_SCO	1
33 
34 /* HCI Core structures */
/* One inquiry (device discovery) result from a remote device. */
struct inquiry_data {
	bdaddr_t	bdaddr;			/* remote device address */
	__u8		pscan_rep_mode;		/* page scan repetition mode */
	__u8		pscan_period_mode;	/* page scan period mode */
	__u8		pscan_mode;		/* legacy page scan mode */
	__u8		dev_class[3];		/* class of device */
	__le16		clock_offset;		/* clock offset, little endian on the wire */
	__s8		rssi;			/* signal strength of the response */
	__u8		ssp_mode;		/* Simple Pairing mode of the remote */
};
45 
/* Node in the per-device inquiry cache (singly linked list). */
struct inquiry_entry {
	struct inquiry_entry 	*next;		/* next entry, NULL-terminated */
	__u32			timestamp;	/* jiffies when the entry was last refreshed */
	struct inquiry_data	data;		/* latest inquiry result for this device */
};
51 
/* Cache of recent inquiry results, embedded in struct hci_dev. */
struct inquiry_cache {
	spinlock_t 		lock;		/* protects list and timestamp */
	__u32			timestamp;	/* jiffies of the last cache update */
	struct inquiry_entry 	*list;		/* head of the entry list, NULL when empty */
};
57 
/* Set of active connections on one adapter, with per-type counters. */
struct hci_conn_hash {
	struct list_head list;		/* all struct hci_conn on this device */
	spinlock_t       lock;		/* protects list and the counters */
	unsigned int     acl_num;	/* number of ACL links */
	unsigned int     sco_num;	/* number of non-ACL (SCO/eSCO) links */
};
64 
/* Per-adapter state for one HCI device (one Bluetooth controller). */
struct hci_dev {
	struct list_head list;		/* node in the global hci_dev_list */
	spinlock_t	lock;
	atomic_t	refcnt;		/* see __hci_dev_hold()/__hci_dev_put() */

	char		name[8];	/* interface name, e.g. "hci0" */
	unsigned long	flags;		/* HCI_UP, HCI_INIT, ... bits */
	__u16		id;
	__u8		type;
	bdaddr_t	bdaddr;		/* local device address */
	__u8		dev_name[248];	/* local friendly name */
	__u8		dev_class[3];	/* class of device */
	__u8		features[8];	/* LMP feature mask, see lmp_*_capable() */
	__u8		commands[64];	/* supported-commands bitmap from the controller */
	__u8		ssp_mode;	/* Simple Pairing mode */
	__u8		hci_ver;
	__u16		hci_rev;
	__u16		manufacturer;
	__u16		voice_setting;

	__u16		pkt_type;	/* allowed ACL packet types */
	__u16		esco_type;	/* allowed (e)SCO packet types */
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned long	quirks;		/* driver-reported hardware quirks */

	/* Flow control: how many commands/packets the controller can
	 * still accept on each channel. */
	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	/* Last transmit times in jiffies, one per channel. */
	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	/* Deferred work, scheduled via hci_sched_{cmd,rx,tx}(). */
	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;	/* frames queued by hci_recv_frame() */
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;	/* last sent command, see hci_sent_cmd_data() */
	struct sk_buff		*reassembly[3];	/* partial frames, see hci_recv_fragment() */

	/* Synchronous request machinery, see hci_req_lock()/hci_req_complete(). */
	struct semaphore	req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;	/* HCI_REQ_DONE/PEND/CANCELED */
	__u32			req_result;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;	/* owned by the transport driver */
	void			*core_data;

	atomic_t 		promisc;

	struct device		*parent;	/* set via SET_HCIDEV_DEV() */
	struct device		dev;

	struct module 		*owner;		/* pinned by hci_dev_hold() */

	/* Transport driver callbacks. */
	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);	/* called on last __hci_dev_put() */
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};
150 
/* One ACL or (e)SCO link to a remote device. */
struct hci_conn {
	struct list_head list;		/* node in hci_dev->conn_hash.list */

	atomic_t	 refcnt;	/* see hci_conn_hold()/hci_conn_put() */
	spinlock_t	 lock;

	bdaddr_t	 dst;		/* remote device address */
	__u16		 handle;	/* HCI connection handle */
	__u16		 state;		/* BT_CONNECTED, etc. */
	__u8             mode;
	__u8		 type;		/* ACL_LINK or a SCO/eSCO type */
	__u8		 out;		/* nonzero for outgoing connections */
	__u8		 attempt;
	__u8		 dev_class[3];
	__u8             features[8];
	__u8             ssp_mode;
	__u16            interval;
	__u16            pkt_type;
	__u16            link_policy;
	__u32		 link_mode;
	__u8             auth_type;
	__u8             power_save;
	unsigned long	 pend;		/* HCI_CONN_*_PEND bits */

	unsigned int	 sent;		/* outstanding packet count */

	struct sk_buff_head data_q;	/* outgoing data queue */

	struct timer_list disc_timer;	/* delayed disconnect, armed by hci_conn_put() */
	struct timer_list idle_timer;

	struct work_struct work;

	struct device	dev;

	struct hci_dev	*hdev;		/* owning adapter */
	void		*l2cap_data;	/* owned by the L2CAP layer */
	void		*sco_data;	/* owned by the SCO layer */
	void		*priv;

	struct hci_conn	*link;		/* companion link (SCO over an ACL), if any */
};
193 
194 extern struct hci_proto *hci_proto[];
195 extern struct list_head hci_dev_list;
196 extern struct list_head hci_cb_list;
197 extern rwlock_t hci_dev_list_lock;
198 extern rwlock_t hci_cb_list_lock;
199 
200 /* ----- Inquiry cache ----- */
201 #define INQUIRY_CACHE_AGE_MAX   (HZ*30)   // 30 seconds
202 #define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   // 60 seconds
203 
/* Inquiry cache locking helpers.  The parameter is parenthesized so the
 * macros expand safely for any expression argument. */
#define inquiry_cache_lock(c)		spin_lock(&(c)->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&(c)->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&(c)->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&(c)->lock)
208 
209 static inline void inquiry_cache_init(struct hci_dev *hdev)
210 {
211 	struct inquiry_cache *c = &hdev->inq_cache;
212 	spin_lock_init(&c->lock);
213 	c->list = NULL;
214 }
215 
216 static inline int inquiry_cache_empty(struct hci_dev *hdev)
217 {
218 	struct inquiry_cache *c = &hdev->inq_cache;
219 	return (c->list == NULL);
220 }
221 
222 static inline long inquiry_cache_age(struct hci_dev *hdev)
223 {
224 	struct inquiry_cache *c = &hdev->inq_cache;
225 	return jiffies - c->timestamp;
226 }
227 
/* Age of a single cache entry in jiffies since its last refresh. */
static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}
232 
233 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
234 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
235 
236 /* ----- HCI Connections ----- */
/* Pending-operation bit numbers for hci_conn->pend. */
enum {
	HCI_CONN_AUTH_PEND,		/* authentication in progress */
	HCI_CONN_ENCRYPT_PEND,		/* encryption change in progress */
	HCI_CONN_RSWITCH_PEND,		/* role switch in progress */
	HCI_CONN_MODE_CHANGE_PEND,	/* sniff/active mode change in progress */
};
243 
244 static inline void hci_conn_hash_init(struct hci_dev *hdev)
245 {
246 	struct hci_conn_hash *h = &hdev->conn_hash;
247 	INIT_LIST_HEAD(&h->list);
248 	spin_lock_init(&h->lock);
249 	h->acl_num = 0;
250 	h->sco_num = 0;
251 }
252 
/* Add a connection to the hash and bump the matching counter.
 * Any non-ACL link (SCO and eSCO alike) is counted in sco_num. */
static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}
262 
/* Remove a connection from the hash and drop the matching counter.
 * Mirrors hci_conn_hash_add(): non-ACL links decrement sco_num. */
static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}
272 
273 static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
274 					__u16 handle)
275 {
276 	struct hci_conn_hash *h = &hdev->conn_hash;
277 	struct list_head *p;
278 	struct hci_conn  *c;
279 
280 	list_for_each(p, &h->list) {
281 		c = list_entry(p, struct hci_conn, list);
282 		if (c->handle == handle)
283 			return c;
284 	}
285 	return NULL;
286 }
287 
288 static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
289 					__u8 type, bdaddr_t *ba)
290 {
291 	struct hci_conn_hash *h = &hdev->conn_hash;
292 	struct list_head *p;
293 	struct hci_conn  *c;
294 
295 	list_for_each(p, &h->list) {
296 		c = list_entry(p, struct hci_conn, list);
297 		if (c->type == type && !bacmp(&c->dst, ba))
298 			return c;
299 	}
300 	return NULL;
301 }
302 
303 static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
304 					__u8 type, __u16 state)
305 {
306 	struct hci_conn_hash *h = &hdev->conn_hash;
307 	struct list_head *p;
308 	struct hci_conn  *c;
309 
310 	list_for_each(p, &h->list) {
311 		c = list_entry(p, struct hci_conn, list);
312 		if (c->type == type && c->state == state)
313 			return c;
314 	}
315 	return NULL;
316 }
317 
318 void hci_acl_connect(struct hci_conn *conn);
319 void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
320 void hci_add_sco(struct hci_conn *conn, __u16 handle);
321 void hci_setup_sync(struct hci_conn *conn, __u16 handle);
322 
323 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
324 int hci_conn_del(struct hci_conn *conn);
325 void hci_conn_hash_flush(struct hci_dev *hdev);
326 void hci_conn_check_pending(struct hci_dev *hdev);
327 
328 struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
329 int hci_conn_auth(struct hci_conn *conn);
330 int hci_conn_encrypt(struct hci_conn *conn);
331 int hci_conn_change_link_key(struct hci_conn *conn);
332 int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);
333 
334 void hci_conn_enter_active_mode(struct hci_conn *conn);
335 void hci_conn_enter_sniff_mode(struct hci_conn *conn);
336 
/* Take a reference on the connection and cancel any pending delayed
 * disconnect that a previous hci_conn_put() may have scheduled. */
static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}
342 
/* Drop a reference.  When the last reference goes away the link is not
 * torn down immediately: a disconnect timer is armed instead, so a quick
 * re-acquire via hci_conn_hold() can cancel the teardown. */
static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
				/* give incoming ACL links extra grace time */
				if (!conn->out)
					timeo *= 5;
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}
360 
361 /* ----- HCI tasks ----- */
/* Kick the command-queue tasklet of the device. */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->cmd_task);
}
366 
/* Kick the receive tasklet of the device. */
static inline void hci_sched_rx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->rx_task);
}
371 
/* Kick the transmit tasklet of the device. */
static inline void hci_sched_tx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->tx_task);
}
376 
377 /* ----- HCI Devices ----- */
/* Drop a device reference without touching the module refcount;
 * the last reference invokes the driver's destruct callback. */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}
383 
/* Drop a device reference and release the owning module
 * (pairs with hci_dev_hold()). */
static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}
389 
/* Take a device reference without pinning the owning module. */
static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}
395 
396 static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
397 {
398 	if (try_module_get(d->owner))
399 		return __hci_dev_hold(d);
400 	return NULL;
401 }
402 
/* Device locking helpers.  The parameter is parenthesized so the
 * macros expand safely for any expression argument. */
#define hci_dev_lock(d)		spin_lock(&(d)->lock)
#define hci_dev_unlock(d)	spin_unlock(&(d)->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&(d)->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&(d)->lock)
407 
408 struct hci_dev *hci_dev_get(int index);
409 struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
410 
411 struct hci_dev *hci_alloc_dev(void);
412 void hci_free_dev(struct hci_dev *hdev);
413 int hci_register_dev(struct hci_dev *hdev);
414 int hci_unregister_dev(struct hci_dev *hdev);
415 int hci_suspend_dev(struct hci_dev *hdev);
416 int hci_resume_dev(struct hci_dev *hdev);
417 int hci_dev_open(__u16 dev);
418 int hci_dev_close(__u16 dev);
419 int hci_dev_reset(__u16 dev);
420 int hci_dev_reset_stat(__u16 dev);
421 int hci_dev_cmd(unsigned int cmd, void __user *arg);
422 int hci_get_dev_list(void __user *arg);
423 int hci_get_dev_info(void __user *arg);
424 int hci_get_conn_list(void __user *arg);
425 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
426 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
427 int hci_inquiry(void __user *arg);
428 
429 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
430 
/* Receive frame from HCI drivers.
 * Consumes the skb in every case: frames arriving while the device is
 * neither up nor initializing are freed and -ENXIO is returned;
 * otherwise the frame is queued for the rx tasklet and 0 is returned. */
static inline int hci_recv_frame(struct sk_buff *skb)
{
	/* drivers pass their hci_dev through skb->dev */
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
			&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	hci_sched_rx(hdev);
	return 0;
}
452 
453 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);
454 
455 int hci_register_sysfs(struct hci_dev *hdev);
456 void hci_unregister_sysfs(struct hci_dev *hdev);
457 void hci_conn_add_sysfs(struct hci_conn *conn);
458 void hci_conn_del_sysfs(struct hci_conn *conn);
459 
460 #define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))
461 
462 /* ----- LMP capabilities ----- */
463 #define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
464 #define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
465 #define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
466 #define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
467 #define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
468 #define lmp_ssp_capable(dev)       ((dev)->features[6] & LMP_SIMPLE_PAIR)
469 
470 /* ----- HCI protocols ----- */
/* Upper-layer protocol (L2CAP or SCO) registered with the HCI core;
 * stored in the hci_proto[] table indexed by HCI_PROTO_*. */
struct hci_proto {
	char 		*name;
	unsigned int	id;		/* HCI_PROTO_L2CAP or HCI_PROTO_SCO */
	unsigned long	flags;

	void		*priv;

	/* Event hooks; any may be NULL, callers check before dispatch. */
	int (*connect_ind) 	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*auth_cfm)		(struct hci_conn *conn, __u8 status);
	int (*encrypt_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
};
486 
487 static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
488 {
489 	register struct hci_proto *hp;
490 	int mask = 0;
491 
492 	hp = hci_proto[HCI_PROTO_L2CAP];
493 	if (hp && hp->connect_ind)
494 		mask |= hp->connect_ind(hdev, bdaddr, type);
495 
496 	hp = hci_proto[HCI_PROTO_SCO];
497 	if (hp && hp->connect_ind)
498 		mask |= hp->connect_ind(hdev, bdaddr, type);
499 
500 	return mask;
501 }
502 
503 static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
504 {
505 	register struct hci_proto *hp;
506 
507 	hp = hci_proto[HCI_PROTO_L2CAP];
508 	if (hp && hp->connect_cfm)
509 		hp->connect_cfm(conn, status);
510 
511 	hp = hci_proto[HCI_PROTO_SCO];
512 	if (hp && hp->connect_cfm)
513 		hp->connect_cfm(conn, status);
514 }
515 
516 static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
517 {
518 	register struct hci_proto *hp;
519 
520 	hp = hci_proto[HCI_PROTO_L2CAP];
521 	if (hp && hp->disconn_ind)
522 		hp->disconn_ind(conn, reason);
523 
524 	hp = hci_proto[HCI_PROTO_SCO];
525 	if (hp && hp->disconn_ind)
526 		hp->disconn_ind(conn, reason);
527 }
528 
529 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
530 {
531 	register struct hci_proto *hp;
532 
533 	hp = hci_proto[HCI_PROTO_L2CAP];
534 	if (hp && hp->auth_cfm)
535 		hp->auth_cfm(conn, status);
536 
537 	hp = hci_proto[HCI_PROTO_SCO];
538 	if (hp && hp->auth_cfm)
539 		hp->auth_cfm(conn, status);
540 }
541 
542 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
543 {
544 	register struct hci_proto *hp;
545 
546 	hp = hci_proto[HCI_PROTO_L2CAP];
547 	if (hp && hp->encrypt_cfm)
548 		hp->encrypt_cfm(conn, status, encrypt);
549 
550 	hp = hci_proto[HCI_PROTO_SCO];
551 	if (hp && hp->encrypt_cfm)
552 		hp->encrypt_cfm(conn, status, encrypt);
553 }
554 
555 int hci_register_proto(struct hci_proto *hproto);
556 int hci_unregister_proto(struct hci_proto *hproto);
557 
558 /* ----- HCI callbacks ----- */
/* Callback consumer registered on hci_cb_list; every hook may be NULL
 * and is checked before dispatch.  List is guarded by hci_cb_list_lock. */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*auth_cfm)	(struct hci_conn *conn, __u8 status);
	void (*encrypt_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};
569 
570 static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
571 {
572 	struct list_head *p;
573 
574 	hci_proto_auth_cfm(conn, status);
575 
576 	read_lock_bh(&hci_cb_list_lock);
577 	list_for_each(p, &hci_cb_list) {
578 		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
579 		if (cb->auth_cfm)
580 			cb->auth_cfm(conn, status);
581 	}
582 	read_unlock_bh(&hci_cb_list_lock);
583 }
584 
585 static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
586 {
587 	struct list_head *p;
588 
589 	hci_proto_encrypt_cfm(conn, status, encrypt);
590 
591 	read_lock_bh(&hci_cb_list_lock);
592 	list_for_each(p, &hci_cb_list) {
593 		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
594 		if (cb->encrypt_cfm)
595 			cb->encrypt_cfm(conn, status, encrypt);
596 	}
597 	read_unlock_bh(&hci_cb_list_lock);
598 }
599 
600 static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
601 {
602 	struct list_head *p;
603 
604 	read_lock_bh(&hci_cb_list_lock);
605 	list_for_each(p, &hci_cb_list) {
606 		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
607 		if (cb->key_change_cfm)
608 			cb->key_change_cfm(conn, status);
609 	}
610 	read_unlock_bh(&hci_cb_list_lock);
611 }
612 
613 static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
614 {
615 	struct list_head *p;
616 
617 	read_lock_bh(&hci_cb_list_lock);
618 	list_for_each(p, &hci_cb_list) {
619 		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
620 		if (cb->role_switch_cfm)
621 			cb->role_switch_cfm(conn, status, role);
622 	}
623 	read_unlock_bh(&hci_cb_list_lock);
624 }
625 
626 int hci_register_cb(struct hci_cb *hcb);
627 int hci_unregister_cb(struct hci_cb *hcb);
628 
629 int hci_register_notifier(struct notifier_block *nb);
630 int hci_unregister_notifier(struct notifier_block *nb);
631 
632 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param);
633 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
634 int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
635 
636 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
637 
638 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);
639 
640 /* ----- HCI Sockets ----- */
641 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
642 
643 /* HCI info for socket */
644 #define hci_pi(sk) ((struct hci_pinfo *) sk)
645 
/* Per-socket state for raw HCI sockets; bt must stay the first member
 * so the hci_pi() cast from struct sock is valid. */
struct hci_pinfo {
	struct bt_sock    bt;		/* must be first */
	struct hci_dev    *hdev;	/* bound device, if any */
	struct hci_filter filter;	/* per-socket event/packet filter */
	__u32             cmsg_mask;	/* requested ancillary data */
};
652 
653 /* HCI security filter */
654 #define HCI_SFLT_MAX_OGF  5
655 
/* Security filter describing what unprivileged sockets may send/receive. */
struct hci_sec_filter {
	__u32 type_mask;			/* allowed packet types */
	__u32 event_mask[2];			/* allowed HCI events (64-bit bitmap) */
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];	/* allowed commands, per OGF */
};
661 
662 /* ----- HCI requests ----- */
663 #define HCI_REQ_DONE	  0
664 #define HCI_REQ_PEND	  1
665 #define HCI_REQ_CANCELED  2
666 
/* Serialize synchronous HCI requests.  The parameter is parenthesized
 * so the macros expand safely for any expression argument. */
#define hci_req_lock(d)		down(&(d)->req_lock)
#define hci_req_unlock(d)	up(&(d)->req_lock)
669 
670 void hci_req_complete(struct hci_dev *hdev, int result);
671 
672 #endif /* __HCI_CORE_H */
673