xref: /openbmc/linux/include/net/bluetooth/hci_core.h (revision 87c2ce3b)
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>

/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1

#define HCI_INIT_TIMEOUT (HZ * 10)

/* HCI Core structures */

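/* One remote device as reported by HCI inquiry result events: page scan
 * parameters, class of device, clock offset and (when the controller
 * reports it) the RSSI of the response.
 */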
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
};

struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};

struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;
	struct inquiry_entry	*list;
};

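/* Despite the name, connections are kept on a single lock-protected
 * list; acl_num and sco_num only count how many links of each type
 * currently exist.
 */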
struct hci_conn_hash {
	struct list_head list;
	spinlock_t       lock;
	unsigned int     acl_num;
	unsigned int     sco_num;
};

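/* One hci_dev per registered controller. It carries the controller's
 * identity (bdaddr, features, packet types), flow-control state
 * (cmd_cnt/acl_cnt/sco_cnt against the advertised MTUs and packet
 * counts), the RX/TX/command queues and tasklets, and the callbacks
 * a transport driver must provide (open, close, send, ...).
 */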
struct hci_dev {
	struct list_head list;
	spinlock_t	lock;
	atomic_t	refcnt;

	char		name[8];
	unsigned long	flags;
	__u16		id;
	__u8		type;
	bdaddr_t	bdaddr;
	__u8		features[8];
	__u16		voice_setting;

	__u16		pkt_type;
	__u16		link_policy;
	__u16		link_mode;

	unsigned long	quirks;

	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;

	struct semaphore	req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;
	void			*core_data;

	atomic_t		promisc;

	struct class_device	class_dev;

	struct module		*owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};

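/* Typical driver-side registration, as a rough sketch only (the my_*
 * callbacks are hypothetical driver functions, not part of this API):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *	hdev->owner    = THIS_MODULE;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */

/* One hci_conn per ACL or SCO link. Lifetime is managed with
 * hci_conn_hold()/hci_conn_put(); dropping the last reference does not
 * free the connection but arms its disconnect timer (see hci_conn_put()
 * below).
 */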
struct hci_conn {
	struct list_head list;

	atomic_t	 refcnt;
	spinlock_t	 lock;

	bdaddr_t	 dst;
	__u16		 handle;
	__u16		 state;
	__u8		 type;
	__u8		 out;
	__u8		 dev_class[3];
	__u32		 link_mode;
	unsigned long	 pend;

	unsigned int	 sent;

	struct sk_buff_head data_q;

	struct timer_list timer;

	struct hci_dev	*hdev;
	void		*l2cap_data;
	void		*sco_data;
	void		*priv;

	struct hci_conn	*link;
};

extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX   (HZ*30)   // 30 seconds
#define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   // 60 seconds

#define inquiry_cache_lock(c)		spin_lock(&c->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)

static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return (c->list == NULL);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}
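/* Ages are measured in jiffies, so staleness checks reduce to simple
 * comparisons against the *_AGE_MAX constants above; a sketch:
 *
 *	if (inquiry_entry_age(e) > INQUIRY_ENTRY_AGE_MAX)
 *		... treat the cached entry as stale ...
 */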

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);

/* ----- HCI Connections ----- */
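/* Bit numbers for hci_conn->pend, marking operations that have been
 * issued to the controller but not yet confirmed.
 */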
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND
};

static inline void hci_conn_hash_init(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	INIT_LIST_HEAD(&h->list);
	spin_lock_init(&h->lock);
	h->acl_num = 0;
	h->sco_num = 0;
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add(&c->list, &h->list);
	if (c->type == ACL_LINK)
		h->acl_num++;
	else
		h->sco_num++;
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_del(&c->list);
	if (c->type == ACL_LINK)
		h->acl_num--;
	else
		h->sco_num--;
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
					__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
					__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}
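/* Both lookups walk the plain list above and return the connection
 * without taking a reference, so callers are expected to hold the
 * device lock around the lookup. Illustrative use only:
 *
 *	hci_dev_lock(hdev);
 *	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dst);
 *	if (conn)
 *		hci_conn_hold(conn);
 *	hci_dev_unlock(hdev);
 */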

void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int    hci_conn_del(struct hci_conn *conn);
void   hci_conn_hash_flush(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_auth(struct hci_conn *conn);
int hci_conn_encrypt(struct hci_conn *conn);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);

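/* Connection idle handling: hci_conn_hold() bumps the refcount and
 * cancels any pending disconnect timer, while hci_conn_put() re-arms
 * the timer once the count drops to zero, so idle ACL links are torn
 * down after HCI_DISCONN_TIMEOUT rather than immediately.
 */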
static inline void hci_conn_set_timer(struct hci_conn *conn, unsigned long timeout)
{
	mod_timer(&conn->timer, jiffies + timeout);
}

static inline void hci_conn_del_timer(struct hci_conn *conn)
{
	del_timer(&conn->timer);
}

static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	hci_conn_del_timer(conn);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		if (conn->type == ACL_LINK) {
			unsigned long timeo = (conn->out) ?
				HCI_DISCONN_TIMEOUT : HCI_DISCONN_TIMEOUT * 2;
			hci_conn_set_timer(conn, timeo);
		} else
			hci_conn_set_timer(conn, HZ / 100);
	}
}

/* ----- HCI tasks ----- */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->cmd_task);
}

static inline void hci_sched_rx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->rx_task);
}

static inline void hci_sched_tx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->tx_task);
}

/* ----- HCI Devices ----- */
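/* Device references come in two levels: __hci_dev_hold()/__hci_dev_put()
 * manage only hdev->refcnt (the destruct callback runs when it reaches
 * zero), while hci_dev_hold()/hci_dev_put() additionally pin the
 * transport driver module so it cannot be unloaded while in use.
 */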
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}

#define hci_dev_lock(d)		spin_lock(&d->lock)
#define hci_dev_unlock(d)	spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

/* Receive frame from HCI drivers */
static inline int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
			&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	hci_sched_rx(hdev);
	return 0;
}
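/* A driver hands every complete HCI frame to the core with skb->dev
 * pointing at its hci_dev; frames arriving while the device is neither
 * up nor initializing are dropped with -ENXIO. Driver receive path,
 * sketched with hypothetical driver code:
 *
 *	skb->dev = (void *) hdev;
 *	err = hci_recv_frame(skb);
 */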

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->class_dev.dev = (pdev))

/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev) (dev->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev) (dev->features[0] & LMP_ENCRYPT)

/* ----- HCI protocols ----- */
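/* L2CAP and SCO each register one struct hci_proto (slots
 * HCI_PROTO_L2CAP and HCI_PROTO_SCO of hci_proto[]); the inline
 * hci_proto_* helpers below fan connection and data events out to
 * whichever of the two is registered.
 */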
struct hci_proto {
	char		*name;
	unsigned int	id;
	unsigned long	flags;

	void		*priv;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*auth_cfm)		(struct hci_conn *conn, __u8 status);
	int (*encrypt_cfm)	(struct hci_conn *conn, __u8 status);
};

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}

static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		hp->disconn_ind(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		hp->disconn_ind(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->auth_cfm)
		hp->auth_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->auth_cfm)
		hp->auth_cfm(conn, status);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->encrypt_cfm)
		hp->encrypt_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->encrypt_cfm)
		hp->encrypt_cfm(conn, status);
}

int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);

/* ----- HCI callbacks ----- */
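/* hci_cb entries let additional users observe security-related events
 * (authentication, encryption, key change, role switch). They live on
 * hci_cb_list under hci_cb_list_lock and are registered with
 * hci_register_cb()/hci_unregister_cb().
 */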
struct hci_cb {
	struct list_head list;

	char *name;

	void (*auth_cfm)	(struct hci_conn *conn, __u8 status);
	void (*encrypt_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	hci_proto_auth_cfm(conn, status);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->auth_cfm)
			cb->auth_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	hci_proto_encrypt_cfm(conn, status);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->encrypt_cfm)
			cb->encrypt_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
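/* hci_send_cmd() builds the command packet itself: the opcode is passed
 * as separate OGF/OCF values plus an optional parameter block of plen
 * bytes (0 and NULL for commands without parameters). Illustrative use:
 *
 *	__u8 scan = SCAN_INQUIRY | SCAN_PAGE;
 *	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
 */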

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock    bt;
	struct hci_dev    *hdev;
	struct hci_filter filter;
	__u32             cmsg_mask;
};

/* HCI security filter */
#define HCI_SFLT_MAX_OGF  5

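/* Filter used by the HCI socket layer to limit what a socket without
 * CAP_NET_RAW may send and receive: type_mask covers packet types,
 * event_mask the visible events, and ocf_mask (indexed by OGF, up to
 * HCI_SFLT_MAX_OGF) the permitted commands.
 */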
struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* ----- HCI requests ----- */
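/* Synchronous HCI requests: hdev->req_lock serializes them, the caller
 * sleeps on hdev->req_wait_q, and hci_req_complete() stores the result
 * and moves req_status through the states below.
 */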
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		down(&d->req_lock)
#define hci_req_unlock(d)	up(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, int result);

#endif /* __HCI_CORE_H */