/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>
#include <net/bluetooth/hci_sock.h>

/* HCI priority */
#define HCI_PRIO_MAX	7

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
	__u8		ssp_mode;
};

struct inquiry_entry {
	struct list_head	all;		/* inq_cache.all */
	struct list_head	list;		/* unknown or resolve */
	enum {
		NAME_NOT_KNOWN,
		NAME_NEEDED,
		NAME_PENDING,
		NAME_KNOWN,
	} name_state;
	__u32			timestamp;
	struct inquiry_data	data;
};

struct discovery_state {
	int			type;
	enum {
		DISCOVERY_STOPPED,
		DISCOVERY_STARTING,
		DISCOVERY_FINDING,
		DISCOVERY_RESOLVING,
		DISCOVERY_STOPPING,
	} state;
	struct list_head	all;	/* All devices found during inquiry */
	struct list_head	unknown;	/* Name state not known */
	struct list_head	resolve;	/* Name needs to be resolved */
	__u32			timestamp;
	bdaddr_t		last_adv_addr;
	u8			last_adv_addr_type;
	s8			last_adv_rssi;
	u32			last_adv_flags;
	u8			last_adv_data[HCI_MAX_AD_LENGTH];
	u8			last_adv_data_len;
	bool			report_invalid_rssi;
	bool			result_filtering;
	s8			rssi;
	u16			uuid_count;
	u8			(*uuids)[16];
	unsigned long		scan_start;
	unsigned long		scan_duration;
};

struct hci_conn_hash {
	struct list_head list;
	unsigned int     acl_num;
	unsigned int     amp_num;
	unsigned int     sco_num;
	unsigned int     le_num;
	unsigned int     le_num_slave;
};

struct bdaddr_list {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 bdaddr_type;
};

struct bt_uuid {
	struct list_head list;
	u8 uuid[16];
	u8 size;
	u8 svc_hint;
};

struct smp_csrk {
	bdaddr_t bdaddr;
	u8 bdaddr_type;
	u8 type;
	u8 val[16];
};

struct smp_ltk {
	struct list_head list;
	struct rcu_head rcu;
	bdaddr_t bdaddr;
	u8 bdaddr_type;
	u8 authenticated;
	u8 type;
	u8 enc_size;
	__le16 ediv;
	__le64 rand;
	u8 val[16];
};

struct smp_irk {
	struct list_head list;
	struct rcu_head rcu;
	bdaddr_t rpa;
	bdaddr_t bdaddr;
	u8 addr_type;
	u8 val[16];
};

struct link_key {
	struct list_head list;
	struct rcu_head rcu;
	bdaddr_t bdaddr;
	u8 type;
	u8 val[HCI_LINK_KEY_SIZE];
	u8 pin_len;
};

struct oob_data {
	struct list_head list;
	bdaddr_t bdaddr;
	u8 bdaddr_type;
	u8 present;
	u8 hash192[16];
	u8 rand192[16];
	u8 hash256[16];
	u8 rand256[16];
};

struct adv_info {
	struct delayed_work timeout_exp;
	__u8	instance;
	__u32	flags;
	__u16	timeout;
	__u16	adv_data_len;
	__u8	adv_data[HCI_MAX_AD_LENGTH];
	__u16	scan_rsp_len;
	__u8	scan_rsp_data[HCI_MAX_AD_LENGTH];
};

#define HCI_MAX_SHORT_NAME_LENGTH	10

/* Default LE RPA expiry time, 15 minutes */
#define HCI_DEFAULT_RPA_TIMEOUT		(15 * 60)

/* Default min/max age of connection information (1s/3s) */
#define DEFAULT_CONN_INFO_MIN_AGE	1000
#define DEFAULT_CONN_INFO_MAX_AGE	3000

struct amp_assoc {
	__u16	len;
	__u16	offset;
	__u16	rem_len;
	__u16	len_so_far;
	__u8	data[HCI_MAX_AMP_ASSOC_SIZE];
};

#define HCI_MAX_PAGES	3

struct hci_dev {
	struct list_head list;
	struct mutex	lock;

	char		name[8];
	unsigned long	flags;
	__u16		id;
	__u8		bus;
	__u8		dev_type;
	bdaddr_t	bdaddr;
	bdaddr_t	setup_addr;
	bdaddr_t	public_addr;
	bdaddr_t	random_addr;
	bdaddr_t	static_addr;
	__u8		adv_addr_type;
	__u8		dev_name[HCI_MAX_NAME_LENGTH];
	__u8		short_name[HCI_MAX_SHORT_NAME_LENGTH];
	__u8		eir[HCI_MAX_EIR_LENGTH];
	__u8		dev_class[3];
	__u8		major_class;
	__u8		minor_class;
	__u8		max_page;
	__u8		features[HCI_MAX_PAGES][8];
	__u8		le_features[8];
	__u8		le_white_list_size;
	__u8		le_states[8];
	__u8		commands[64];
	__u8		hci_ver;
	__u16		hci_rev;
	__u8		lmp_ver;
	__u16		manufacturer;
	__u16		lmp_subver;
	__u16		voice_setting;
	__u8		num_iac;
	__u8		stored_max_keys;
	__u8		stored_num_keys;
	__u8		io_capability;
	__s8		inq_tx_power;
	__u16		page_scan_interval;
	__u16		page_scan_window;
	__u8		page_scan_type;
	__u8		le_adv_channel_map;
	__u16		le_adv_min_interval;
	__u16		le_adv_max_interval;
	__u8		le_scan_type;
	__u16		le_scan_interval;
	__u16		le_scan_window;
	__u16		le_conn_min_interval;
	__u16		le_conn_max_interval;
	__u16		le_conn_latency;
	__u16		le_supv_timeout;
	__u16		le_def_tx_len;
	__u16		le_def_tx_time;
	__u16		le_max_tx_len;
	__u16		le_max_tx_time;
	__u16		le_max_rx_len;
	__u16		le_max_rx_time;
	__u16		discov_interleaved_timeout;
	__u16		conn_info_min_age;
	__u16		conn_info_max_age;
	__u8		ssp_debug_mode;
	__u8		hw_error_code;
	__u32		clock;

	__u16		devid_source;
	__u16		devid_vendor;
	__u16		devid_product;
	__u16		devid_version;

	__u16		pkt_type;
	__u16		esco_type;
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	__u8		amp_status;
	__u32		amp_total_bw;
	__u32		amp_max_bw;
	__u32		amp_min_latency;
	__u32		amp_max_pdu;
	__u8		amp_type;
	__u16		amp_pal_cap;
	__u16		amp_assoc_size;
	__u32		amp_max_flush_to;
	__u32		amp_be_flush_to;

	struct amp_assoc	loc_assoc;

	__u8		flow_ctl_mode;

	unsigned int	auto_accept_delay;

	unsigned long	quirks;

	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;
	unsigned int	le_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	le_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;
	unsigned int	le_pkts;

	__u16		block_len;
	__u16		block_mtu;
	__u16		num_blocks;
	__u16		block_cnt;

	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;
	unsigned long	le_last_tx;

	struct workqueue_struct	*workqueue;
	struct workqueue_struct	*req_workqueue;

	struct work_struct	power_on;
	struct delayed_work	power_off;
	struct work_struct	error_reset;

	__u16			discov_timeout;
	struct delayed_work	discov_off;

	struct delayed_work	service_cache;

	struct delayed_work	cmd_timer;

	struct work_struct	rx_work;
	struct work_struct	cmd_work;
	struct work_struct	tx_work;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;

	struct mutex		req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;
	struct sk_buff		*req_skb;

	void			*smp_data;
	void			*smp_bredr_data;

	struct discovery_state	discovery;
	struct hci_conn_hash	conn_hash;

	struct list_head	mgmt_pending;
	struct list_head	blacklist;
	struct list_head	whitelist;
	struct list_head	uuids;
	struct list_head	link_keys;
	struct list_head	long_term_keys;
	struct list_head	identity_resolving_keys;
	struct list_head	remote_oob_data;
	struct list_head	le_white_list;
	struct list_head	le_conn_params;
	struct list_head	pend_le_conns;
	struct list_head	pend_le_reports;

	struct hci_dev_stats	stat;

	atomic_t		promisc;

	struct dentry		*debugfs;

	struct device		dev;

	struct rfkill		*rfkill;

	DECLARE_BITMAP(dev_flags, __HCI_NUM_FLAGS);

	struct delayed_work	le_scan_disable;
	struct delayed_work	le_scan_restart;

	__s8			adv_tx_power;
	__u8			adv_data[HCI_MAX_AD_LENGTH];
	__u8			adv_data_len;
	__u8			scan_rsp_data[HCI_MAX_AD_LENGTH];
	__u8			scan_rsp_data_len;

	struct adv_info		adv_instance;

	__u8			irk[16];
	__u32			rpa_timeout;
	struct delayed_work	rpa_expired;
	bdaddr_t		rpa;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*setup)(struct hci_dev *hdev);
	int (*shutdown)(struct hci_dev *hdev);
	int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	void (*hw_error)(struct hci_dev *hdev, u8 code);
	int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
};

#define HCI_PHY_HANDLE(handle)	(handle & 0xff)

struct hci_conn {
	struct list_head list;

	atomic_t	refcnt;

	bdaddr_t	dst;
	__u8		dst_type;
	bdaddr_t	src;
	__u8		src_type;
	bdaddr_t	init_addr;
	__u8		init_addr_type;
	bdaddr_t	resp_addr;
	__u8		resp_addr_type;
	__u16		handle;
	__u16		state;
	__u8		mode;
	__u8		type;
	__u8		role;
	bool		out;
	__u8		attempt;
	__u8		dev_class[3];
	__u8		features[HCI_MAX_PAGES][8];
	__u16		pkt_type;
	__u16		link_policy;
	__u8		key_type;
	__u8		auth_type;
	__u8		sec_level;
	__u8		pending_sec_level;
	__u8		pin_length;
	__u8		enc_key_size;
	__u8		io_capability;
	__u32		passkey_notify;
	__u8		passkey_entered;
	__u16		disc_timeout;
	__u16		conn_timeout;
	__u16		setting;
	__u16		le_conn_min_interval;
	__u16		le_conn_max_interval;
	__u16		le_conn_interval;
	__u16		le_conn_latency;
	__u16		le_supv_timeout;
	__u8		le_adv_data[HCI_MAX_AD_LENGTH];
	__u8		le_adv_data_len;
	__s8		rssi;
	__s8		tx_power;
	__s8		max_tx_power;
	unsigned long	flags;

	__u32		clock;
	__u16		clock_accuracy;

	unsigned long	conn_info_timestamp;

	__u8		remote_cap;
	__u8		remote_auth;
	__u8		remote_id;

	unsigned int	sent;

	struct sk_buff_head data_q;
	struct list_head chan_list;

	struct delayed_work disc_work;
	struct delayed_work auto_accept_work;
	struct delayed_work idle_work;
	struct delayed_work le_conn_timeout;

	struct device	dev;
	struct dentry	*debugfs;

	struct hci_dev	*hdev;
	void		*l2cap_data;
	void		*sco_data;
	struct amp_mgr	*amp_mgr;

	struct hci_conn	*link;

	void (*connect_cfm_cb)	(struct hci_conn *conn, u8 status);
	void (*security_cfm_cb)	(struct hci_conn *conn, u8 status);
	void (*disconn_cfm_cb)	(struct hci_conn *conn, u8 reason);
};

struct hci_chan {
	struct list_head list;
	__u16 handle;
	struct hci_conn *conn;
	struct sk_buff_head data_q;
	unsigned int	sent;
	__u8		state;
};

struct hci_conn_params {
	struct list_head list;
	struct list_head action;

	bdaddr_t addr;
	u8 addr_type;

	u16 conn_min_interval;
	u16 conn_max_interval;
	u16 conn_latency;
	u16 supervision_timeout;

	enum {
		HCI_AUTO_CONN_DISABLED,
		HCI_AUTO_CONN_REPORT,
		HCI_AUTO_CONN_DIRECT,
		HCI_AUTO_CONN_ALWAYS,
		HCI_AUTO_CONN_LINK_LOSS,
	} auto_connect;

	struct hci_conn *conn;
};

extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern struct mutex hci_cb_list_lock;

#define hci_dev_set_flag(hdev, nr)             set_bit((nr), (hdev)->dev_flags)
#define hci_dev_clear_flag(hdev, nr)           clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_change_flag(hdev, nr)          change_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_flag(hdev, nr)            test_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_set_flag(hdev, nr)    test_and_set_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_clear_flag(hdev, nr)  test_and_clear_bit((nr), (hdev)->dev_flags)
#define hci_dev_test_and_change_flag(hdev, nr) test_and_change_bit((nr), (hdev)->dev_flags)

#define hci_dev_clear_volatile_flags(hdev)			\
	do {							\
		hci_dev_clear_flag(hdev, HCI_LE_SCAN);		\
		hci_dev_clear_flag(hdev, HCI_LE_ADV);		\
		hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);	\
	} while (0)
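
/* Usage sketch (illustrative only, not a real caller): the hci_dev_*_flag()
 * helpers above are thin wrappers around the atomic bitops on
 * hdev->dev_flags. A hypothetical caller that wants to enable advertising
 * exactly once might do:
 *
 *	if (!hci_dev_test_and_set_flag(hdev, HCI_LE_ADV)) {
 *		... the flag was clear before, so start advertising ...
 *	}
 *
 * while a power-down path drops the volatile scanning/advertising state in
 * one go with hci_dev_clear_volatile_flags(hdev).
 */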

/* ----- HCI interface to upper protocols ----- */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
int l2cap_disconn_ind(struct hci_conn *hcon);
int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);

int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags);
int sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb);

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX   (HZ*30)   /* 30 seconds */
#define INQUIRY_ENTRY_AGE_MAX   (HZ*60)   /* 60 seconds */

static inline void discovery_init(struct hci_dev *hdev)
{
	hdev->discovery.state = DISCOVERY_STOPPED;
	INIT_LIST_HEAD(&hdev->discovery.all);
	INIT_LIST_HEAD(&hdev->discovery.unknown);
	INIT_LIST_HEAD(&hdev->discovery.resolve);
	hdev->discovery.report_invalid_rssi = true;
	hdev->discovery.rssi = HCI_RSSI_INVALID;
}

static inline void hci_discovery_filter_clear(struct hci_dev *hdev)
{
	hdev->discovery.result_filtering = false;
	hdev->discovery.report_invalid_rssi = true;
	hdev->discovery.rssi = HCI_RSSI_INVALID;
	hdev->discovery.uuid_count = 0;
	kfree(hdev->discovery.uuids);
	hdev->discovery.uuids = NULL;
	hdev->discovery.scan_start = 0;
	hdev->discovery.scan_duration = 0;
}

static inline void adv_info_init(struct hci_dev *hdev)
{
	memset(&hdev->adv_instance, 0, sizeof(struct adv_info));
}

bool hci_discovery_active(struct hci_dev *hdev);

void hci_discovery_set_state(struct hci_dev *hdev, int state);

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	return list_empty(&hdev->discovery.all);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct discovery_state *c = &hdev->discovery;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr);
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr);
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state);
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie);
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known);
void hci_inquiry_cache_flush(struct hci_dev *hdev);

/* ----- HCI Connections ----- */
enum {
	HCI_CONN_AUTH_PEND,
	HCI_CONN_REAUTH_PEND,
	HCI_CONN_ENCRYPT_PEND,
	HCI_CONN_RSWITCH_PEND,
	HCI_CONN_MODE_CHANGE_PEND,
	HCI_CONN_SCO_SETUP_PEND,
	HCI_CONN_MGMT_CONNECTED,
	HCI_CONN_SSP_ENABLED,
	HCI_CONN_SC_ENABLED,
	HCI_CONN_AES_CCM,
	HCI_CONN_POWER_SAVE,
	HCI_CONN_FLUSH_KEY,
	HCI_CONN_ENCRYPT,
	HCI_CONN_AUTH,
	HCI_CONN_SECURE,
	HCI_CONN_FIPS,
	HCI_CONN_STK_ENCRYPT,
	HCI_CONN_AUTH_INITIATOR,
	HCI_CONN_DROP,
	HCI_CONN_PARAM_REMOVAL_PEND,
	HCI_CONN_NEW_LINK_KEY,
};

static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	return hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	       test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}

static inline bool hci_conn_sc_enabled(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	return hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	       test_bit(HCI_CONN_SC_ENABLED, &conn->flags);
}

static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	list_add_rcu(&c->list, &h->list);
	switch (c->type) {
	case ACL_LINK:
		h->acl_num++;
		break;
	case AMP_LINK:
		h->amp_num++;
		break;
	case LE_LINK:
		h->le_num++;
		if (c->role == HCI_ROLE_SLAVE)
			h->le_num_slave++;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		h->sco_num++;
		break;
	}
}

static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
{
	struct hci_conn_hash *h = &hdev->conn_hash;

	list_del_rcu(&c->list);
	synchronize_rcu();

	switch (c->type) {
	case ACL_LINK:
		h->acl_num--;
		break;
	case AMP_LINK:
		h->amp_num--;
		break;
	case LE_LINK:
		h->le_num--;
		if (c->role == HCI_ROLE_SLAVE)
			h->le_num_slave--;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		h->sco_num--;
		break;
	}
}

static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	switch (type) {
	case ACL_LINK:
		return h->acl_num;
	case AMP_LINK:
		return h->amp_num;
	case LE_LINK:
		return h->le_num;
	case SCO_LINK:
	case ESCO_LINK:
		return h->sco_num;
	default:
		return 0;
	}
}

static inline unsigned int hci_conn_count(struct hci_dev *hdev)
{
	struct hci_conn_hash *c = &hdev->conn_hash;

	return c->acl_num + c->amp_num + c->sco_num + c->le_num;
}

static inline __u8 hci_conn_lookup_type(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;
	__u8 type = INVALID_LINK;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->handle == handle) {
			type = c->type;
			break;
		}
	}

	rcu_read_unlock();

	return type;
}

static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
								__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->handle == handle) {
			rcu_read_unlock();
			return c;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
							__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && !bacmp(&c->dst, ba)) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
							__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn  *c;

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->state == state) {
			rcu_read_unlock();
			return c;
		}
	}

	rcu_read_unlock();

	return NULL;
}

int hci_disconnect(struct hci_conn *conn, __u8 reason);
bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);
void hci_conn_check_pending(struct hci_dev *hdev);

struct hci_chan *hci_chan_create(struct hci_conn *conn);
void hci_chan_del(struct hci_chan *chan);
void hci_chan_list_flush(struct hci_conn *conn);
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);

struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, u8 sec_level, u16 conn_timeout,
				u8 role);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator);
int hci_conn_switch_role(struct hci_conn *conn, __u8 role);

void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active);

void hci_le_conn_failed(struct hci_conn *conn, u8 status);

/*
 * hci_conn_get() and hci_conn_put() control the lifetime of the hci_conn
 * object itself. They do not guarantee that the connection is up or usable;
 * they only guarantee that the object stays allocated and can be
 * dereferenced, so its locks, list heads and other constant data may be
 * used.
 * Before accessing runtime data, you _must_ lock the object and then verify
 * that it is still running. As soon as you release the lock, the connection
 * may be dropped again.
 *
 * hci_conn_hold() and hci_conn_drop(), on the other hand, control how long
 * the underlying baseband connection is kept alive. Every channel running on
 * top of an hci_conn takes a hold to prevent the link from being torn down.
 * While you hold the connection, you must also own a valid object reference,
 * either via hci_conn_get() or the initial reference from hci_conn_add().
 * Note that the hold()/drop() refcount is known to drop below zero at times;
 * this does not break anything, but it is the reason hold()/drop() cannot
 * take object references internally and instead require the caller to
 * already own one (FIXME). See the usage sketch after hci_conn_drop() below.
 */

static inline struct hci_conn *hci_conn_get(struct hci_conn *conn)
{
	get_device(&conn->dev);
	return conn;
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	put_device(&conn->dev);
}

static inline void hci_conn_hold(struct hci_conn *conn)
{
	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));

	atomic_inc(&conn->refcnt);
	cancel_delayed_work(&conn->disc_work);
}

static inline void hci_conn_drop(struct hci_conn *conn)
{
	BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt));

	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;

		switch (conn->type) {
		case ACL_LINK:
		case LE_LINK:
			cancel_delayed_work(&conn->idle_work);
			if (conn->state == BT_CONNECTED) {
				timeo = conn->disc_timeout;
				if (!conn->out)
					timeo *= 2;
			} else {
				timeo = 0;
			}
			break;

		case AMP_LINK:
			timeo = conn->disc_timeout;
			break;

		default:
			timeo = 0;
			break;
		}

		cancel_delayed_work(&conn->disc_work);
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->disc_work, timeo);
	}
}
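
/* Usage sketch (illustrative only, not a real caller): a hypothetical user
 * that needs an established link to stay up while it works on it pairs
 * hold()/drop() around the active use, on top of a valid object reference:
 *
 *	struct hci_conn *conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
 *
 *	if (conn) {
 *		hci_conn_hold(conn);	(keeps the link from being torn down)
 *		... transfer data ...
 *		hci_conn_drop(conn);	(re-arms the disconnect timer)
 *	}
 *
 * This only sketches the reference rules; real callers also need the
 * locking described in the comment above hci_conn_get().
 */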

/* ----- HCI Devices ----- */
static inline void hci_dev_put(struct hci_dev *d)
{
	BT_DBG("%s orig refcnt %d", d->name,
	       atomic_read(&d->dev.kobj.kref.refcount));

	put_device(&d->dev);
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	BT_DBG("%s orig refcnt %d", d->name,
	       atomic_read(&d->dev.kobj.kref.refcount));

	get_device(&d->dev);
	return d;
}

#define hci_dev_lock(d)		mutex_lock(&d->lock)
#define hci_dev_unlock(d)	mutex_unlock(&d->lock)

#define to_hci_dev(d) container_of(d, struct hci_dev, dev)
#define to_hci_conn(c) container_of(c, struct hci_conn, dev)

static inline void *hci_get_drvdata(struct hci_dev *hdev)
{
	return dev_get_drvdata(&hdev->dev);
}

static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
{
	dev_set_drvdata(&hdev->dev, data);
}

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src);

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
void hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_reset_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *list,
					   bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type);
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type);
void hci_bdaddr_list_clear(struct list_head *list);

struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type);
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type);
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type);
void hci_conn_params_clear_all(struct hci_dev *hdev);
void hci_conn_params_clear_disabled(struct hci_dev *hdev);

struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr,
						  u8 addr_type);

void hci_uuids_clear(struct hci_dev *hdev);

void hci_link_keys_clear(struct hci_dev *hdev);
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent);
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role);
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type);
void hci_smp_ltks_clear(struct hci_dev *hdev);
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr);

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa);
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type);
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa);
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
void hci_smp_irks_clear(struct hci_dev *hdev);

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);

void hci_remote_oob_data_clear(struct hci_dev *hdev);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type);
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256);
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);

void hci_init_sysfs(struct hci_dev *hdev);
void hci_conn_init_sysfs(struct hci_conn *conn);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->dev.parent = (pdev))
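
/* Registration sketch (illustrative only): a minimal, hypothetical transport
 * driver would register itself roughly like this, using only interfaces
 * declared in this file (my_open/my_close/my_send, my_priv and parent_dev
 * are placeholders for the driver's own callbacks and data):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_VIRTUAL;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, my_priv);
 *	SET_HCIDEV_DEV(hdev, parent_dev);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * Packets received from the controller are then fed back into the core with
 * hci_recv_frame().
 */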

/* ----- LMP capabilities ----- */
#define lmp_encrypt_capable(dev)   ((dev)->features[0][0] & LMP_ENCRYPT)
#define lmp_rswitch_capable(dev)   ((dev)->features[0][0] & LMP_RSWITCH)
#define lmp_hold_capable(dev)      ((dev)->features[0][0] & LMP_HOLD)
#define lmp_sniff_capable(dev)     ((dev)->features[0][0] & LMP_SNIFF)
#define lmp_park_capable(dev)      ((dev)->features[0][1] & LMP_PARK)
#define lmp_inq_rssi_capable(dev)  ((dev)->features[0][3] & LMP_RSSI_INQ)
#define lmp_esco_capable(dev)      ((dev)->features[0][3] & LMP_ESCO)
#define lmp_bredr_capable(dev)     (!((dev)->features[0][4] & LMP_NO_BREDR))
#define lmp_le_capable(dev)        ((dev)->features[0][4] & LMP_LE)
#define lmp_sniffsubr_capable(dev) ((dev)->features[0][5] & LMP_SNIFF_SUBR)
#define lmp_pause_enc_capable(dev) ((dev)->features[0][5] & LMP_PAUSE_ENC)
#define lmp_ext_inq_capable(dev)   ((dev)->features[0][6] & LMP_EXT_INQ)
#define lmp_le_br_capable(dev)     (!!((dev)->features[0][6] & LMP_SIMUL_LE_BR))
#define lmp_ssp_capable(dev)       ((dev)->features[0][6] & LMP_SIMPLE_PAIR)
#define lmp_no_flush_capable(dev)  ((dev)->features[0][6] & LMP_NO_FLUSH)
#define lmp_lsto_capable(dev)      ((dev)->features[0][7] & LMP_LSTO)
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev)  ((dev)->features[0][7] & LMP_EXTFEATURES)
#define lmp_transp_capable(dev)    ((dev)->features[0][2] & LMP_TRANSPARENT)

/* ----- Extended LMP capabilities ----- */
#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
#define lmp_csb_slave_capable(dev)  ((dev)->features[2][0] & LMP_CSB_SLAVE)
#define lmp_sync_train_capable(dev) ((dev)->features[2][0] & LMP_SYNC_TRAIN)
#define lmp_sync_scan_capable(dev)  ((dev)->features[2][0] & LMP_SYNC_SCAN)
#define lmp_sc_capable(dev)         ((dev)->features[2][1] & LMP_SC)
#define lmp_ping_capable(dev)       ((dev)->features[2][1] & LMP_PING)

/* ----- Host capabilities ----- */
#define lmp_host_ssp_capable(dev)  ((dev)->features[1][0] & LMP_HOST_SSP)
#define lmp_host_sc_capable(dev)   ((dev)->features[1][0] & LMP_HOST_SC)
#define lmp_host_le_capable(dev)   (!!((dev)->features[1][0] & LMP_HOST_LE))
#define lmp_host_le_br_capable(dev) (!!((dev)->features[1][0] & LMP_HOST_LE_BREDR))

#define hdev_is_powered(dev)   (test_bit(HCI_UP, &(dev)->flags) && \
				!hci_dev_test_flag(dev, HCI_AUTO_OFF))
#define bredr_sc_enabled(dev)  (lmp_sc_capable(dev) && \
				hci_dev_test_flag(dev, HCI_SC_ENABLED))
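
/* Usage sketch (illustrative only): controller features live on feature
 * page 0, host features on page 1 and extended controller features on
 * page 2, so code that depends on a capability often checks both the
 * controller and the host side, along the lines of:
 *
 *	if (lmp_ssp_capable(hdev) && lmp_host_ssp_capable(hdev))
 *		... Secure Simple Pairing may be used ...
 *
 *	if (hdev_is_powered(hdev) && bredr_sc_enabled(hdev))
 *		... Secure Connections over BR/EDR is available ...
 */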

/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER             0x01

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
					__u8 type, __u8 *flags)
{
	switch (type) {
	case ACL_LINK:
		return l2cap_connect_ind(hdev, bdaddr);

	case SCO_LINK:
	case ESCO_LINK:
		return sco_connect_ind(hdev, bdaddr, flags);

	default:
		BT_ERR("unknown link type %d", type);
		return -EINVAL;
	}
}

static inline int hci_proto_disconn_ind(struct hci_conn *conn)
{
	if (conn->type != ACL_LINK && conn->type != LE_LINK)
		return HCI_ERROR_REMOTE_USER_TERM;

	return l2cap_disconn_ind(conn);
}

/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	void (*disconn_cfm)	(struct hci_conn *conn, __u8 status);
	void (*security_cfm)	(struct hci_conn *conn, __u8 status,
								__u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};

static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->connect_cfm)
			cb->connect_cfm(conn, status);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->connect_cfm_cb)
		conn->connect_cfm_cb(conn, status);
}

static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->disconn_cfm)
			cb->disconn_cfm(conn, reason);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->disconn_cfm_cb)
		conn->disconn_cfm_cb(conn, reason);
}

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;
	__u8 encrypt;

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return;

	encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->security_cfm_cb)
		conn->security_cfm_cb(conn, status);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status,
								__u8 encrypt)
{
	struct hci_cb *cb;

	if (conn->sec_level == BT_SECURITY_SDP)
		conn->sec_level = BT_SECURITY_LOW;

	if (conn->pending_sec_level > conn->sec_level)
		conn->sec_level = conn->pending_sec_level;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->security_cfm)
			cb->security_cfm(conn, status, encrypt);
	}
	mutex_unlock(&hci_cb_list_lock);

	if (conn->security_cfm_cb)
		conn->security_cfm_cb(conn, status);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	mutex_unlock(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
								__u8 role)
{
	struct hci_cb *cb;

	mutex_lock(&hci_cb_list_lock);
	list_for_each_entry(cb, &hci_cb_list, list) {
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	mutex_unlock(&hci_cb_list_lock);
}

static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
{
	size_t parsed = 0;

	if (data_len < 2)
		return false;

	while (parsed < data_len - 1) {
		u8 field_len = data[0];

		if (field_len == 0)
			break;

		parsed += field_len + 1;

		if (parsed > data_len)
			break;

		if (data[1] == type)
			return true;

		data += field_len + 1;
	}

	return false;
}
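
/* Usage sketch (illustrative only): EIR and advertising payloads are a
 * sequence of { length, type, data } structures, which is exactly what the
 * loop above walks. Checking whether a received EIR block carries a complete
 * local name could look like:
 *
 *	if (eir_has_data_type(eir, eir_len, EIR_NAME_COMPLETE))
 *		... the remote device included its full name ...
 */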

static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
{
	if (addr_type != ADDR_LE_DEV_RANDOM)
		return false;

	/* Resolvable private addresses have the two most significant
	 * bits of the address set to 0b01.
	 */
	if ((bdaddr->b[5] & 0xc0) == 0x40)
		return true;

	return false;
}

static inline bool hci_is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type: the two most significant
	 * bits of the address are set to 0b11.
	 */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}

static inline struct smp_irk *hci_get_irk(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 addr_type)
{
	if (!hci_bdaddr_is_rpa(bdaddr, addr_type))
		return NULL;

	return hci_find_irk_by_rpa(hdev, bdaddr);
}

static inline int hci_check_conn_params(u16 min, u16 max, u16 latency,
					u16 to_multiplier)
{
	u16 max_latency;

	if (min > max || min < 6 || max > 3200)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	if (max >= to_multiplier * 8)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
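
/* Worked example (illustrative only): with min = 24 and max = 40 (30 ms and
 * 50 ms in 1.25 ms units) and to_multiplier = 42 (a 420 ms supervision
 * timeout in 10 ms units), the check above computes
 * max_latency = (42 * 8 / 40) - 1 = 7, so a slave latency of up to 7
 * connection events is accepted and a latency of 8 is rejected.
 */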

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout);

int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param);
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags);
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);

void hci_sock_dev_event(struct hci_dev *hdev, int event);

#define HCI_MGMT_VAR_LEN	BIT(0)
#define HCI_MGMT_NO_HDEV	BIT(1)
#define HCI_MGMT_UNTRUSTED	BIT(2)
#define HCI_MGMT_UNCONFIGURED	BIT(3)

struct hci_mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	size_t data_len;
	unsigned long flags;
};

struct hci_mgmt_chan {
	struct list_head list;
	unsigned short channel;
	size_t handler_count;
	const struct hci_mgmt_handler *handlers;
	void (*hdev_init) (struct sock *sk, struct hci_dev *hdev);
};

int hci_mgmt_chan_register(struct hci_mgmt_chan *c);
void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);

/* Management interface */
#define DISCOV_TYPE_BREDR		(BIT(BDADDR_BREDR))
#define DISCOV_TYPE_LE			(BIT(BDADDR_LE_PUBLIC) | \
					 BIT(BDADDR_LE_RANDOM))
#define DISCOV_TYPE_INTERLEAVED		(BIT(BDADDR_BREDR) | \
					 BIT(BDADDR_LE_PUBLIC) | \
					 BIT(BDADDR_LE_RANDOM))

/* These LE scan and inquiry parameters were chosen according to the LE
 * General Discovery Procedure specification.
 */
#define DISCOV_LE_SCAN_WIN		0x12	/* 11.25 msec (18 * 0.625 msec) */
#define DISCOV_LE_SCAN_INT		0x12	/* 11.25 msec (18 * 0.625 msec) */
#define DISCOV_LE_TIMEOUT		10240	/* msec */
#define DISCOV_INTERLEAVED_TIMEOUT	5120	/* msec */
#define DISCOV_INTERLEAVED_INQUIRY_LEN	0x04	/* 5.12 sec (4 * 1.28 sec) */
#define DISCOV_BREDR_INQUIRY_LEN	0x08	/* 10.24 sec (8 * 1.28 sec) */
#define DISCOV_LE_RESTART_DELAY		msecs_to_jiffies(200)	/* msec */

int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
void mgmt_set_powered_failed(struct hci_dev *hdev, int err);
int mgmt_powered(struct hci_dev *hdev, u8 powered);
int mgmt_update_adv_data(struct hci_dev *hdev);
void mgmt_discoverable_timeout(struct hci_dev *hdev);
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent);
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len);
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected);
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status);
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status);
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure);
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status);
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status);
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint);
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status);
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type);
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status);
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered);
void mgmt_auth_failed(struct hci_conn *conn, u8 status);
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status);
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status);
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status);
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status);
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len);
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len);
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
bool mgmt_powering_down(struct hci_dev *hdev);
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk);
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent);
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout);
void mgmt_reenable_advertising(struct hci_dev *hdev);
void mgmt_smp_complete(struct hci_conn *conn, bool complete);

u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
							__u8 ltk[16]);

void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type);

#define SCO_AIRMODE_MASK       0x0003
#define SCO_AIRMODE_CVSD       0x0000
#define SCO_AIRMODE_TRANSP     0x0003

#endif /* __HCI_CORE_H */