xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 72000df2c01d6927319ad7e3f43460f6d0227de5)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 
39 #define MGMT_VERSION	1
40 #define MGMT_REVISION	9
41 
/* Opcodes of every mgmt command this implementation handles. The table
 * is copied verbatim into the Read Commands reply (see read_commands()),
 * so the order here is the order user space sees.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
};
103 
/* Opcodes of every mgmt event this implementation may emit. Reported to
 * user space, together with mgmt_commands[], via read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
};
138 
139 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
140 
141 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
142 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
143 
/* State for a mgmt command whose completion is still outstanding.
 * Entries are linked on hdev->mgmt_pending and looked up by opcode
 * (see mgmt_pending_find() / mgmt_pending_find_data()).
 */
struct mgmt_pending_cmd {
	struct list_head list;		/* node in hdev->mgmt_pending */
	u16 opcode;			/* mgmt opcode this entry tracks */
	int index;			/* controller index — presumably hdev->id; set by creator */
	void *param;			/* command parameter buffer (e.g. struct mgmt_mode) */
	size_t param_len;		/* length of @param in bytes */
	struct sock *sk;		/* originating mgmt socket */
	void *user_data;		/* opaque context, matched in mgmt_pending_find_data() */
	/* called when the command finishes; @status is a MGMT_STATUS_* code */
	int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status);
};
154 
155 /* HCI to MGMT error code conversion table */
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code (see mgmt_status()); each
 * entry's comment names the HCI error it translates. Codes beyond the
 * table fall back to MGMT_STATUS_FAILED.
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
219 
220 static u8 mgmt_status(u8 hci_status)
221 {
222 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
223 		return mgmt_status_table[hci_status];
224 
225 	return MGMT_STATUS_FAILED;
226 }
227 
228 static int mgmt_send_event(u16 event, struct hci_dev *hdev,
229 			   unsigned short channel, void *data, u16 data_len,
230 			   int flag, struct sock *skip_sk)
231 {
232 	struct sk_buff *skb;
233 	struct mgmt_hdr *hdr;
234 
235 	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
236 	if (!skb)
237 		return -ENOMEM;
238 
239 	hdr = (void *) skb_put(skb, sizeof(*hdr));
240 	hdr->opcode = cpu_to_le16(event);
241 	if (hdev)
242 		hdr->index = cpu_to_le16(hdev->id);
243 	else
244 		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
245 	hdr->len = cpu_to_le16(data_len);
246 
247 	if (data)
248 		memcpy(skb_put(skb, data_len), data, data_len);
249 
250 	/* Time stamp */
251 	__net_timestamp(skb);
252 
253 	hci_send_to_channel(channel, skb, flag, skip_sk);
254 	kfree_skb(skb);
255 
256 	return 0;
257 }
258 
259 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
260 			    u16 len, int flag)
261 {
262 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
263 			       flag, NULL);
264 }
265 
266 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
267 			      u16 len, int flag, struct sock *skip_sk)
268 {
269 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
270 			       flag, skip_sk);
271 }
272 
273 static int mgmt_generic_event(u16 event, struct hci_dev *hdev, void *data,
274 			      u16 len, struct sock *skip_sk)
275 {
276 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
277 			       HCI_MGMT_GENERIC_EVENTS, skip_sk);
278 }
279 
280 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
281 		      struct sock *skip_sk)
282 {
283 	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
284 			       HCI_SOCK_TRUSTED, skip_sk);
285 }
286 
287 static int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
288 {
289 	struct sk_buff *skb;
290 	struct mgmt_hdr *hdr;
291 	struct mgmt_ev_cmd_status *ev;
292 	int err;
293 
294 	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
295 
296 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
297 	if (!skb)
298 		return -ENOMEM;
299 
300 	hdr = (void *) skb_put(skb, sizeof(*hdr));
301 
302 	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
303 	hdr->index = cpu_to_le16(index);
304 	hdr->len = cpu_to_le16(sizeof(*ev));
305 
306 	ev = (void *) skb_put(skb, sizeof(*ev));
307 	ev->status = status;
308 	ev->opcode = cpu_to_le16(cmd);
309 
310 	err = sock_queue_rcv_skb(sk, skb);
311 	if (err < 0)
312 		kfree_skb(skb);
313 
314 	return err;
315 }
316 
317 static int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
318 			     void *rp, size_t rp_len)
319 {
320 	struct sk_buff *skb;
321 	struct mgmt_hdr *hdr;
322 	struct mgmt_ev_cmd_complete *ev;
323 	int err;
324 
325 	BT_DBG("sock %p", sk);
326 
327 	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
328 	if (!skb)
329 		return -ENOMEM;
330 
331 	hdr = (void *) skb_put(skb, sizeof(*hdr));
332 
333 	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
334 	hdr->index = cpu_to_le16(index);
335 	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
336 
337 	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
338 	ev->opcode = cpu_to_le16(cmd);
339 	ev->status = status;
340 
341 	if (rp)
342 		memcpy(ev->data, rp, rp_len);
343 
344 	err = sock_queue_rcv_skb(sk, skb);
345 	if (err < 0)
346 		kfree_skb(skb);
347 
348 	return err;
349 }
350 
351 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
352 			u16 data_len)
353 {
354 	struct mgmt_rp_read_version rp;
355 
356 	BT_DBG("sock %p", sk);
357 
358 	rp.version = MGMT_VERSION;
359 	rp.revision = cpu_to_le16(MGMT_REVISION);
360 
361 	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
362 				 &rp, sizeof(rp));
363 }
364 
365 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
366 			 u16 data_len)
367 {
368 	struct mgmt_rp_read_commands *rp;
369 	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
370 	const u16 num_events = ARRAY_SIZE(mgmt_events);
371 	__le16 *opcode;
372 	size_t rp_size;
373 	int i, err;
374 
375 	BT_DBG("sock %p", sk);
376 
377 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
378 
379 	rp = kmalloc(rp_size, GFP_KERNEL);
380 	if (!rp)
381 		return -ENOMEM;
382 
383 	rp->num_commands = cpu_to_le16(num_commands);
384 	rp->num_events = cpu_to_le16(num_events);
385 
386 	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
387 		put_unaligned_le16(mgmt_commands[i], opcode);
388 
389 	for (i = 0; i < num_events; i++, opcode++)
390 		put_unaligned_le16(mgmt_events[i], opcode);
391 
392 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
393 				rp, rp_size);
394 	kfree(rp);
395 
396 	return err;
397 }
398 
/* Handle Read Controller Index List: report the indexes of all
 * configured BR/EDR controllers. Uses a two-pass scheme under
 * hci_dev_list_lock: first count to size the reply, then fill it in.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Pass 1: upper bound on the number of reported controllers */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Pass 2: fill in indexes. The extra filters below only ever
	 * skip devices, so count cannot exceed the allocation above.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still being set up / configured or
		 * claimed for exclusive user-channel access.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to what was actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
458 
/* Handle Read Unconfigured Controller Index List: like
 * read_index_list(), but reports BR/EDR controllers that still have the
 * HCI_UNCONFIGURED flag set. Same two-pass count/fill scheme under
 * hci_dev_list_lock.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Pass 1: upper bound on the number of reported controllers */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Pass 2: fill in indexes; the extra filters only skip devices,
	 * so count stays within the allocation above.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to what was actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
518 
/* Handle Read Extended Controller Index List: report BR/EDR and AMP
 * controllers with a per-entry type (0x00 configured BR/EDR, 0x01
 * unconfigured BR/EDR, 0x02 AMP) and bus. As a side effect, switches
 * the calling socket from legacy index events to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Pass 1: upper bound on the number of reported controllers */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR || d->dev_type == HCI_AMP)
			count++;
	}

	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);
	/* GFP_ATOMIC because hci_dev_list_lock is held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Pass 2: fill in entries; the filters below only skip devices,
	 * so count stays within the allocation above.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to what was actually added */
	rp_len = sizeof(*rp) + (sizeof(rp->entry[0]) * count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
594 
595 static bool is_configured(struct hci_dev *hdev)
596 {
597 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
598 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
599 		return false;
600 
601 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
602 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
603 		return false;
604 
605 	return true;
606 }
607 
608 static __le32 get_missing_options(struct hci_dev *hdev)
609 {
610 	u32 options = 0;
611 
612 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
613 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
614 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
615 
616 	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
617 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
618 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
619 
620 	return cpu_to_le32(options);
621 }
622 
623 static int new_options(struct hci_dev *hdev, struct sock *skip)
624 {
625 	__le32 options = get_missing_options(hdev);
626 
627 	return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
628 				  sizeof(options), skip);
629 }
630 
631 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
632 {
633 	__le32 options = get_missing_options(hdev);
634 
635 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
636 				 sizeof(options));
637 }
638 
639 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
640 			    void *data, u16 data_len)
641 {
642 	struct mgmt_rp_read_config_info rp;
643 	u32 options = 0;
644 
645 	BT_DBG("sock %p %s", sk, hdev->name);
646 
647 	hci_dev_lock(hdev);
648 
649 	memset(&rp, 0, sizeof(rp));
650 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
651 
652 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
653 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
654 
655 	if (hdev->set_bdaddr)
656 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
657 
658 	rp.supported_options = cpu_to_le32(options);
659 	rp.missing_options = get_missing_options(hdev);
660 
661 	hci_dev_unlock(hdev);
662 
663 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
664 				 &rp, sizeof(rp));
665 }
666 
667 static u32 get_supported_settings(struct hci_dev *hdev)
668 {
669 	u32 settings = 0;
670 
671 	settings |= MGMT_SETTING_POWERED;
672 	settings |= MGMT_SETTING_BONDABLE;
673 	settings |= MGMT_SETTING_DEBUG_KEYS;
674 	settings |= MGMT_SETTING_CONNECTABLE;
675 	settings |= MGMT_SETTING_DISCOVERABLE;
676 
677 	if (lmp_bredr_capable(hdev)) {
678 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
679 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
680 		settings |= MGMT_SETTING_BREDR;
681 		settings |= MGMT_SETTING_LINK_SECURITY;
682 
683 		if (lmp_ssp_capable(hdev)) {
684 			settings |= MGMT_SETTING_SSP;
685 			settings |= MGMT_SETTING_HS;
686 		}
687 
688 		if (lmp_sc_capable(hdev))
689 			settings |= MGMT_SETTING_SECURE_CONN;
690 	}
691 
692 	if (lmp_le_capable(hdev)) {
693 		settings |= MGMT_SETTING_LE;
694 		settings |= MGMT_SETTING_ADVERTISING;
695 		settings |= MGMT_SETTING_SECURE_CONN;
696 		settings |= MGMT_SETTING_PRIVACY;
697 		settings |= MGMT_SETTING_STATIC_ADDRESS;
698 	}
699 
700 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
701 	    hdev->set_bdaddr)
702 		settings |= MGMT_SETTING_CONFIGURATION;
703 
704 	return settings;
705 }
706 
/* Build the bitmask of settings currently active on the controller by
 * sampling the power state and the relevant HCI dev flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never bet set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		/* Only report the setting when a static address exists */
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
774 
775 #define PNP_INFO_SVCLASS_ID		0x1200
776 
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data, using at most @len bytes. Starts as EIR_UUID16_ALL and is
 * downgraded to EIR_UUID16_SOME if the buffer runs out. Returns the
 * advanced write pointer.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one 16-bit UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias lives at bytes 12-13 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		/* Skip values below the service-class range */
		if (uuid16 < 0x1100)
			continue;

		/* PnP Information is advertised via EIR_DEVICE_ID instead */
		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first match; the
		 * length octet starts at 1 (the type byte) and grows as
		 * UUIDs are appended.
		 */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
818 
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data, using at most @len bytes. Same header/truncation scheme as
 * create_uuid16_list(). Returns the advanced write pointer.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one 32-bit UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit alias lives at bytes 12-15 of the 128-bit form */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
851 
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data, using at most @len bytes. Same header/truncation scheme as
 * create_uuid16_list(). Returns the advanced write pointer.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte field header plus one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Lazily emit the field header on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
884 
885 static struct mgmt_pending_cmd *mgmt_pending_find(u16 opcode,
886 						  struct hci_dev *hdev)
887 {
888 	struct mgmt_pending_cmd *cmd;
889 
890 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
891 		if (cmd->opcode == opcode)
892 			return cmd;
893 	}
894 
895 	return NULL;
896 }
897 
898 static struct mgmt_pending_cmd *mgmt_pending_find_data(u16 opcode,
899 						       struct hci_dev *hdev,
900 						       const void *data)
901 {
902 	struct mgmt_pending_cmd *cmd;
903 
904 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
905 		if (cmd->user_data != data)
906 			continue;
907 		if (cmd->opcode == opcode)
908 			return cmd;
909 	}
910 
911 	return NULL;
912 }
913 
914 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
915 {
916 	u8 ad_len = 0;
917 	size_t name_len;
918 
919 	name_len = strlen(hdev->dev_name);
920 	if (name_len > 0) {
921 		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
922 
923 		if (name_len > max_len) {
924 			name_len = max_len;
925 			ptr[1] = EIR_NAME_SHORT;
926 		} else
927 			ptr[1] = EIR_NAME_COMPLETE;
928 
929 		ptr[0] = name_len + 1;
930 
931 		memcpy(ptr + 2, hdev->dev_name, name_len);
932 
933 		ad_len += (name_len + 2);
934 		ptr += (name_len + 2);
935 	}
936 
937 	return ad_len;
938 }
939 
/* Queue an LE Set Scan Response Data command on @req if the freshly
 * built data differs from the cached copy in hdev. No-op when LE is
 * disabled or nothing changed.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round-trip if the data is unchanged */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Update the cache before queueing the command */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
964 
965 static u8 get_adv_discov_flags(struct hci_dev *hdev)
966 {
967 	struct mgmt_pending_cmd *cmd;
968 
969 	/* If there's a pending mgmt command the flags will not yet have
970 	 * their final values, so check for this first.
971 	 */
972 	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
973 	if (cmd) {
974 		struct mgmt_mode *cp = cmd->param;
975 		if (cp->val == 0x01)
976 			return LE_AD_GENERAL;
977 		else if (cp->val == 0x02)
978 			return LE_AD_LIMITED;
979 	} else {
980 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
981 			return LE_AD_LIMITED;
982 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
983 			return LE_AD_GENERAL;
984 	}
985 
986 	return 0;
987 }
988 
989 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
990 {
991 	u8 ad_len = 0, flags = 0;
992 
993 	flags |= get_adv_discov_flags(hdev);
994 
995 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
996 		flags |= LE_AD_NO_BREDR;
997 
998 	if (flags) {
999 		BT_DBG("adv flags 0x%02x", flags);
1000 
1001 		ptr[0] = 2;
1002 		ptr[1] = EIR_FLAGS;
1003 		ptr[2] = flags;
1004 
1005 		ad_len += 3;
1006 		ptr += 3;
1007 	}
1008 
1009 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1010 		ptr[0] = 2;
1011 		ptr[1] = EIR_TX_POWER;
1012 		ptr[2] = (u8) hdev->adv_tx_power;
1013 
1014 		ad_len += 3;
1015 		ptr += 3;
1016 	}
1017 
1018 	return ad_len;
1019 }
1020 
/* Queue an LE Set Advertising Data command on @req if the freshly built
 * data differs from the cached copy in hdev. No-op when LE is disabled
 * or nothing changed. Mirrors update_scan_rsp_data().
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round-trip if the data is unchanged */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Update the cache before queueing the command */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1045 
1046 int mgmt_update_adv_data(struct hci_dev *hdev)
1047 {
1048 	struct hci_request req;
1049 
1050 	hci_req_init(&req, hdev);
1051 	update_adv_data(&req);
1052 
1053 	return hci_req_run(&req, NULL);
1054 }
1055 
/* Assemble the extended inquiry response buffer in @data: local name,
 * TX power, Device ID, then the 16/32/128-bit UUID lists, each capped
 * to the remaining space in HCI_MAX_EIR_LENGTH.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID field: source, vendor, product, version */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* Each list helper advances ptr and respects the remaining space */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
1103 
1104 static void update_eir(struct hci_request *req)
1105 {
1106 	struct hci_dev *hdev = req->hdev;
1107 	struct hci_cp_write_eir cp;
1108 
1109 	if (!hdev_is_powered(hdev))
1110 		return;
1111 
1112 	if (!lmp_ext_inq_capable(hdev))
1113 		return;
1114 
1115 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1116 		return;
1117 
1118 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1119 		return;
1120 
1121 	memset(&cp, 0, sizeof(cp));
1122 
1123 	create_eir(hdev, cp.data);
1124 
1125 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1126 		return;
1127 
1128 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
1129 
1130 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1131 }
1132 
1133 static u8 get_service_classes(struct hci_dev *hdev)
1134 {
1135 	struct bt_uuid *uuid;
1136 	u8 val = 0;
1137 
1138 	list_for_each_entry(uuid, &hdev->uuids, list)
1139 		val |= uuid->svc_hint;
1140 
1141 	return val;
1142 }
1143 
1144 static void update_class(struct hci_request *req)
1145 {
1146 	struct hci_dev *hdev = req->hdev;
1147 	u8 cod[3];
1148 
1149 	BT_DBG("%s", hdev->name);
1150 
1151 	if (!hdev_is_powered(hdev))
1152 		return;
1153 
1154 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1155 		return;
1156 
1157 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1158 		return;
1159 
1160 	cod[0] = hdev->minor_class;
1161 	cod[1] = hdev->major_class;
1162 	cod[2] = get_service_classes(hdev);
1163 
1164 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1165 		cod[1] |= 0x20;
1166 
1167 	if (memcmp(cod, hdev->dev_class, 3) == 0)
1168 		return;
1169 
1170 	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1171 }
1172 
1173 static bool get_connectable(struct hci_dev *hdev)
1174 {
1175 	struct mgmt_pending_cmd *cmd;
1176 
1177 	/* If there's a pending mgmt command the flag will not yet have
1178 	 * it's final value, so check for this first.
1179 	 */
1180 	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1181 	if (cmd) {
1182 		struct mgmt_mode *cp = cmd->param;
1183 		return cp->val;
1184 	}
1185 
1186 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1187 }
1188 
1189 static void disable_advertising(struct hci_request *req)
1190 {
1191 	u8 enable = 0x00;
1192 
1193 	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1194 }
1195 
/* Queue the HCI commands to (re)enable undirected LE advertising with
 * up-to-date parameters and own-address type. Does nothing while an LE
 * connection exists or if a suitable own address cannot be arranged.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Turn off any currently active advertising before the
	 * parameters and address are reprogrammed below.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* HCI_ADVERTISING_CONNECTABLE forces connectable advertising;
	 * otherwise derive it from the (possibly pending) connectable
	 * setting.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
		connectable = true;
	else
		connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1239 
1240 static void service_cache_off(struct work_struct *work)
1241 {
1242 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1243 					    service_cache.work);
1244 	struct hci_request req;
1245 
1246 	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1247 		return;
1248 
1249 	hci_req_init(&req, hdev);
1250 
1251 	hci_dev_lock(hdev);
1252 
1253 	update_eir(&req);
1254 	update_class(&req);
1255 
1256 	hci_dev_unlock(hdev);
1257 
1258 	hci_req_run(&req, NULL);
1259 }
1260 
/* Delayed work: the Resolvable Private Address lifetime elapsed. Mark
 * the RPA expired and, when advertising is active, restart advertising
 * so that a new address gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1281 
1282 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1283 {
1284 	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1285 		return;
1286 
1287 	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1288 	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1289 
1290 	/* Non-mgmt controlled devices get this bit set
1291 	 * implicitly so that pairing works for them, however
1292 	 * for mgmt we require user-space to explicitly enable
1293 	 * it
1294 	 */
1295 	hci_dev_clear_flag(hdev, HCI_BONDABLE);
1296 }
1297 
1298 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1299 				void *data, u16 data_len)
1300 {
1301 	struct mgmt_rp_read_info rp;
1302 
1303 	BT_DBG("sock %p %s", sk, hdev->name);
1304 
1305 	hci_dev_lock(hdev);
1306 
1307 	memset(&rp, 0, sizeof(rp));
1308 
1309 	bacpy(&rp.bdaddr, &hdev->bdaddr);
1310 
1311 	rp.version = hdev->hci_ver;
1312 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1313 
1314 	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1315 	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1316 
1317 	memcpy(rp.dev_class, hdev->dev_class, 3);
1318 
1319 	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1320 	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1321 
1322 	hci_dev_unlock(hdev);
1323 
1324 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1325 				 sizeof(rp));
1326 }
1327 
1328 static void mgmt_pending_free(struct mgmt_pending_cmd *cmd)
1329 {
1330 	sock_put(cmd->sk);
1331 	kfree(cmd->param);
1332 	kfree(cmd);
1333 }
1334 
1335 static struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1336 						 struct hci_dev *hdev,
1337 						 void *data, u16 len)
1338 {
1339 	struct mgmt_pending_cmd *cmd;
1340 
1341 	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1342 	if (!cmd)
1343 		return NULL;
1344 
1345 	cmd->opcode = opcode;
1346 	cmd->index = hdev->id;
1347 
1348 	cmd->param = kmemdup(data, len, GFP_KERNEL);
1349 	if (!cmd->param) {
1350 		kfree(cmd);
1351 		return NULL;
1352 	}
1353 
1354 	cmd->param_len = len;
1355 
1356 	cmd->sk = sk;
1357 	sock_hold(sk);
1358 
1359 	list_add(&cmd->list, &hdev->mgmt_pending);
1360 
1361 	return cmd;
1362 }
1363 
1364 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1365 				 void (*cb)(struct mgmt_pending_cmd *cmd,
1366 					    void *data),
1367 				 void *data)
1368 {
1369 	struct mgmt_pending_cmd *cmd, *tmp;
1370 
1371 	list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1372 		if (opcode > 0 && cmd->opcode != opcode)
1373 			continue;
1374 
1375 		cb(cmd, data);
1376 	}
1377 }
1378 
/* Unlink a pending command from its device list and free it. */
static void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1384 
1385 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1386 {
1387 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1388 
1389 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1390 				 sizeof(settings));
1391 }
1392 
1393 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1394 {
1395 	BT_DBG("%s status 0x%02x", hdev->name, status);
1396 
1397 	if (hci_conn_count(hdev) == 0) {
1398 		cancel_delayed_work(&hdev->power_off);
1399 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1400 	}
1401 }
1402 
/* Queue the HCI commands needed to abort the current discovery
 * procedure, depending on the discovery state. Returns true when at
 * least one stop command was queued, false when there was nothing to
 * stop.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		/* Either a BR/EDR inquiry or an LE scan is running */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Cancel the outstanding remote name request, if any */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1444 
/* Queue HCI commands that bring the controller to a quiescent state
 * before power off: disable page/inquiry scan and advertising, stop
 * any discovery, and disconnect, cancel or reject every known
 * connection. Returns the hci_req_run() result; -ENODATA means no
 * commands were queued.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established connection: disconnect it */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempt: cancel it */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection pending acceptance: reject it */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1503 
/* Handler for the Set Powered mgmt command. Powering on is deferred to
 * the power_on work item; powering off first queues HCI cleanup
 * commands and then schedules the delayed power_off work.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered operation may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Device is up only because of auto-power-on: cancel the
	 * pending auto power-off and, when powering on, let
	 * mgmt_powered() handle the response since the controller is
	 * already up.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Requested state already matches: just confirm the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1569 
1570 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1571 {
1572 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1573 
1574 	return mgmt_generic_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1575 				  sizeof(ev), skip);
1576 }
1577 
/* Broadcast the current settings to all mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1582 
/* Shared context for mgmt_pending_foreach() callbacks. @sk records the
 * first matched command's socket (a reference is held via sock_hold()
 * in settings_rsp()); @mgmt_status carries a status code for callbacks
 * that need one.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1588 
1589 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1590 {
1591 	struct cmd_lookup *match = data;
1592 
1593 	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1594 
1595 	list_del(&cmd->list);
1596 
1597 	if (match->sk == NULL) {
1598 		match->sk = cmd->sk;
1599 		sock_hold(match->sk);
1600 	}
1601 
1602 	mgmt_pending_free(cmd);
1603 }
1604 
1605 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1606 {
1607 	u8 *status = data;
1608 
1609 	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1610 	mgmt_pending_remove(cmd);
1611 }
1612 
1613 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1614 {
1615 	if (cmd->cmd_complete) {
1616 		u8 *status = data;
1617 
1618 		cmd->cmd_complete(cmd, *status);
1619 		mgmt_pending_remove(cmd);
1620 
1621 		return;
1622 	}
1623 
1624 	cmd_status_rsp(cmd, data);
1625 }
1626 
/* Generic completion: echo the original command parameters back as the
 * command complete payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1632 
/* Completion for address-based commands: reply with only the leading
 * mgmt_addr_info portion of the stored parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1638 
1639 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1640 {
1641 	if (!lmp_bredr_capable(hdev))
1642 		return MGMT_STATUS_NOT_SUPPORTED;
1643 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1644 		return MGMT_STATUS_REJECTED;
1645 	else
1646 		return MGMT_STATUS_SUCCESS;
1647 }
1648 
1649 static u8 mgmt_le_support(struct hci_dev *hdev)
1650 {
1651 	if (!lmp_le_capable(hdev))
1652 		return MGMT_STATUS_NOT_SUPPORTED;
1653 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1654 		return MGMT_STATUS_REJECTED;
1655 	else
1656 		return MGMT_STATUS_SUCCESS;
1657 }
1658 
/* HCI request completion for set_discoverable(): update the
 * HCI_DISCOVERABLE flag, answer the pending mgmt command, arm the
 * discoverable timeout and refresh class of device plus page scan.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_DISCOVERABLE);

		/* Arm the timer that turns discoverable back off */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set. Also update page scan based on whitelist
	 * entries.
	 */
	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1716 
/* Handler for the Set Discoverable mgmt command. val is 0x00 (off),
 * 0x01 (general discoverable) or 0x02 (limited discoverable, which
 * requires a non-zero timeout). The HCI work is completed in
 * set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while the device is powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* Powered off: only toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1881 
1882 static void write_fast_connectable(struct hci_request *req, bool enable)
1883 {
1884 	struct hci_dev *hdev = req->hdev;
1885 	struct hci_cp_write_page_scan_activity acp;
1886 	u8 type;
1887 
1888 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1889 		return;
1890 
1891 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1892 		return;
1893 
1894 	if (enable) {
1895 		type = PAGE_SCAN_TYPE_INTERLACED;
1896 
1897 		/* 160 msec page scan interval */
1898 		acp.interval = cpu_to_le16(0x0100);
1899 	} else {
1900 		type = PAGE_SCAN_TYPE_STANDARD;	/* default */
1901 
1902 		/* default 1.28 sec page scan */
1903 		acp.interval = cpu_to_le16(0x0800);
1904 	}
1905 
1906 	acp.window = cpu_to_le16(0x0012);
1907 
1908 	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1909 	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
1910 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1911 			    sizeof(acp), &acp);
1912 
1913 	if (hdev->page_scan_type != type)
1914 		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1915 }
1916 
/* HCI request completion for set_connectable(): commit the flag
 * changes, answer the pending mgmt command and refresh page scan,
 * advertising data and background scanning as needed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
		discov_changed = false;
	} else {
		/* Disabling connectable also clears discoverable */
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		hci_update_page_scan(hdev);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1966 
1967 static int set_connectable_update_settings(struct hci_dev *hdev,
1968 					   struct sock *sk, u8 val)
1969 {
1970 	bool changed = false;
1971 	int err;
1972 
1973 	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1974 		changed = true;
1975 
1976 	if (val) {
1977 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1978 	} else {
1979 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1980 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1981 	}
1982 
1983 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1984 	if (err < 0)
1985 		return err;
1986 
1987 	if (changed) {
1988 		hci_update_page_scan(hdev);
1989 		hci_update_background_scan(hdev);
1990 		return new_settings(hdev, sk);
1991 	}
1992 
1993 	return 0;
1994 }
1995 
/* Handler for the Set Connectable mgmt command. When powered on this
 * queues the scan enable / advertising updates and finishes in
 * set_connectable_complete(); when powered off only the flags are
 * updated via set_connectable_update_settings().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: nothing was queued, fall back to the
		 * flag-only settings update.
		 */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2091 
2092 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2093 			u16 len)
2094 {
2095 	struct mgmt_mode *cp = data;
2096 	bool changed;
2097 	int err;
2098 
2099 	BT_DBG("request for %s", hdev->name);
2100 
2101 	if (cp->val != 0x00 && cp->val != 0x01)
2102 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2103 				       MGMT_STATUS_INVALID_PARAMS);
2104 
2105 	hci_dev_lock(hdev);
2106 
2107 	if (cp->val)
2108 		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
2109 	else
2110 		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
2111 
2112 	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2113 	if (err < 0)
2114 		goto unlock;
2115 
2116 	if (changed)
2117 		err = new_settings(hdev, sk);
2118 
2119 unlock:
2120 	hci_dev_unlock(hdev);
2121 	return err;
2122 }
2123 
/* Handler for the Set Link Security mgmt command. When powered on this
 * sends Write Authentication Enable to the controller; when powered
 * off only the HCI_LINK_SECURITY flag is updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no command */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2192 
/* Handler for the Set Secure Simple Pairing mgmt command. When powered
 * on this sends Write Simple Pairing Mode to the controller; when
 * powered off only the flags are updated. Disabling SSP also disables
 * High Speed, since HS depends on SSP.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Clearing SSP also clears HS; report a change
			 * if either flag was previously set.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just reply with settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning off SSP also turns off SSP debug mode if it was on */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2273 
/* MGMT_OP_SET_HS handler: enable or disable the High Speed setting.
 *
 * HS only toggles a host-side dev flag; no HCI command is issued, so
 * the mgmt response is generated synchronously.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* HS requires BR/EDR and SSP support */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* SSP must currently be enabled for HS to be togglable */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	/* Mode parameter is strictly boolean */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An in-flight SET_SSP could change the SSP state underneath us */
	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS is only permitted while powered off */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2330 
/* HCI request completion callback for set_le(): finish every pending
 * SET_LE mgmt command with either an error status or the new settings.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail all pending SET_LE commands with the mapped error */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	/* settings_rsp presumably stores a referenced socket in match.sk
	 * (so it can be skipped in the broadcast); reference dropped below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		__hci_update_background_scan(&req);
		hci_req_run(&req, NULL);
	}

unlock:
	hci_dev_unlock(hdev);
}
2370 
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support.
 *
 * When an actual HCI write is needed the mgmt response is deferred to
 * le_enable_complete(); otherwise it is sent synchronously.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Mode parameter is strictly boolean */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	/* Host LE support as currently known to the controller */
	enabled = lmp_host_le_capable(hdev);

	/* If the controller is off, or it already matches the requested
	 * value, only the HCI_LE_ENABLED dev flag needs updating.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also disables LE advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* SET_ADVERTISING also manipulates LE state, so either pending
	 * operation makes us busy.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		/* simul (Simultaneous LE Host) stays 0x00 */
		hci_cp.simul = 0x00;
	} else {
		/* Stop any active advertising before switching LE off */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2459 
2460 /* This is a helper function to test for pending mgmt commands that can
2461  * cause CoD or EIR HCI commands. We can only allow one such pending
2462  * mgmt command at a time since otherwise we cannot easily track what
2463  * the current values are, will be, and based on that calculate if a new
2464  * HCI command needs to be sent and if yes with what value.
2465  */
2466 static bool pending_eir_or_class(struct hci_dev *hdev)
2467 {
2468 	struct mgmt_pending_cmd *cmd;
2469 
2470 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2471 		switch (cmd->opcode) {
2472 		case MGMT_OP_ADD_UUID:
2473 		case MGMT_OP_REMOVE_UUID:
2474 		case MGMT_OP_SET_DEV_CLASS:
2475 		case MGMT_OP_SET_POWERED:
2476 			return true;
2477 		}
2478 	}
2479 
2480 	return false;
2481 }
2482 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order. 16/32-bit UUIDs share the first 12 bytes
 * and differ only in the last four (see get_uuid_size() below).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2487 
2488 static u8 get_uuid_size(const u8 *uuid)
2489 {
2490 	u32 val;
2491 
2492 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2493 		return 128;
2494 
2495 	val = get_unaligned_le32(&uuid[12]);
2496 	if (val > 0xffff)
2497 		return 32;
2498 
2499 	return 16;
2500 }
2501 
/* Common completion for ADD_UUID / REMOVE_UUID / SET_DEV_CLASS: reply
 * to the pending @mgmt_op command with the current 3-byte device class.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	/* Nothing to do if the command is no longer pending */
	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2520 
/* HCI request completion for add_uuid(): finish the pending ADD_UUID */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2527 
/* MGMT_OP_ADD_UUID handler: register a service UUID and refresh the
 * Class of Device and EIR data accordingly.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	/* Remember the smallest encoding so EIR generation can pick the
	 * right UUID list.
	 */
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA means no HCI commands were queued (class and
		 * EIR unchanged), so complete immediately with success.
		 */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	/* Response is deferred to add_uuid_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2585 
2586 static bool enable_service_cache(struct hci_dev *hdev)
2587 {
2588 	if (!hdev_is_powered(hdev))
2589 		return false;
2590 
2591 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2592 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2593 				   CACHE_TIMEOUT);
2594 		return true;
2595 	}
2596 
2597 	return false;
2598 }
2599 
/* HCI request completion for remove_uuid(): finish the pending
 * REMOVE_UUID command.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2606 
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all, when
 * the all-zero wildcard UUID is given) and refresh class/EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service-cache timer was armed, the actual HCI
		 * update is deferred; respond with success right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry matching the requested UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: nothing to send, class/EIR were already
		 * up to date, so complete synchronously.
		 */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Response is deferred to remove_uuid_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2685 
/* HCI request completion for set_dev_class(): finish the pending
 * SET_DEV_CLASS command.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2692 
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor device class and
 * push the resulting Class of Device to the controller when powered.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major must be
	 * zero (they fall outside the major/minor class fields).
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* While powered off, only remember the values; they take effect
	 * at power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock around the synchronous cancel —
		 * presumably because the work handler itself takes the
		 * hdev lock (TODO confirm), then refresh EIR since the
		 * cached flush will no longer run.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: class already matches, complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Response is deferred to set_class_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2763 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the list supplied by userspace.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound chosen so that expected_len below cannot overflow
	 * the u16 arithmetic.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised key count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching the key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2845 
2846 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2847 			   u8 addr_type, struct sock *skip_sk)
2848 {
2849 	struct mgmt_ev_device_unpaired ev;
2850 
2851 	bacpy(&ev.addr.bdaddr, bdaddr);
2852 	ev.addr.type = addr_type;
2853 
2854 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2855 			  skip_sk);
2856 }
2857 
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored pairing data (link
 * key for BR/EDR, IRK + LTK for LE) and optionally terminate an active
 * connection to the device.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_cp_disconnect dc;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	/* The response always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect flag is strictly boolean */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
	} else {
		u8 addr_type;

		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
					       &cp->addr.bdaddr);
		if (conn) {
			/* Defer clearing up the connection parameters
			 * until closing to give a chance of keeping
			 * them if a repairing happens.
			 */
			set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

			/* If disconnection is not requested, then
			 * clear the connection variable so that the
			 * link is not terminated.
			 */
			if (!cp->disconnect)
				conn = NULL;
		}

		/* Map mgmt LE address type to the HCI one */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

		err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
	}

	/* Key removal failure means no pairing data existed */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Response is deferred until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = 0x13; /* Remote User Terminated Connection */
	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2972 
/* MGMT_OP_DISCONNECT handler: terminate the ACL or LE connection to
 * the given address. The response is deferred until the disconnect
 * completes (via the pending command's cmd_complete callback).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The response always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one DISCONNECT operation may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to terminate */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3037 
3038 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3039 {
3040 	switch (link_type) {
3041 	case LE_LINK:
3042 		switch (addr_type) {
3043 		case ADDR_LE_DEV_PUBLIC:
3044 			return BDADDR_LE_PUBLIC;
3045 
3046 		default:
3047 			/* Fallback to LE Random address type */
3048 			return BDADDR_LE_RANDOM;
3049 		}
3050 
3051 	default:
3052 		/* Fallback to BR/EDR type */
3053 		return BDADDR_BREDR;
3054 	}
3055 }
3056 
3057 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3058 			   u16 data_len)
3059 {
3060 	struct mgmt_rp_get_connections *rp;
3061 	struct hci_conn *c;
3062 	size_t rp_len;
3063 	int err;
3064 	u16 i;
3065 
3066 	BT_DBG("");
3067 
3068 	hci_dev_lock(hdev);
3069 
3070 	if (!hdev_is_powered(hdev)) {
3071 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3072 				      MGMT_STATUS_NOT_POWERED);
3073 		goto unlock;
3074 	}
3075 
3076 	i = 0;
3077 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3078 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3079 			i++;
3080 	}
3081 
3082 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3083 	rp = kmalloc(rp_len, GFP_KERNEL);
3084 	if (!rp) {
3085 		err = -ENOMEM;
3086 		goto unlock;
3087 	}
3088 
3089 	i = 0;
3090 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3091 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3092 			continue;
3093 		bacpy(&rp->addr[i].bdaddr, &c->dst);
3094 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3095 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
3096 			continue;
3097 		i++;
3098 	}
3099 
3100 	rp->conn_count = cpu_to_le16(i);
3101 
3102 	/* Recalculate length in case of filtered SCO connections, etc */
3103 	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3104 
3105 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3106 				rp_len);
3107 
3108 	kfree(rp);
3109 
3110 unlock:
3111 	hci_dev_unlock(hdev);
3112 	return err;
3113 }
3114 
3115 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3116 				   struct mgmt_cp_pin_code_neg_reply *cp)
3117 {
3118 	struct mgmt_pending_cmd *cmd;
3119 	int err;
3120 
3121 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3122 			       sizeof(*cp));
3123 	if (!cmd)
3124 		return -ENOMEM;
3125 
3126 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3127 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3128 	if (err < 0)
3129 		mgmt_pending_remove(cmd);
3130 
3131 	return err;
3132 }
3133 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a userspace PIN code to the
 * controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN replies only make sense for an existing ACL connection */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; otherwise send a
	 * negative reply to the controller and report invalid params.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	/* Response is deferred until the HCI command completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3195 
3196 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3197 			     u16 len)
3198 {
3199 	struct mgmt_cp_set_io_capability *cp = data;
3200 
3201 	BT_DBG("");
3202 
3203 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3204 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3205 					 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3206 
3207 	hci_dev_lock(hdev);
3208 
3209 	hdev->io_capability = cp->io_capability;
3210 
3211 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3212 	       hdev->io_capability);
3213 
3214 	hci_dev_unlock(hdev);
3215 
3216 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3217 				 NULL, 0);
3218 }
3219 
3220 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3221 {
3222 	struct hci_dev *hdev = conn->hdev;
3223 	struct mgmt_pending_cmd *cmd;
3224 
3225 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3226 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3227 			continue;
3228 
3229 		if (cmd->user_data != conn)
3230 			continue;
3231 
3232 		return cmd;
3233 	}
3234 
3235 	return NULL;
3236 }
3237 
/* Finish a PAIR_DEVICE command: send the final mgmt response carrying
 * @status and the peer address, detach all pairing callbacks from the
 * connection and drop the references held on it.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
3266 
3267 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3268 {
3269 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3270 	struct mgmt_pending_cmd *cmd;
3271 
3272 	cmd = find_pairing(conn);
3273 	if (cmd) {
3274 		cmd->cmd_complete(cmd, status);
3275 		mgmt_pending_remove(cmd);
3276 	}
3277 }
3278 
3279 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3280 {
3281 	struct mgmt_pending_cmd *cmd;
3282 
3283 	BT_DBG("status %u", status);
3284 
3285 	cmd = find_pairing(conn);
3286 	if (!cmd) {
3287 		BT_DBG("Unable to find a pending command");
3288 		return;
3289 	}
3290 
3291 	cmd->cmd_complete(cmd, mgmt_status(status));
3292 	mgmt_pending_remove(cmd);
3293 }
3294 
3295 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3296 {
3297 	struct mgmt_pending_cmd *cmd;
3298 
3299 	BT_DBG("status %u", status);
3300 
3301 	if (!status)
3302 		return;
3303 
3304 	cmd = find_pairing(conn);
3305 	if (!cmd) {
3306 		BT_DBG("Unable to find a pending command");
3307 		return;
3308 	}
3309 
3310 	cmd->cmd_complete(cmd, mgmt_status(status));
3311 	mgmt_pending_remove(cmd);
3312 }
3313 
/* Handle the MGMT Pair Device command.
 *
 * Validates the target address type and requested IO capability, then
 * creates an outgoing ACL (BR/EDR) or LE connection and hooks the
 * pairing callbacks onto it. The mgmt command stays pending until one
 * of those callbacks fires or the command is cancelled.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The response always echoes back the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error onto a mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A connect_cfm_cb already being set means some other operation
	 * owns this connection's callbacks, so reject with busy.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* The pending command holds a reference on the connection;
	 * NOTE(review): presumably released by the cmd_complete handler —
	 * verify against pairing_complete().
	 */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and sufficiently secure, the pairing
	 * can be completed immediately without waiting for callbacks.
	 */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3447 
/* Handle the MGMT Cancel Pair Device command: abort the pending
 * PAIR_DEVICE command (if any) matching the given remote address.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* user_data is the hci_conn reference taken in pair_device() */
	conn = cmd->user_data;

	/* NOTE(review): only the bdaddr is compared here, not the address
	 * type — confirm this is intentional.
	 */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3490 
/* Common helper for the PIN code / user confirm / user passkey reply
 * (and negative reply) mgmt commands.
 *
 * For LE connections the reply is handed to the SMP layer directly and
 * answered right away. For BR/EDR, the matching HCI command (hci_op) is
 * sent and the mgmt command stays pending until it completes.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Find the existing connection for the right transport */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing replies go through SMP and complete immediately */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3560 
3561 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3562 			      void *data, u16 len)
3563 {
3564 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3565 
3566 	BT_DBG("");
3567 
3568 	return user_pairing_resp(sk, hdev, &cp->addr,
3569 				MGMT_OP_PIN_CODE_NEG_REPLY,
3570 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3571 }
3572 
3573 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3574 			      u16 len)
3575 {
3576 	struct mgmt_cp_user_confirm_reply *cp = data;
3577 
3578 	BT_DBG("");
3579 
3580 	if (len != sizeof(*cp))
3581 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3582 				       MGMT_STATUS_INVALID_PARAMS);
3583 
3584 	return user_pairing_resp(sk, hdev, &cp->addr,
3585 				 MGMT_OP_USER_CONFIRM_REPLY,
3586 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3587 }
3588 
3589 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3590 				  void *data, u16 len)
3591 {
3592 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3593 
3594 	BT_DBG("");
3595 
3596 	return user_pairing_resp(sk, hdev, &cp->addr,
3597 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3598 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3599 }
3600 
3601 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3602 			      u16 len)
3603 {
3604 	struct mgmt_cp_user_passkey_reply *cp = data;
3605 
3606 	BT_DBG("");
3607 
3608 	return user_pairing_resp(sk, hdev, &cp->addr,
3609 				 MGMT_OP_USER_PASSKEY_REPLY,
3610 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3611 }
3612 
3613 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3614 				  void *data, u16 len)
3615 {
3616 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3617 
3618 	BT_DBG("");
3619 
3620 	return user_pairing_resp(sk, hdev, &cp->addr,
3621 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3622 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3623 }
3624 
3625 static void update_name(struct hci_request *req)
3626 {
3627 	struct hci_dev *hdev = req->hdev;
3628 	struct hci_cp_write_local_name cp;
3629 
3630 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3631 
3632 	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3633 }
3634 
/* HCI request completion handler for set_local_name(): answer the
 * pending SET_LOCAL_NAME mgmt command with success or failure.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may already have been cancelled/removed */
	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3662 
/* Handle the MGMT Set Local Name command.
 *
 * Updates both the complete and short name. When the controller is not
 * powered, only the stored values change and the event is emitted
 * directly; otherwise an HCI request is built to push the name (and
 * updated EIR/scan response data) to the controller.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name never goes to the controller, so it can be
	 * stored unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev,
					 data, len, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3731 
/* Handle the MGMT Read Local OOB Data command.
 *
 * Sends the appropriate HCI Read Local OOB (Extended) Data command —
 * extended when BR/EDR Secure Connections is enabled — and leaves the
 * mgmt command pending until the HCI response arrives.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data command may be in flight */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	if (bredr_sc_enabled(hdev))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3779 
/* Handle the MGMT Add Remote OOB Data command.
 *
 * Two payload sizes are accepted: the legacy form carrying only P-192
 * hash/randomizer (BR/EDR only), and the extended form carrying both
 * P-192 and P-256 values. Zero-valued keys are treated as "disable OOB
 * data for that curve".
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 values only, BR/EDR addresses only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: both P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3886 
/* Handle the MGMT Remove Remote OOB Data command.
 *
 * BDADDR_ANY clears all stored remote OOB data; otherwise only the
 * entry for the given address is removed.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
3923 
/* Queue the HCI commands needed to start the currently configured
 * discovery type (BR/EDR inquiry and/or LE active scan) onto req.
 *
 * Returns true on success. Returns false without queuing anything when
 * discovery cannot be started, with the mgmt failure reason stored in
 * *status.
 */
static bool trigger_discovery(struct hci_request *req, u8 *status)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 own_addr_type;
	int err;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		*status = mgmt_bredr_support(hdev);
		if (*status)
			return false;

		/* Refuse to start while an inquiry is already running */
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			*status = MGMT_STATUS_BUSY;
			return false;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		*status = mgmt_le_support(hdev);
		if (*status)
			return false;

		/* Interleaved discovery also needs BR/EDR enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
			*status = MGMT_STATUS_NOT_SUPPORTED;
			return false;
		}

		if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				*status = MGMT_STATUS_REJECTED;
				return false;
			}

			disable_advertising(req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or non-resolvable private address.
		 */
		err = hci_update_random_address(req, true, &own_addr_type);
		if (err < 0) {
			*status = MGMT_STATUS_FAILED;
			return false;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		*status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
4020 
/* HCI request completion handler shared by start_discovery() and
 * start_service_discovery(): answer the pending mgmt command, update
 * the discovery state and, for LE scans, schedule the scan timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	unsigned long timeout;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* Either of the two discovery commands may be pending */
	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	if (status) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_FINDING);

	/* If the scan involves LE scan, pick proper timeout to schedule
	 * hdev->le_scan_disable that will stop it.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;
	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;
	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry is bounded by the controller itself */
		timeout = 0;
		break;
	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
		timeout = 0;
		break;
	}

	if (timeout) {
		/* When service discovery is used and the controller has
		 * a strict duplicate filter, it is important to remember
		 * the start and duration of the scan. This is required
		 * for restarting scanning during the discovery phase.
		 */
		if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER,
			     &hdev->quirks) &&
		    hdev->discovery.result_filtering) {
			hdev->discovery.scan_start = jiffies;
			hdev->discovery.scan_duration = timeout;
		}

		queue_delayed_work(hdev->workqueue,
				   &hdev->le_scan_disable, timeout);
	}

unlock:
	hci_dev_unlock(hdev);
}
4086 
/* Handle the MGMT Start Discovery command.
 *
 * Rejects the request when the device is off or discovery/inquiry is
 * already active; otherwise resets the discovery filter, queues the
 * HCI commands via trigger_discovery() and moves to DISCOVERY_STARTING.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4152 
4153 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4154 					  u8 status)
4155 {
4156 	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4157 				 cmd->param, 1);
4158 }
4159 
/* Handle the MGMT Start Service Discovery command.
 *
 * Like start_discovery() but additionally validates and stores the
 * RSSI threshold and UUID filter list, enabling result filtering
 * during the scan.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	/* Upper bound keeps uuid_count * 16 from overflowing u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		BT_ERR("service_discovery: too big uuid_count value %u",
		       uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The payload must exactly fit the declared UUID count */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		BT_ERR("service_discovery: expected %u bytes, got %u bytes",
		       expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_req_init(&req, hdev);

	if (!trigger_discovery(&req, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4268 
4269 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4270 {
4271 	struct mgmt_pending_cmd *cmd;
4272 
4273 	BT_DBG("status %d", status);
4274 
4275 	hci_dev_lock(hdev);
4276 
4277 	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4278 	if (cmd) {
4279 		cmd->cmd_complete(cmd, mgmt_status(status));
4280 		mgmt_pending_remove(cmd);
4281 	}
4282 
4283 	if (!status)
4284 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4285 
4286 	hci_dev_unlock(hdev);
4287 }
4288 
/* Handle the MGMT Stop Discovery command.
 *
 * Only valid while a discovery of the given type is active. Builds an
 * HCI request to stop discovery; if no HCI commands were needed
 * (-ENODATA) the command is answered immediately.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery in progress */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4346 
/* Handle the MGMT Confirm Name command.
 *
 * During discovery, tells the kernel whether the user-space cache
 * already knows the remote device's name. Unknown names are queued for
 * resolution via the inquiry cache.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolution needed for this entry */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4388 
4389 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4390 			u16 len)
4391 {
4392 	struct mgmt_cp_block_device *cp = data;
4393 	u8 status;
4394 	int err;
4395 
4396 	BT_DBG("%s", hdev->name);
4397 
4398 	if (!bdaddr_type_is_valid(cp->addr.type))
4399 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4400 					 MGMT_STATUS_INVALID_PARAMS,
4401 					 &cp->addr, sizeof(cp->addr));
4402 
4403 	hci_dev_lock(hdev);
4404 
4405 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4406 				  cp->addr.type);
4407 	if (err < 0) {
4408 		status = MGMT_STATUS_FAILED;
4409 		goto done;
4410 	}
4411 
4412 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4413 		   sk);
4414 	status = MGMT_STATUS_SUCCESS;
4415 
4416 done:
4417 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4418 				&cp->addr, sizeof(cp->addr));
4419 
4420 	hci_dev_unlock(hdev);
4421 
4422 	return err;
4423 }
4424 
4425 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4426 			  u16 len)
4427 {
4428 	struct mgmt_cp_unblock_device *cp = data;
4429 	u8 status;
4430 	int err;
4431 
4432 	BT_DBG("%s", hdev->name);
4433 
4434 	if (!bdaddr_type_is_valid(cp->addr.type))
4435 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4436 					 MGMT_STATUS_INVALID_PARAMS,
4437 					 &cp->addr, sizeof(cp->addr));
4438 
4439 	hci_dev_lock(hdev);
4440 
4441 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4442 				  cp->addr.type);
4443 	if (err < 0) {
4444 		status = MGMT_STATUS_INVALID_PARAMS;
4445 		goto done;
4446 	}
4447 
4448 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4449 		   sk);
4450 	status = MGMT_STATUS_SUCCESS;
4451 
4452 done:
4453 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4454 				&cp->addr, sizeof(cp->addr));
4455 
4456 	hci_dev_unlock(hdev);
4457 
4458 	return err;
4459 }
4460 
4461 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4462 			 u16 len)
4463 {
4464 	struct mgmt_cp_set_device_id *cp = data;
4465 	struct hci_request req;
4466 	int err;
4467 	__u16 source;
4468 
4469 	BT_DBG("%s", hdev->name);
4470 
4471 	source = __le16_to_cpu(cp->source);
4472 
4473 	if (source > 0x0002)
4474 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4475 				       MGMT_STATUS_INVALID_PARAMS);
4476 
4477 	hci_dev_lock(hdev);
4478 
4479 	hdev->devid_source = source;
4480 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4481 	hdev->devid_product = __le16_to_cpu(cp->product);
4482 	hdev->devid_version = __le16_to_cpu(cp->version);
4483 
4484 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4485 				NULL, 0);
4486 
4487 	hci_req_init(&req, hdev);
4488 	update_eir(&req);
4489 	hci_req_run(&req, NULL);
4490 
4491 	hci_dev_unlock(hdev);
4492 
4493 	return err;
4494 }
4495 
/* Completion callback for the Set Advertising HCI request: respond to
 * all pending MGMT_OP_SET_ADVERTISING commands and sync the
 * HCI_ADVERTISING setting flag with the controller state (HCI_LE_ADV).
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the actual controller advertising state in the setting */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the socket it stored in match */
	if (match.sk)
		sock_put(match.sk);

unlock:
	hci_dev_unlock(hdev);
}
4527 
/* Set Advertising command handler. cp->val is 0x00 (off), 0x01 (on) or
 * 0x02 (on and connectable, tracked via HCI_ADVERTISING_CONNECTABLE).
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Collapse 0x01/0x02 to a simple on/off value */
	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising/Set LE operation may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Record the connectable preference before building the request */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4619 
4620 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4621 			      void *data, u16 len)
4622 {
4623 	struct mgmt_cp_set_static_address *cp = data;
4624 	int err;
4625 
4626 	BT_DBG("%s", hdev->name);
4627 
4628 	if (!lmp_le_capable(hdev))
4629 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4630 				       MGMT_STATUS_NOT_SUPPORTED);
4631 
4632 	if (hdev_is_powered(hdev))
4633 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4634 				       MGMT_STATUS_REJECTED);
4635 
4636 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4637 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4638 			return mgmt_cmd_status(sk, hdev->id,
4639 					       MGMT_OP_SET_STATIC_ADDRESS,
4640 					       MGMT_STATUS_INVALID_PARAMS);
4641 
4642 		/* Two most significant bits shall be set */
4643 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4644 			return mgmt_cmd_status(sk, hdev->id,
4645 					       MGMT_OP_SET_STATIC_ADDRESS,
4646 					       MGMT_STATUS_INVALID_PARAMS);
4647 	}
4648 
4649 	hci_dev_lock(hdev);
4650 
4651 	bacpy(&hdev->static_addr, &cp->bdaddr);
4652 
4653 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4654 	if (err < 0)
4655 		goto unlock;
4656 
4657 	err = new_settings(hdev, sk);
4658 
4659 unlock:
4660 	hci_dev_unlock(hdev);
4661 	return err;
4662 }
4663 
/* Set Scan Parameters command handler: store the LE scan interval and
 * window. Both must be within 0x0004-0x4000 and window <= interval.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4720 
/* Completion callback for the Set Fast Connectable HCI request: commit
 * the HCI_FAST_CONNECTABLE flag on success and respond to the pending
 * command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* The requested mode was saved in the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4754 
/* Set Fast Connectable command handler: requires BR/EDR enabled and a
 * controller of at least Bluetooth 1.2.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do if the setting already matches */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off just toggle the flag; the controller gets
	 * configured when it is powered on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4819 
/* Completion callback for the Set BR/EDR HCI request. The
 * HCI_BREDR_ENABLED flag was optimistically set before sending the
 * request, so it has to be rolled back on failure.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4851 
/* Set BR/EDR command handler for dual-mode controllers: toggles BR/EDR
 * support while LE stays enabled. Disabling while powered on is
 * rejected, as is re-enabling in configurations listed below.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, false);
	__hci_update_page_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4963 
/* Completion callback for the Set Secure Connections HCI request:
 * commit the HCI_SC_ENABLED/HCI_SC_ONLY flags according to the mode
 * requested by the pending command (0x00 off, 0x01 on, 0x02 SC-only).
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5008 
/* Set Secure Connections command handler. cp->val is 0x00 (off),
 * 0x01 (on) or 0x02 (SC-only mode).
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* For BR/EDR, SC requires SSP to be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without a powered controller with BR/EDR SC support we only
	 * toggle the flags and reply directly, no HCI traffic needed.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do when both SC and SC-only already match */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5096 
/* Set Debug Keys command handler. cp->val: 0x00 discard debug keys,
 * 0x01 keep them (HCI_KEEP_DEBUG_KEYS), 0x02 additionally generate
 * them (HCI_USE_DEBUG_KEYS, pushed to the controller via SSP debug
 * mode when powered and SSP is enabled).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Only touch the controller if the use-mode actually changed */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5143 
/* Set Privacy command handler: enable/disable LE privacy and store the
 * local IRK. Only allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5193 
5194 static bool irk_is_valid(struct mgmt_irk_info *irk)
5195 {
5196 	switch (irk->addr.type) {
5197 	case BDADDR_LE_PUBLIC:
5198 		return true;
5199 
5200 	case BDADDR_LE_RANDOM:
5201 		/* Two most significant bits shall be set */
5202 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5203 			return false;
5204 		return true;
5205 	}
5206 
5207 	return false;
5208 }
5209 
/* Load IRKs command handler: replace the kernel's list of Identity
 * Resolving Keys with the list supplied by user space.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the expected_len computation below from
	 * overflowing u16.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the declared key count */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate all entries before touching the existing list */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space handed us IRKs, so it can handle RPA resolving */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5276 
5277 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5278 {
5279 	if (key->master != 0x00 && key->master != 0x01)
5280 		return false;
5281 
5282 	switch (key->addr.type) {
5283 	case BDADDR_LE_PUBLIC:
5284 		return true;
5285 
5286 	case BDADDR_LE_RANDOM:
5287 		/* Two most significant bits shall be set */
5288 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5289 			return false;
5290 		return true;
5291 	}
5292 
5293 	return false;
5294 }
5295 
/* Load Long Term Keys command handler: replace the kernel's list of
 * SMP long term keys with the list supplied by user space.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the expected_len computation below from
	 * overflowing u16.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the declared key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate all entries before touching the existing list */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* fall through - debug keys and unknown key types
			 * are deliberately skipped, so the assignments
			 * above are effectively unused.
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5387 
/* Complete a pending Get Connection Information command: fill in the
 * cached RSSI/TX-power values on success (invalid markers on failure),
 * send the reply, and release the connection references taken when the
 * pending command was created (hci_conn_hold + hci_conn_get).
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* The reply echoes the address from the original command */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
5414 
/* Completion callback for the connection-info refresh request issued by
 * get_conn_info (Read RSSI followed by optional Read TX Power commands).
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in conn_info response", handle);
		goto unlock;
	}

	/* The pending command stored the connection as its user_data */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5467 
/* Handle the Get Connection Information mgmt command.
 *
 * Reports RSSI, TX power and max TX power for an established connection.
 * If the values cached in struct hci_conn are fresh enough they are
 * returned directly; otherwise HCI Read RSSI / Read TX Power commands
 * are queued and the reply is deferred until the controller answers
 * (see conn_info_refresh_complete).
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* The reply always echoes back the requested address, even for
	 * error statuses.
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BR/EDR addresses map to ACL links, LE addresses to LE links */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Connection Information request may be pending for a
	 * given connection at any time.
	 */
	if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive while the command is pending;
		 * the references are presumably dropped by the command's
		 * completion handler (conn_info_cmd_complete) — it is
		 * defined earlier in this file.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5588 
/* Completion handler for a pending Get Clock Information command.
 *
 * Builds the reply from clock values cached in hdev and (when a piconet
 * clock was requested) in the connection, then drops the connection
 * references taken when the command was queued. On non-zero status the
 * reply carries only the echoed address.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* cmd->param holds the original command parameters, which begin
	 * with the address info that must be echoed back.
	 */
	memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	/* conn is only set when a piconet clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Balance the hci_conn_hold()/hci_conn_get() done in
	 * get_clock_info() when the command was queued.
	 */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
5624 
/* HCI request completion callback for the Read Clock command(s) issued
 * by get_clock_info(). Locates the matching pending mgmt command and
 * finalizes it.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	/* Recover the parameters of the last sent Read Clock command to
	 * work out which connection (if any) it targeted.
	 */
	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	if (hci_cp->which) {
		/* Non-zero "which" means a piconet clock was read, so
		 * look up the connection that owns the handle.
		 */
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command is matched on the same conn pointer that
	 * get_clock_info() stored (NULL for local-clock-only requests).
	 */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5656 
/* Handle the Get Clock Information mgmt command.
 *
 * Always reads the local clock; when a peer BR/EDR address is given and
 * that peer is connected, additionally reads the piconet clock for that
 * connection. The reply is deferred to clock_info_cmd_complete() via
 * get_clock_info_complete() once the HCI request finishes.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Error replies echo back the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Piconet clocks only exist for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is wanted; otherwise the
	 * named peer must currently be connected.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command: zeroed parameters read the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References are released by clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5732 
5733 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5734 {
5735 	struct hci_conn *conn;
5736 
5737 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5738 	if (!conn)
5739 		return false;
5740 
5741 	if (conn->dst_type != type)
5742 		return false;
5743 
5744 	if (conn->state != BT_CONNECTED)
5745 		return false;
5746 
5747 	return true;
5748 }
5749 
/* Set the auto-connect policy for an LE device identified by addr and
 * addr_type, creating its hci_conn_params entry if necessary, and move
 * the entry onto the action list matching the new policy. The passive
 * background scan is refreshed whenever the set of tracked devices may
 * have changed.
 *
 * This function requires the caller holds hdev->lock
 */
static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	/* NOTE(review): the comparison below relies on
	 * hci_conn_params_add() returning an existing entry when one is
	 * already present — confirm against its definition.
	 */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry currently sits on */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* No list membership needed; just refresh scan state */
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		__hci_update_background_scan(req);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue for connection if not already connected */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			__hci_update_background_scan(req);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5791 
5792 static void device_added(struct sock *sk, struct hci_dev *hdev,
5793 			 bdaddr_t *bdaddr, u8 type, u8 action)
5794 {
5795 	struct mgmt_ev_device_added ev;
5796 
5797 	bacpy(&ev.addr.bdaddr, bdaddr);
5798 	ev.addr.type = type;
5799 	ev.action = action;
5800 
5801 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5802 }
5803 
5804 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5805 {
5806 	struct mgmt_pending_cmd *cmd;
5807 
5808 	BT_DBG("status 0x%02x", status);
5809 
5810 	hci_dev_lock(hdev);
5811 
5812 	cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
5813 	if (!cmd)
5814 		goto unlock;
5815 
5816 	cmd->cmd_complete(cmd, mgmt_status(status));
5817 	mgmt_pending_remove(cmd);
5818 
5819 unlock:
5820 	hci_dev_unlock(hdev);
5821 }
5822 
/* Handle the Add Device mgmt command.
 *
 * For BR/EDR addresses only action 0x01 (allow incoming connections) is
 * supported and the device is added to the whitelist. For LE addresses
 * the action selects an auto-connect policy (0x00 background scan
 * report, 0x01 direct connect, 0x02 always connect) applied through
 * hci_conn_params_set(). A Device Added event is sent to other mgmt
 * sockets, and the reply is deferred to add_device_complete() when HCI
 * commands were queued.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		/* Whitelist changed; page scan may need enabling */
		__hci_update_page_scan(&req);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = hci_req_run(&req, add_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5915 
5916 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5917 			   bdaddr_t *bdaddr, u8 type)
5918 {
5919 	struct mgmt_ev_device_removed ev;
5920 
5921 	bacpy(&ev.addr.bdaddr, bdaddr);
5922 	ev.addr.type = type;
5923 
5924 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5925 }
5926 
5927 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5928 {
5929 	struct mgmt_pending_cmd *cmd;
5930 
5931 	BT_DBG("status 0x%02x", status);
5932 
5933 	hci_dev_lock(hdev);
5934 
5935 	cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
5936 	if (!cmd)
5937 		goto unlock;
5938 
5939 	cmd->cmd_complete(cmd, mgmt_status(status));
5940 	mgmt_pending_remove(cmd);
5941 
5942 unlock:
5943 	hci_dev_unlock(hdev);
5944 }
5945 
/* Handle the Remove Device mgmt command.
 *
 * With a specific address: remove a BR/EDR device from the whitelist,
 * or delete the LE connection parameters (only entries with an enabled
 * auto-connect policy may be removed this way). With BDADDR_ANY and
 * address type 0: remove all whitelist entries and all LE connection
 * parameters that have a policy other than HCI_AUTO_CONN_DISABLED.
 * Device Removed events are emitted for every removed entry, and the
 * reply is deferred to remove_device_complete() when HCI commands were
 * queued.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Single-device removal */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd->cmd_complete(cmd,
							MGMT_STATUS_INVALID_PARAMS);
				mgmt_pending_remove(cmd);
				goto unlock;
			}

			/* Whitelist shrank; page scan may be disabled */
			__hci_update_page_scan(&req);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		/* Disabled entries were never added via Add Device, so
		 * refuse to remove them here.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		__hci_update_background_scan(&req);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = cmd->cmd_complete(cmd,
						MGMT_STATUS_INVALID_PARAMS);
			mgmt_pending_remove(cmd);
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		__hci_update_page_scan(&req);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			/* Disabled entries are kept; they hold cached
			 * connection parameters, not an Add Device policy.
			 */
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		__hci_update_background_scan(&req);
	}

complete:
	err = hci_req_run(&req, remove_device_complete);
	if (err < 0) {
		/* ENODATA means no HCI commands were needed (e.g. if
		 * the adapter is powered off).
		 */
		if (err == -ENODATA)
			err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6072 
/* Handle the Load Connection Parameters mgmt command.
 *
 * Replaces the stored LE connection parameters: all entries with a
 * disabled auto-connect policy are cleared first, then each valid
 * parameter record from the command is added (or updates an existing
 * entry). Invalid records are skipped with an error log rather than
 * failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE address types make sense here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6158 
/* Handle the Set External Configuration mgmt command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag for controllers with the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. When the overall configured state of
 * the controller changes as a result, the controller is moved between
 * the configured and unconfigured index lists, which may also trigger
 * powering it on for setup.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Configuration source may only change while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Track whether the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state no longer matches the UNCONFIGURED flag,
	 * move the controller to the other index list.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: run the power-on setup path */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose as raw/unconfigured */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6214 
/* Handle the Set Public Address mgmt command.
 *
 * Stores the public address that will be programmed into the controller
 * (via its set_bdaddr driver callback) during setup. If supplying the
 * address makes an unconfigured controller fully configured, it is
 * re-registered on the configured index list and powered on for setup.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* The address may only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Without a driver callback the address cannot be programmed */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Now fully configured: move to the configured index list
		 * and run the power-on setup path.
		 */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6266 
6267 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6268 				  u8 data_len)
6269 {
6270 	eir[eir_len++] = sizeof(type) + data_len;
6271 	eir[eir_len++] = type;
6272 	memcpy(&eir[eir_len], data, data_len);
6273 	eir_len += data_len;
6274 
6275 	return eir_len;
6276 }
6277 
/* Handle the Read Local Out Of Band Extended Data mgmt command.
 *
 * Builds EIR/AD formatted OOB data for either BR/EDR (class of device)
 * or LE (address, role, optional SC confirm/random values and flags),
 * replies with it and additionally broadcasts it via the Local OOB Data
 * Updated event to sockets that opted in.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
					 MGMT_STATUS_NOT_POWERED,
					 &cp->type, sizeof(cp->type));

	/* First pass: validate the requested transport and size the
	 * reply buffer for the EIR data that will be generated below.
	 */
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		status = mgmt_bredr_support(hdev);
		if (status)
			return mgmt_cmd_complete(sk, hdev->id,
						 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
						 status, &cp->type,
						 sizeof(cp->type));
		/* Class of Device field: 2 header bytes + 3 data bytes */
		eir_len = 5;
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		status = mgmt_le_support(hdev);
		if (status)
			return mgmt_cmd_complete(sk, hdev->id,
						 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
						 status, &cp->type,
						 sizeof(cp->type));
		/* Worst case: address + role + SC confirm + SC random +
		 * flags fields.
		 */
		eir_len = 9 + 3 + 18 + 18 + 3;
		break;
	default:
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->type, sizeof(cp->type));
	}

	hci_dev_lock(hdev);

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	/* Second pass: fill in the EIR data, tracking the actual length */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		eir_len = eir_append_data(rp->eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			goto done;
		}

		/* Pick the address the peer should use; the 7th byte
		 * carries the address type (0x00 public, 0x01 random).
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			memcpy(addr, &hdev->rpa, 6);
			addr[6] = 0x01;
		} else if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
			   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
			   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
			    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral (advertising), 0x01 = central */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Reply with the actual length, which may be shorter than the
	 * worst-case allocation.
	 */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				MGMT_STATUS_SUCCESS, rp, sizeof(*rp) + eir_len);
	if (err < 0)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6414 
6415 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6416 			     void *data, u16 data_len)
6417 {
6418 	struct mgmt_rp_read_adv_features *rp;
6419 	size_t rp_len;
6420 	int err;
6421 
6422 	BT_DBG("%s", hdev->name);
6423 
6424 	hci_dev_lock(hdev);
6425 
6426 	rp_len = sizeof(*rp);
6427 	rp = kmalloc(rp_len, GFP_ATOMIC);
6428 	if (!rp) {
6429 		hci_dev_unlock(hdev);
6430 		return -ENOMEM;
6431 	}
6432 
6433 	rp->supported_flags = cpu_to_le32(0);
6434 	rp->max_adv_data_len = 31;
6435 	rp->max_scan_rsp_len = 31;
6436 	rp->max_instances = 0;
6437 	rp->num_instances = 0;
6438 
6439 	hci_dev_unlock(hdev);
6440 
6441 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6442 				MGMT_STATUS_SUCCESS, rp, rp_len);
6443 
6444 	kfree(rp);
6445 
6446 	return err;
6447 }
6448 
/* Table of mgmt command handlers, indexed directly by management opcode
 * — the position of each entry is therefore part of the ABI and entries
 * must never be reordered. Each entry gives the handler, the expected
 * parameter size (a minimum when HCI_MGMT_VAR_LEN is set) and optional
 * HCI_MGMT_* flags (NO_HDEV: no controller index, UNTRUSTED: allowed
 * for untrusted sockets, UNCONFIGURED: allowed on unconfigured
 * controllers).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
};
6534 
/* Handle a single mgmt command received on a control socket.
 *
 * Copies the raw message into a kernel buffer, validates the mgmt_hdr
 * against the actual message size, looks up the opcode in the
 * channel's handler table, and runs permission, device-state and
 * parameter-length checks before dispatching to the handler. Protocol
 * failures are reported back on the socket via mgmt_cmd_status().
 *
 * Returns msglen on success, the mgmt_cmd_status() result for
 * protocol errors, or a negative errno for transport failures.
 */
int mgmt_control(struct hci_mgmt_chan *chan, struct sock *sk,
		 struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* The header's length field must describe exactly the payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	/* Untrusted sockets may only issue commands explicitly flagged
	 * as available to them.
	 */
	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or claimed by a
		 * user channel, are not addressable via mgmt.
		 */
		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept commands flagged
		 * HCI_MGMT_UNCONFIGURED.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	/* Commands flagged HCI_MGMT_NO_HDEV must come without an index,
	 * and all others must come with one.
	 */
	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Variable-length commands specify a minimum parameter size,
	 * fixed-length commands the exact size.
	 */
	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
6645 
/* Notify mgmt sockets that a new controller index has appeared.
 *
 * Legacy (configured vs unconfigured) index events are only emitted
 * for BR/EDR controllers; the extended index event is emitted for
 * BR/EDR and AMP controllers with a type byte identifying the kind.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw-only devices are invisible to the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
6677 
/* Notify mgmt sockets that a controller index has gone away.
 *
 * Mirrors mgmt_index_added(): legacy removal events only for BR/EDR,
 * extended removal event for BR/EDR and AMP. For BR/EDR all still
 * pending commands are additionally failed with INVALID_INDEX since
 * the controller they target no longer exists.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw-only devices are invisible to the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		/* Fail every pending command (opcode 0 == all opcodes) */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
6712 
6713 /* This function requires the caller holds hdev->lock */
6714 static void restart_le_actions(struct hci_request *req)
6715 {
6716 	struct hci_dev *hdev = req->hdev;
6717 	struct hci_conn_params *p;
6718 
6719 	list_for_each_entry(p, &hdev->le_conn_params, list) {
6720 		/* Needed for AUTO_OFF case where might not "really"
6721 		 * have been powered off.
6722 		 */
6723 		list_del_init(&p->action);
6724 
6725 		switch (p->auto_connect) {
6726 		case HCI_AUTO_CONN_DIRECT:
6727 		case HCI_AUTO_CONN_ALWAYS:
6728 			list_add(&p->action, &hdev->pend_le_conns);
6729 			break;
6730 		case HCI_AUTO_CONN_REPORT:
6731 			list_add(&p->action, &hdev->pend_le_reports);
6732 			break;
6733 		default:
6734 			break;
6735 		}
6736 	}
6737 
6738 	__hci_update_background_scan(req);
6739 }
6740 
/* Request-completion callback for the power-on HCI command batch built
 * by powered_update_hci(). Registers SMP on success and answers all
 * pending Set Powered commands with the resulting settings.
 */
static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	if (!status) {
		/* Register the available SMP channels (BR/EDR and LE) only
		 * when successfully powering on the controller. This late
		 * registration is required so that LE SMP can clearly
		 * decide if the public address or static address is used.
		 */
		smp_register(hdev);
	}

	hci_dev_lock(hdev);

	/* Answer all pending Set Powered commands; match.sk ends up
	 * holding a reference to the first requester's socket.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	if (match.sk)
		sock_put(match.sk);
}
6767 
/* Build and run the batch of HCI commands needed to bring the
 * controller's state in line with the mgmt settings after power on
 * (SSP/SC, LE host support, advertising data, pending LE actions,
 * link security, page scan, class, name and EIR).
 *
 * Returns the result of hci_req_run(); powered_complete() runs when
 * the batch finishes.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP (and Secure Connections, if supported) on the
	 * controller when mgmt has it enabled but the host feature
	 * bit is not yet set.
	 */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			enable_advertising(&req);

		restart_le_actions(&req);
	}

	/* Sync the authentication-enable setting with the mgmt flag */
	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			write_fast_connectable(&req, true);
		else
			write_fast_connectable(&req, false);
		__hci_update_page_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
6839 
/* React to a controller power state change.
 *
 * On power-on, queue the settings sync (powered_update_hci); if that
 * queued commands the final notification is deferred to
 * powered_complete(). On power-off (or when nothing was queued),
 * answer pending Set Powered commands, fail all other pending
 * commands and emit a New Settings event.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };
	int err;

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return 0;

	if (powered) {
		/* A zero return means commands were queued and
		 * powered_complete() will do the notification.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Report the class of device as all-zero while powered off */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
6886 
6887 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6888 {
6889 	struct mgmt_pending_cmd *cmd;
6890 	u8 status;
6891 
6892 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6893 	if (!cmd)
6894 		return;
6895 
6896 	if (err == -ERFKILL)
6897 		status = MGMT_STATUS_RFKILLED;
6898 	else
6899 		status = MGMT_STATUS_FAILED;
6900 
6901 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6902 
6903 	mgmt_pending_remove(cmd);
6904 }
6905 
/* Discoverable-timeout work handler: clear the discoverable flags,
 * restore page-scan-only visibility on BR/EDR, refresh class and
 * advertising data, and announce the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	hci_req_init(&req, hdev);
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		/* Keep page scan but drop inquiry scan */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6936 
6937 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6938 		       bool persistent)
6939 {
6940 	struct mgmt_ev_new_link_key ev;
6941 
6942 	memset(&ev, 0, sizeof(ev));
6943 
6944 	ev.store_hint = persistent;
6945 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6946 	ev.key.addr.type = BDADDR_BREDR;
6947 	ev.key.type = key->type;
6948 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6949 	ev.key.pin_len = key->pin_len;
6950 
6951 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6952 }
6953 
6954 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6955 {
6956 	switch (ltk->type) {
6957 	case SMP_LTK:
6958 	case SMP_LTK_SLAVE:
6959 		if (ltk->authenticated)
6960 			return MGMT_LTK_AUTHENTICATED;
6961 		return MGMT_LTK_UNAUTHENTICATED;
6962 	case SMP_LTK_P256:
6963 		if (ltk->authenticated)
6964 			return MGMT_LTK_P256_AUTH;
6965 		return MGMT_LTK_P256_UNAUTH;
6966 	case SMP_LTK_P256_DEBUG:
6967 		return MGMT_LTK_P256_DEBUG;
6968 	}
6969 
6970 	return MGMT_LTK_UNAUTHENTICATED;
6971 }
6972 
/* Emit a New Long Term Key event, suppressing the store hint for keys
 * tied to non-identity (resolvable/non-resolvable random) addresses.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK is the master/initiator role key */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7010 
/* Emit a New Identity Resolving Key event. The store hint is only set
 * when the device actually uses a resolvable private address.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
7040 
/* Emit a New Signature Resolving Key event, suppressing the store
 * hint for keys tied to non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
7070 
7071 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7072 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7073 			 u16 max_interval, u16 latency, u16 timeout)
7074 {
7075 	struct mgmt_ev_new_conn_param ev;
7076 
7077 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
7078 		return;
7079 
7080 	memset(&ev, 0, sizeof(ev));
7081 	bacpy(&ev.addr.bdaddr, bdaddr);
7082 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7083 	ev.store_hint = store_hint;
7084 	ev.min_interval = cpu_to_le16(min_interval);
7085 	ev.max_interval = cpu_to_le16(max_interval);
7086 	ev.latency = cpu_to_le16(latency);
7087 	ev.timeout = cpu_to_le16(timeout);
7088 
7089 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
7090 }
7091 
/* Emit a Device Connected event, appending EIR data for the peer:
 * either its LE advertising data verbatim, or (for BR/EDR) its name
 * and class of device. The 512-byte buffer bounds the header plus
 * EIR payload. NOTE(review): assumes le_adv_data_len/name_len stay
 * within that bound — confirm against callers.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only include the class if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
7128 
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command and pass its socket (with a held reference the caller must
 * drop) back through @data so the resulting event can skip it.
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
7140 
/* mgmt_pending_foreach() callback: finish a pending Unpair Device
 * command, emitting the Device Unpaired event before completing.
 * @data is the hci_dev the command was issued against.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
7151 
7152 bool mgmt_powering_down(struct hci_dev *hdev)
7153 {
7154 	struct mgmt_pending_cmd *cmd;
7155 	struct mgmt_mode *cp;
7156 
7157 	cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
7158 	if (!cmd)
7159 		return false;
7160 
7161 	cp = cmd->param;
7162 	if (!cp->val)
7163 		return true;
7164 
7165 	return false;
7166 }
7167 
/* Emit a Device Disconnected event and complete the pending
 * Disconnect/Unpair commands that match. Also expedites a pending
 * power-off once the last connection drops.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report disconnections mgmt announced as connections */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete pending Disconnect commands; their socket is then
	 * skipped when broadcasting the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
7203 
/* Fail a pending Disconnect command whose target address and type
 * match; pending Unpair Device commands are completed regardless.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command if it targeted this peer */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
7229 
/* Emit a Connect Failed event for an outgoing connection attempt,
 * expediting a pending power-off once the last connection drops.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
7249 
7250 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7251 {
7252 	struct mgmt_ev_pin_code_request ev;
7253 
7254 	bacpy(&ev.addr.bdaddr, bdaddr);
7255 	ev.addr.type = BDADDR_BREDR;
7256 	ev.secure = secure;
7257 
7258 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7259 }
7260 
7261 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7262 				  u8 status)
7263 {
7264 	struct mgmt_pending_cmd *cmd;
7265 
7266 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7267 	if (!cmd)
7268 		return;
7269 
7270 	cmd->cmd_complete(cmd, mgmt_status(status));
7271 	mgmt_pending_remove(cmd);
7272 }
7273 
7274 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7275 				      u8 status)
7276 {
7277 	struct mgmt_pending_cmd *cmd;
7278 
7279 	cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7280 	if (!cmd)
7281 		return;
7282 
7283 	cmd->cmd_complete(cmd, mgmt_status(status));
7284 	mgmt_pending_remove(cmd);
7285 }
7286 
7287 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7288 			      u8 link_type, u8 addr_type, u32 value,
7289 			      u8 confirm_hint)
7290 {
7291 	struct mgmt_ev_user_confirm_request ev;
7292 
7293 	BT_DBG("%s", hdev->name);
7294 
7295 	bacpy(&ev.addr.bdaddr, bdaddr);
7296 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7297 	ev.confirm_hint = confirm_hint;
7298 	ev.value = cpu_to_le32(value);
7299 
7300 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7301 			  NULL);
7302 }
7303 
7304 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7305 			      u8 link_type, u8 addr_type)
7306 {
7307 	struct mgmt_ev_user_passkey_request ev;
7308 
7309 	BT_DBG("%s", hdev->name);
7310 
7311 	bacpy(&ev.addr.bdaddr, bdaddr);
7312 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7313 
7314 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7315 			  NULL);
7316 }
7317 
7318 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7319 				      u8 link_type, u8 addr_type, u8 status,
7320 				      u8 opcode)
7321 {
7322 	struct mgmt_pending_cmd *cmd;
7323 
7324 	cmd = mgmt_pending_find(opcode, hdev);
7325 	if (!cmd)
7326 		return -ENOENT;
7327 
7328 	cmd->cmd_complete(cmd, mgmt_status(status));
7329 	mgmt_pending_remove(cmd);
7330 
7331 	return 0;
7332 }
7333 
/* Complete a pending User Confirm Reply command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
7340 
/* Complete a pending User Confirm Negative Reply command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7348 
/* Complete a pending User Passkey Reply command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
7355 
/* Complete a pending User Passkey Negative Reply command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
7363 
7364 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7365 			     u8 link_type, u8 addr_type, u32 passkey,
7366 			     u8 entered)
7367 {
7368 	struct mgmt_ev_passkey_notify ev;
7369 
7370 	BT_DBG("%s", hdev->name);
7371 
7372 	bacpy(&ev.addr.bdaddr, bdaddr);
7373 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7374 	ev.passkey = __cpu_to_le32(passkey);
7375 	ev.entered = entered;
7376 
7377 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7378 }
7379 
/* Emit an Authentication Failed event for @conn and, if a pairing
 * command is pending for it, complete that command with the same
 * status (skipping its socket in the broadcast).
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7400 
/* HCI Write Auth Enable completed: sync the HCI_LINK_SECURITY mgmt
 * flag with the controller state and answer pending Set Link Security
 * commands. On failure the pending commands get the mapped error.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag and
	 * note whether anything actually changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7427 
/* Queue an HCI command clearing the controller's extended inquiry
 * response data and wipe the cached copy in hdev->eir.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	/* EIR only exists on controllers with extended inquiry support */
	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	/* An all-zero write clears the EIR on the controller */
	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
7442 
/* HCI Write SSP Mode completed: sync the SSP (and dependent HS) mgmt
 * flags, answer pending Set SSP commands, and refresh or clear the
 * EIR depending on the resulting SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically set flag
		 * and announce the reverted settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables High Speed support */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7495 
7496 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7497 {
7498 	struct cmd_lookup *match = data;
7499 
7500 	if (match->sk == NULL) {
7501 		match->sk = cmd->sk;
7502 		sock_hold(match->sk);
7503 	}
7504 }
7505 
/* HCI Write Class of Device completed: pick up the socket of any
 * pending class-affecting command (Set Dev Class, Add/Remove UUID) so
 * the Class Of Device Changed event can skip it, and emit the event
 * on success.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, NULL);

	if (match.sk)
		sock_put(match.sk);
}
7522 
/* HCI Write Local Name completed: update the cached name and emit a
 * Local Name Changed event, except while powering on where the change
 * was not user initiated.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Not user initiated: cache the controller's name */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   cmd ? cmd->sk : NULL);
}
7549 
/* HCI Read Local OOB Data completed: answer the pending Read Local
 * OOB Data command. The P-256 hash/randomizer are only included when
 * BR/EDR secure connections is enabled and the values are available;
 * otherwise the response is truncated to the P-192 values.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *rand192, u8 *hash256, u8 *rand256,
				       u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			        mgmt_status(status));
	} else {
		struct mgmt_rp_read_local_oob_data rp;
		size_t rp_size = sizeof(rp);

		memcpy(rp.hash192, hash192, sizeof(rp.hash192));
		memcpy(rp.rand192, rand192, sizeof(rp.rand192));

		if (bredr_sc_enabled(hdev) && hash256 && rand256) {
			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.rand256, rand256, sizeof(rp.rand256));
		} else {
			/* Drop the trailing P-256 fields from the reply */
			rp_size -= sizeof(rp.hash256) + sizeof(rp.rand256);
		}

		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				  &rp, rp_size);
	}

	mgmt_pending_remove(cmd);
}
7586 
7587 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7588 {
7589 	int i;
7590 
7591 	for (i = 0; i < uuid_count; i++) {
7592 		if (!memcmp(uuid, uuids[i], 16))
7593 			return true;
7594 	}
7595 
7596 	return false;
7597 }
7598 
/* Scan EIR/advertising data for any UUID contained in @uuids.
 *
 * Walks the length-prefixed EIR fields; 16- and 32-bit UUIDs are
 * expanded into the Bluetooth base UUID before comparison so a single
 * 128-bit match routine (has_uuid) can be used for all widths.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop on a field that would overrun the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				/* Expand into bytes 12-13 of the base UUID */
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				/* Expand into bytes 12-15 of the base UUID */
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + payload) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7653 
7654 static void restart_le_scan(struct hci_dev *hdev)
7655 {
7656 	/* If controller is not scanning we are done. */
7657 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
7658 		return;
7659 
7660 	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7661 		       hdev->discovery.scan_start +
7662 		       hdev->discovery.scan_duration))
7663 		return;
7664 
7665 	queue_delayed_work(hdev->workqueue, &hdev->le_scan_restart,
7666 			   DISCOV_LE_RESTART_DELAY);
7667 }
7668 
7669 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7670 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7671 {
7672 	/* If a RSSI threshold has been specified, and
7673 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7674 	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7675 	 * is set, let it through for further processing, as we might need to
7676 	 * restart the scan.
7677 	 *
7678 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7679 	 * the results are also dropped.
7680 	 */
7681 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7682 	    (rssi == HCI_RSSI_INVALID ||
7683 	    (rssi < hdev->discovery.rssi &&
7684 	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7685 		return  false;
7686 
7687 	if (hdev->discovery.uuid_count != 0) {
7688 		/* If a list of UUIDs is provided in filter, results with no
7689 		 * matching UUID should be dropped.
7690 		 */
7691 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7692 				   hdev->discovery.uuids) &&
7693 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
7694 				   hdev->discovery.uuid_count,
7695 				   hdev->discovery.uuids))
7696 			return false;
7697 	}
7698 
7699 	/* If duplicate filtering does not report RSSI changes, then restart
7700 	 * scanning to ensure updated result with updated RSSI values.
7701 	 */
7702 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7703 		restart_le_scan(hdev);
7704 
7705 		/* Validate RSSI value against the RSSI threshold once more. */
7706 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7707 		    rssi < hdev->discovery.rssi)
7708 			return false;
7709 	}
7710 
7711 	return true;
7712 }
7713 
/* Send a Device Found management event for a discovery result.
 *
 * The result is dropped when no kernel-initiated discovery is active
 * (for LE, unless passive-scan reports are pending), or when it fails
 * the configured service-discovery filter. The event's eir buffer is
 * assembled from the EIR/advertising data, an optional Class of Device
 * field built from @dev_class, and the scan response data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append a Class of Device field only if the EIR data does not
	 * already carry one; eir_len is updated to the new total so the
	 * scan response is appended after it.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7782 
7783 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7784 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7785 {
7786 	struct mgmt_ev_device_found *ev;
7787 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7788 	u16 eir_len;
7789 
7790 	ev = (struct mgmt_ev_device_found *) buf;
7791 
7792 	memset(buf, 0, sizeof(buf));
7793 
7794 	bacpy(&ev->addr.bdaddr, bdaddr);
7795 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
7796 	ev->rssi = rssi;
7797 
7798 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7799 				  name_len);
7800 
7801 	ev->eir_len = cpu_to_le16(eir_len);
7802 
7803 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
7804 }
7805 
7806 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
7807 {
7808 	struct mgmt_ev_discovering ev;
7809 
7810 	BT_DBG("%s discovering %u", hdev->name, discovering);
7811 
7812 	memset(&ev, 0, sizeof(ev));
7813 	ev.type = hdev->discovery.type;
7814 	ev.discovering = discovering;
7815 
7816 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
7817 }
7818 
/* Completion callback for the advertising re-enable request issued by
 * mgmt_reenable_advertising(); the status is only logged and no
 * further action is taken.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}
7823 
7824 void mgmt_reenable_advertising(struct hci_dev *hdev)
7825 {
7826 	struct hci_request req;
7827 
7828 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
7829 		return;
7830 
7831 	hci_req_init(&req, hdev);
7832 	enable_advertising(&req);
7833 	hci_req_run(&req, adv_enable_complete);
7834 }
7835 
/* Management channel descriptor registered with the HCI socket layer:
 * associates the control channel with the mgmt_handlers command table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
};
7841 
/* Register the management control channel; returns 0 on success or a
 * negative error code from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
7846 
/* Unregister the management control channel on module exit. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
7851