xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 27f937cc)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 
40 #define MGMT_VERSION	1
41 #define MGMT_REVISION	15
42 
/* Commands accepted from trusted (privileged) management sockets.
 * The list is reported verbatim by MGMT_OP_READ_COMMANDS, so entries
 * must not be reordered or removed without updating user space
 * expectations.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
};

/* Events that may be delivered to trusted management sockets; also
 * reported verbatim by MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};

/* Read-only subset of commands available to untrusted sockets. */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
};

/* Non-sensitive subset of events delivered to untrusted sockets. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
};
172 
/* 2 second cache lifetime (presumably for the service cache worker,
 * see service_cache_off() below — confirm against the users of this
 * macro elsewhere in the file).
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* All-zero 128-bit key; NOTE(review): appears to be used as a sentinel
 * for invalid/blocked keys — verify against its users.
 */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
177 
/* HCI to MGMT error code conversion table, indexed directly by the HCI
 * status code (0x00 upwards). Codes beyond the table are mapped to
 * MGMT_STATUS_FAILED by mgmt_status().
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
242 
243 static u8 mgmt_status(u8 hci_status)
244 {
245 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
246 		return mgmt_status_table[hci_status];
247 
248 	return MGMT_STATUS_FAILED;
249 }
250 
/* Broadcast an index-related event on the control channel to every
 * socket whose flags match @flag; no socket is excluded (skip_sk is
 * NULL).
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
257 
/* Send an event on the control channel limited to sockets matching
 * @flag, optionally skipping @skip_sk (typically the originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
264 
/* Send an event on the control channel to all trusted sockets,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
271 
272 static u8 le_addr_type(u8 mgmt_addr_type)
273 {
274 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
275 		return ADDR_LE_DEV_PUBLIC;
276 	else
277 		return ADDR_LE_DEV_RANDOM;
278 }
279 
/* Fill a mgmt_rp_read_version structure with the compile-time
 * management interface version; the revision is stored little-endian.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
287 
/* MGMT_OP_READ_VERSION handler: reply with the interface version.
 * The command is index-less, hence MGMT_INDEX_NONE in the reply.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	BT_DBG("sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
300 
301 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
302 			 u16 data_len)
303 {
304 	struct mgmt_rp_read_commands *rp;
305 	u16 num_commands, num_events;
306 	size_t rp_size;
307 	int i, err;
308 
309 	BT_DBG("sock %p", sk);
310 
311 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
312 		num_commands = ARRAY_SIZE(mgmt_commands);
313 		num_events = ARRAY_SIZE(mgmt_events);
314 	} else {
315 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
316 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
317 	}
318 
319 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
320 
321 	rp = kmalloc(rp_size, GFP_KERNEL);
322 	if (!rp)
323 		return -ENOMEM;
324 
325 	rp->num_commands = cpu_to_le16(num_commands);
326 	rp->num_events = cpu_to_le16(num_events);
327 
328 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
329 		__le16 *opcode = rp->opcodes;
330 
331 		for (i = 0; i < num_commands; i++, opcode++)
332 			put_unaligned_le16(mgmt_commands[i], opcode);
333 
334 		for (i = 0; i < num_events; i++, opcode++)
335 			put_unaligned_le16(mgmt_events[i], opcode);
336 	} else {
337 		__le16 *opcode = rp->opcodes;
338 
339 		for (i = 0; i < num_commands; i++, opcode++)
340 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
341 
342 		for (i = 0; i < num_events; i++, opcode++)
343 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
344 	}
345 
346 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
347 				rp, rp_size);
348 	kfree(rp);
349 
350 	return err;
351 }
352 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indexes of all
 * configured primary controllers.
 *
 * The device list is walked twice under hci_dev_list_lock: once to
 * size the reply, once to fill it. The second pass applies stricter
 * filters, so it may produce fewer entries than the first; rp_len is
 * recomputed afterwards to match the actual count.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip devices still being set up or claimed by a user
		 * channel socket.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
412 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: mirror of read_index_list()
 * that reports only *unconfigured* primary controllers. Same two-pass
 * count/fill pattern under hci_dev_list_lock; the fill pass may yield
 * fewer entries than the sizing pass, so rp_len is recomputed.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
472 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with all primary and AMP
 * controllers, each tagged with a type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and its bus. Uses the same two-pass
 * count/fill pattern as read_index_list(); struct_size() sizes the
 * flexible entry array.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocating while holding the read lock. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		BT_DBG("Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
546 
547 static bool is_configured(struct hci_dev *hdev)
548 {
549 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
550 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
551 		return false;
552 
553 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
554 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
555 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
556 		return false;
557 
558 	return true;
559 }
560 
561 static __le32 get_missing_options(struct hci_dev *hdev)
562 {
563 	u32 options = 0;
564 
565 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
566 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
567 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
568 
569 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
570 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
571 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
572 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
573 
574 	return cpu_to_le32(options);
575 }
576 
/* Broadcast a New Configuration Options event with the current set of
 * missing options, skipping @skip (typically the originating socket).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
584 
/* Complete @opcode with the current missing-options bitmask as the
 * reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
592 
593 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
594 			    void *data, u16 data_len)
595 {
596 	struct mgmt_rp_read_config_info rp;
597 	u32 options = 0;
598 
599 	BT_DBG("sock %p %s", sk, hdev->name);
600 
601 	hci_dev_lock(hdev);
602 
603 	memset(&rp, 0, sizeof(rp));
604 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
605 
606 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
607 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
608 
609 	if (hdev->set_bdaddr)
610 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
611 
612 	rp.supported_options = cpu_to_le32(options);
613 	rp.missing_options = get_missing_options(hdev);
614 
615 	hci_dev_unlock(hdev);
616 
617 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
618 				 &rp, sizeof(rp));
619 }
620 
/* Build the bitmask of PHYs this controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature page. EDR 3-slot/5-slot
 * bits are only considered inside the matching 2M/3M capability
 * branches, mirroring the feature dependency structure.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory whenever LE is supported. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
672 
/* Build the bitmask of currently selected PHYs.
 *
 * For BR/EDR the selection is read from hdev->pkt_type: basic-rate
 * multi-slot packets are selected when their bit IS set, while EDR
 * packet-type bits use inverted polarity (HCI_2DHx/HCI_3DHx set means
 * "shall NOT be used"), hence the negated tests below. For LE the
 * default TX/RX PHY preference masks are consulted.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
735 
736 static u32 get_configurable_phys(struct hci_dev *hdev)
737 {
738 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
739 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
740 }
741 
/* Build the bitmask of MGMT settings this controller can support,
 * based on its BR/EDR and LE capabilities and available quirks/hooks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Settings available on every controller. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (page scan tuning) needs >= 1.2. */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		/* Secure connections are always available over LE. */
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	/* Configuration only makes sense with an external-config quirk
	 * or a driver hook to set the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
783 
/* Build the bitmask of currently active MGMT settings from the
 * corresponding hdev flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	return settings;
}
851 
/* Look up a pending command for @opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
856 
/* Look up a pending command for @opcode on the control channel whose
 * user data matches @data.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
863 
864 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
865 {
866 	struct mgmt_pending_cmd *cmd;
867 
868 	/* If there's a pending mgmt command the flags will not yet have
869 	 * their final values, so check for this first.
870 	 */
871 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
872 	if (cmd) {
873 		struct mgmt_mode *cp = cmd->param;
874 		if (cp->val == 0x01)
875 			return LE_AD_GENERAL;
876 		else if (cp->val == 0x02)
877 			return LE_AD_LIMITED;
878 	} else {
879 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
880 			return LE_AD_LIMITED;
881 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
882 			return LE_AD_GENERAL;
883 	}
884 
885 	return 0;
886 }
887 
888 bool mgmt_get_connectable(struct hci_dev *hdev)
889 {
890 	struct mgmt_pending_cmd *cmd;
891 
892 	/* If there's a pending mgmt command the flag will not yet have
893 	 * it's final value, so check for this first.
894 	 */
895 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
896 	if (cmd) {
897 		struct mgmt_mode *cp = cmd->param;
898 
899 		return cp->val;
900 	}
901 
902 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
903 }
904 
/* Delayed-work handler that turns the service cache off: once the
 * HCI_SERVICE_CACHE flag is cleared, push the real EIR data and class
 * of device to the controller in a single request.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* test_and_clear: only the instance that clears the flag runs
	 * the update; a concurrent clear means nothing to do.
	 */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	/* Submit the request outside the device lock. */
	hci_req_run(&req, NULL);
}
925 
/* Delayed-work handler for resolvable-private-address expiry: mark the
 * RPA as expired and, if advertising, restart it so a fresh RPA gets
 * generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing further to do unless we are advertising; the flag
	 * alone makes the next user regenerate the RPA.
	 */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
950 
/* One-time per-device mgmt initialization, performed when the first
 * mgmt command touches @hdev. The test_and_set on HCI_MGMT makes this
 * idempotent.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
966 
/* MGMT_OP_READ_INFO handler: reply with the controller's address,
 * version, manufacturer, settings, class of device and names. The
 * fields are snapshotted under the device lock.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
996 
/* Append the controller's EIR data (class of device when BR/EDR is
 * enabled, appearance when LE is enabled, complete and short local
 * name) to @eir and return the number of bytes written.
 *
 * NOTE(review): @eir has no explicit size here — callers in this file
 * pass 512-byte buffers; confirm that remains sufficient for the
 * maximum name lengths before reusing elsewhere.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1020 
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but with
 * class of device, appearance and names packed as EIR data after the
 * fixed header. Calling it switches this socket from the legacy
 * per-field change events to the extended info event.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Fixed header plus EIR payload share one on-stack buffer. */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	BT_DBG("sock %p %s", sk, hdev->name);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1060 
1061 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1062 {
1063 	char buf[512];
1064 	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1065 	u16 eir_len;
1066 
1067 	memset(buf, 0, sizeof(buf));
1068 
1069 	eir_len = append_eir_data_to_buf(hdev, ev->eir);
1070 	ev->eir_len = cpu_to_le16(eir_len);
1071 
1072 	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1073 				  sizeof(*ev) + eir_len,
1074 				  HCI_MGMT_EXT_INFO_EVENTS, skip);
1075 }
1076 
1077 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1078 {
1079 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1080 
1081 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1082 				 sizeof(settings));
1083 }
1084 
1085 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1086 {
1087 	BT_DBG("%s status 0x%02x", hdev->name, status);
1088 
1089 	if (hci_conn_count(hdev) == 0) {
1090 		cancel_delayed_work(&hdev->power_off);
1091 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1092 	}
1093 }
1094 
1095 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1096 {
1097 	struct mgmt_ev_advertising_added ev;
1098 
1099 	ev.instance = instance;
1100 
1101 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1102 }
1103 
1104 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1105 			      u8 instance)
1106 {
1107 	struct mgmt_ev_advertising_removed ev;
1108 
1109 	ev.instance = instance;
1110 
1111 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1112 }
1113 
1114 static void cancel_adv_timeout(struct hci_dev *hdev)
1115 {
1116 	if (hdev->adv_instance_timeout) {
1117 		hdev->adv_instance_timeout = 0;
1118 		cancel_delayed_work(&hdev->adv_instance_expire);
1119 	}
1120 }
1121 
/* Build and run one HCI request that quiesces the controller before
 * power off: disable page/inquiry scan, clear advertising instances,
 * stop advertising and discovery, and abort all connections.
 *
 * Returns the hci_req_run() result; -ENODATA means no HCI command
 * needed to be queued at all (caller treats that as "nothing to do").
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Instance 0x00 means clear all advertising instances */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1155 
/* Handler for MGMT_OP_SET_POWERED: power the controller up or down.
 *
 * If the requested state already matches, respond immediately with the
 * current settings. Otherwise a pending command is queued and the final
 * response is sent from the power on/off completion path.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Only 0x00 (off) and 0x01 (on) are valid */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	/* Reached on success paths too; only drops the lock */
	hci_dev_unlock(hdev);
	return err;
}
1210 
1211 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1212 {
1213 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1214 
1215 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1216 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1217 }
1218 
/* Public wrapper: broadcast New Settings to every listener. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1223 
/* Iteration context for mgmt_pending_foreach() callbacks; collects the
 * first responding socket (with a hold taken on it) so a follow-up
 * event can skip notifying it twice.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1229 
/* Per-command callback: answer @cmd with the current settings, remember
 * the first socket seen in the cmd_lookup context, and free the pending
 * command entry.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		/* Hold a reference; the caller does sock_put() later */
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	/* _free, not _remove: the entry was already unlinked above */
	mgmt_pending_free(cmd);
}
1245 
/* Per-command callback: fail @cmd with the status pointed to by @data
 * and drop it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1253 
1254 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1255 {
1256 	if (cmd->cmd_complete) {
1257 		u8 *status = data;
1258 
1259 		cmd->cmd_complete(cmd, *status);
1260 		mgmt_pending_remove(cmd);
1261 
1262 		return;
1263 	}
1264 
1265 	cmd_status_rsp(cmd, data);
1266 }
1267 
/* Default cmd_complete handler: echo the original command parameters
 * back as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1273 
/* cmd_complete handler for commands whose parameters start with a
 * mgmt_addr_info: only that address portion is echoed back.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1279 
1280 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1281 {
1282 	if (!lmp_bredr_capable(hdev))
1283 		return MGMT_STATUS_NOT_SUPPORTED;
1284 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1285 		return MGMT_STATUS_REJECTED;
1286 	else
1287 		return MGMT_STATUS_SUCCESS;
1288 }
1289 
1290 static u8 mgmt_le_support(struct hci_dev *hdev)
1291 {
1292 	if (!lmp_le_capable(hdev))
1293 		return MGMT_STATUS_NOT_SUPPORTED;
1294 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1295 		return MGMT_STATUS_REJECTED;
1296 	else
1297 		return MGMT_STATUS_SUCCESS;
1298 }
1299 
/* Completion hook for the discoverable update: finish the pending
 * Set Discoverable command, arm the discoverable timeout if one was
 * requested, and broadcast New Settings on success.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the flag set optimistically in set_discoverable */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the timeout only now that the mode change has completed */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1334 
/* Handler for MGMT_OP_SET_DISCOVERABLE.
 *
 * val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable. Updates the flags and, when the device is powered,
 * defers the actual HCI work to the discoverable_update worker whose
 * completion path answers the command.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires the device to also be connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	/* Reached on success paths too; only drops the lock */
	hci_dev_unlock(hdev);
	return err;
}
1458 
/* Completion hook for the connectable update: finish the pending
 * Set Connectable command and broadcast New Settings on success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1486 
1487 static int set_connectable_update_settings(struct hci_dev *hdev,
1488 					   struct sock *sk, u8 val)
1489 {
1490 	bool changed = false;
1491 	int err;
1492 
1493 	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1494 		changed = true;
1495 
1496 	if (val) {
1497 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1498 	} else {
1499 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1500 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1501 	}
1502 
1503 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1504 	if (err < 0)
1505 		return err;
1506 
1507 	if (changed) {
1508 		hci_req_update_scan(hdev);
1509 		hci_update_background_scan(hdev);
1510 		return new_settings(hdev, sk);
1511 	}
1512 
1513 	return 0;
1514 }
1515 
/* Handler for MGMT_OP_SET_CONNECTABLE.
 *
 * When powered off only the flags are updated; otherwise the flag
 * changes are applied and the HCI work is deferred to the
 * connectable_update worker, whose completion path answers the command.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also cancels discoverable */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	/* Reached on success paths too; only drops the lock */
	hci_dev_unlock(hdev);
	return err;
}
1572 
/* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag.
 * Purely a host-side setting; no HCI command is needed, but a
 * discoverable update may be scheduled (see comment below).
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test_and_* returns the previous state, so "changed" is true
	 * only when the flag actually flipped.
	 */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1615 
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link-level
 * security (authentication). When powered, sends
 * HCI_OP_WRITE_AUTH_ENABLE and defers the response to the command
 * complete path; when powered off only the flag is updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested auth setting */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	/* Reached on success paths too; only drops the lock */
	hci_dev_unlock(hdev);
	return err;
}
1684 
/* Handler for MGMT_OP_SET_SSP: toggle Secure Simple Pairing.
 * Disabling SSP also clears High Speed (HS depends on SSP). When
 * powered, HCI_OP_WRITE_SSP_MODE is sent and the response comes from
 * the command complete path.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP drags HCI_HS_ENABLED down with it;
			 * "changed" must be true if either flag flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Debug keys only make sense with SSP on; best-effort disable */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	/* Reached on success paths too; only drops the lock */
	hci_dev_unlock(hdev);
	return err;
}
1765 
/* Handler for MGMT_OP_SET_HS: toggle High Speed support. A host-only
 * flag (no HCI command); requires SSP to be enabled, and disabling is
 * rejected while the device is powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An SSP change in flight could invalidate the checks above */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1822 
/* HCI request callback for the Set LE command: answer all pending
 * Set LE commands, broadcast New Settings, and - when LE ended up
 * enabled - refresh advertising and scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* match.sk (held by settings_rsp) is skipped so the command
	 * originator is not notified twice.
	 */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1869 
/* Handler for MGMT_OP_SET_LE: enable or disable Low Energy support.
 *
 * LE-only controllers may never have LE switched off. When the host
 * LE support already matches (or the device is powered off) only the
 * flags change; otherwise HCI_OP_WRITE_LE_HOST_SUPPORTED is issued and
 * le_enable_complete() finishes the command.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE drops all advertising instances (0x00 == all) */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before turning host LE support off */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1976 
1977 /* This is a helper function to test for pending mgmt commands that can
1978  * cause CoD or EIR HCI commands. We can only allow one such pending
1979  * mgmt command at a time since otherwise we cannot easily track what
1980  * the current values are, will be, and based on that calculate if a new
1981  * HCI command needs to be sent and if yes with what value.
1982  */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		/* All four opcodes fall through to the same result */
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}
1999 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; the 16/32-bit short form occupies the
 * four bytes starting at offset 12.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2004 
2005 static u8 get_uuid_size(const u8 *uuid)
2006 {
2007 	u32 val;
2008 
2009 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2010 		return 128;
2011 
2012 	val = get_unaligned_le32(&uuid[12]);
2013 	if (val > 0xffff)
2014 		return 32;
2015 
2016 	return 16;
2017 }
2018 
/* Shared completion helper for Add/Remove UUID and Set Device Class:
 * finish the pending @mgmt_op with the (possibly updated) class of
 * device as response payload.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2037 
/* HCI request callback for add_uuid(). */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2044 
/* Handler for MGMT_OP_ADD_UUID: register a service UUID and refresh
 * the class of device and EIR data. If no HCI traffic results
 * (-ENODATA), the command completes immediately; otherwise the
 * response comes from add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending at once */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Nothing was queued: complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	/* Reached on success paths too; only drops the lock */
	hci_dev_unlock(hdev);
	return err;
}
2102 
2103 static bool enable_service_cache(struct hci_dev *hdev)
2104 {
2105 	if (!hdev_is_powered(hdev))
2106 		return false;
2107 
2108 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2109 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2110 				   CACHE_TIMEOUT);
2111 		return true;
2112 	}
2113 
2114 	return false;
2115 }
2116 
/* HCI request callback for remove_uuid(). */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2123 
/* Handler for MGMT_OP_REMOVE_UUID: drop a service UUID (or all of
 * them when the all-zero UUID is given) and refresh class/EIR. As in
 * add_uuid(), -ENODATA from hci_req_run() means no HCI traffic was
 * needed and the command completes immediately.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR affecting command may be pending at once */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* All-zero UUID: wipe the whole list */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Nothing was queued: complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2202 
/* HCI request completion callback for set_dev_class(). Forwards the
 * controller status to mgmt_class_complete(), which resolves the pending
 * MGMT_OP_SET_DEV_CLASS command.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2209 
/* Handle MGMT_OP_SET_DEV_CLASS: set the major/minor device class and, if
 * the controller is powered, push the new Class of Device (and possibly
 * refreshed EIR data) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Class of Device only exists for BR/EDR capable controllers */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reject values with bits set outside the valid minor/major class
	 * fields (low two bits of minor and high three bits of major are
	 * not part of those fields in the CoD encoding).
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off, just store the values; they get pushed to the
	 * controller on power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop hdev lock while synchronously cancelling the service
		 * cache work — NOTE(review): presumably the work item takes
		 * the hdev lock itself, so holding it here could deadlock;
		 * confirm against hci_request.c.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		/* -ENODATA: nothing needed changing on the controller */
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2280 
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace the controller's stored BR/EDR
 * link keys with the list supplied by userspace (typically at daemon
 * startup), after fully validating the command parameters.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps struct_size() below from wrapping a u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before touching any state: keys must be for
	 * BR/EDR addresses and use a known link key type (0x00-0x08).
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Full replacement semantics: drop all previously stored keys */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Silently drop keys on the administrative block list */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2369 
2370 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2371 			   u8 addr_type, struct sock *skip_sk)
2372 {
2373 	struct mgmt_ev_device_unpaired ev;
2374 
2375 	bacpy(&ev.addr.bdaddr, bdaddr);
2376 	ev.addr.type = addr_type;
2377 
2378 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2379 			  skip_sk);
2380 }
2381 
/* Handle MGMT_OP_UNPAIR_DEVICE: delete the pairing material (link key for
 * BR/EDR, SMP LTK/IRK for LE) of a remote device, and optionally
 * terminate an existing connection to it.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the address being unpaired */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean parameter; anything else is invalid */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: connection parameters can go right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Park the command; it completes once the disconnect finishes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2509 
/* Handle MGMT_OP_DISCONNECT: terminate an existing BR/EDR or LE
 * connection to the given address. The command completes asynchronously
 * when the disconnect finishes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED objects have no live link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2575 
2576 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2577 {
2578 	switch (link_type) {
2579 	case LE_LINK:
2580 		switch (addr_type) {
2581 		case ADDR_LE_DEV_PUBLIC:
2582 			return BDADDR_LE_PUBLIC;
2583 
2584 		default:
2585 			/* Fallback to LE Random address type */
2586 			return BDADDR_LE_RANDOM;
2587 		}
2588 
2589 	default:
2590 		/* Fallback to BR/EDR type */
2591 		return BDADDR_BREDR;
2592 	}
2593 }
2594 
2595 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2596 			   u16 data_len)
2597 {
2598 	struct mgmt_rp_get_connections *rp;
2599 	struct hci_conn *c;
2600 	int err;
2601 	u16 i;
2602 
2603 	BT_DBG("");
2604 
2605 	hci_dev_lock(hdev);
2606 
2607 	if (!hdev_is_powered(hdev)) {
2608 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2609 				      MGMT_STATUS_NOT_POWERED);
2610 		goto unlock;
2611 	}
2612 
2613 	i = 0;
2614 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2615 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2616 			i++;
2617 	}
2618 
2619 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2620 	if (!rp) {
2621 		err = -ENOMEM;
2622 		goto unlock;
2623 	}
2624 
2625 	i = 0;
2626 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2627 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2628 			continue;
2629 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2630 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2631 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2632 			continue;
2633 		i++;
2634 	}
2635 
2636 	rp->conn_count = cpu_to_le16(i);
2637 
2638 	/* Recalculate length in case of filtered SCO connections, etc */
2639 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2640 				struct_size(rp, addr, i));
2641 
2642 	kfree(rp);
2643 
2644 unlock:
2645 	hci_dev_unlock(hdev);
2646 	return err;
2647 }
2648 
/* Queue an HCI PIN Code Negative Reply for the given address and park a
 * pending MGMT_OP_PIN_CODE_NEG_REPLY command that completes when the HCI
 * command does. Returns 0 on success or a negative errno.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command takes only the bdaddr, not the full mgmt struct */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2669 
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to the
 * controller for an ongoing BR/EDR pairing. If high security is required
 * but the PIN is not the full 16 bytes, a negative reply is sent instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only happens on BR/EDR (ACL) links */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; reject the pairing
	 * with a negative reply and fail the mgmt command otherwise.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2731 
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the IO capability used for
 * future pairings. Purely local state; no HCI traffic is generated, so
 * the command completes synchronously.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	BT_DBG("");

	/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability value */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
	       hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2755 
2756 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2757 {
2758 	struct hci_dev *hdev = conn->hdev;
2759 	struct mgmt_pending_cmd *cmd;
2760 
2761 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2762 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2763 			continue;
2764 
2765 		if (cmd->user_data != conn)
2766 			continue;
2767 
2768 		return cmd;
2769 	}
2770 
2771 	return NULL;
2772 }
2773 
/* Complete a pending MGMT_OP_PAIR_DEVICE command with the given status,
 * detach all pairing callbacks from the connection and release the
 * references taken when pairing started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the hold taken when the connection was initiated */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() stored in cmd->user_data */
	hci_conn_put(conn);

	return err;
}
2802 
2803 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2804 {
2805 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2806 	struct mgmt_pending_cmd *cmd;
2807 
2808 	cmd = find_pairing(conn);
2809 	if (cmd) {
2810 		cmd->cmd_complete(cmd, status);
2811 		mgmt_pending_remove(cmd);
2812 	}
2813 }
2814 
/* BR/EDR connection callback (connect/security/disconnect confirm) used
 * while a pairing is in flight: resolves the pending pair command with
 * the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2830 
/* LE variant of pairing_complete_cb(): only failures are handled here,
 * since a successful LE connection does not by itself mean the pairing
 * finished — success is reported via mgmt_smp_complete() instead.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2849 
/* Handle MGMT_OP_PAIR_DEVICE: initiate pairing (dedicated bonding) with
 * a remote BR/EDR or LE device, establishing a connection first if
 * necessary. The command completes asynchronously via the pairing
 * callbacks installed on the connection.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
					   addr_type, sec_level,
					   HCI_LE_CONN_TIMEOUT);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Translate the connect error into an mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* Callbacks already installed means another pairing is in flight */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released by pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure enough, finish right away */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2980 
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort an in-progress pairing. The
 * pending pair command is completed with MGMT_STATUS_CANCELLED and this
 * command replies with the address that was being paired.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The given address must match the pairing actually in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3023 
/* Common implementation for the user pairing response commands (PIN
 * negative reply, user confirm/passkey positive and negative replies).
 * LE responses are routed through SMP and complete synchronously; BR/EDR
 * responses are sent as the given HCI command and complete when it does.
 *
 * @mgmt_op: mgmt opcode being answered (used in all replies)
 * @hci_op:  HCI opcode to send for the BR/EDR case
 * @passkey: only used when @hci_op is HCI_OP_USER_PASSKEY_REPLY
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP, not HCI, and are resolved
	 * immediately rather than via a pending command.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3094 
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY: reject a PIN code request. Thin
 * wrapper around user_pairing_resp().
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3106 
/* Handle MGMT_OP_USER_CONFIRM_REPLY: accept a user confirmation request.
 * Unlike the other wrappers this one checks the exact parameter length
 * itself before delegating to user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3122 
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user confirmation
 * request. Thin wrapper around user_pairing_resp().
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3134 
/* Handle MGMT_OP_USER_PASSKEY_REPLY: supply the passkey entered by the
 * user. Thin wrapper around user_pairing_resp().
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3146 
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey entry request.
 * Thin wrapper around user_pairing_resp().
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3158 
/* If the currently active advertising instance carries any of the given
 * flags (e.g. local name / appearance that just changed), expire it and
 * schedule the next instance so the advertised data gets refreshed.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3187 
/* HCI request completion callback for set_local_name(): resolve the
 * pending MGMT_OP_SET_LOCAL_NAME command and, on success while LE
 * advertising, refresh any advertising instance that carries the name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3219 
/* Handle MGMT_OP_SET_LOCAL_NAME: set the local device name and short
 * name. When powered, the new name is pushed to the controller (BR/EDR
 * name/EIR and/or LE scan response data) and the command completes via
 * set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off, just store the names locally and notify
	 * listeners; the controller is updated on power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3289 
3290 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3291 			  u16 len)
3292 {
3293 	struct mgmt_cp_set_appearance *cp = data;
3294 	u16 appearance;
3295 	int err;
3296 
3297 	BT_DBG("");
3298 
3299 	if (!lmp_le_capable(hdev))
3300 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3301 				       MGMT_STATUS_NOT_SUPPORTED);
3302 
3303 	appearance = le16_to_cpu(cp->appearance);
3304 
3305 	hci_dev_lock(hdev);
3306 
3307 	if (hdev->appearance != appearance) {
3308 		hdev->appearance = appearance;
3309 
3310 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3311 			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3312 
3313 		ext_info_changed(hdev, sk);
3314 	}
3315 
3316 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3317 				0);
3318 
3319 	hci_dev_unlock(hdev);
3320 
3321 	return err;
3322 }
3323 
3324 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3325 				 void *data, u16 len)
3326 {
3327 	struct mgmt_rp_get_phy_confguration rp;
3328 
3329 	BT_DBG("sock %p %s", sk, hdev->name);
3330 
3331 	hci_dev_lock(hdev);
3332 
3333 	memset(&rp, 0, sizeof(rp));
3334 
3335 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3336 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3337 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3338 
3339 	hci_dev_unlock(hdev);
3340 
3341 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3342 				 &rp, sizeof(rp));
3343 }
3344 
3345 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3346 {
3347 	struct mgmt_ev_phy_configuration_changed ev;
3348 
3349 	memset(&ev, 0, sizeof(ev));
3350 
3351 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3352 
3353 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3354 			  sizeof(ev), skip);
3355 }
3356 
3357 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3358 				     u16 opcode, struct sk_buff *skb)
3359 {
3360 	struct mgmt_pending_cmd *cmd;
3361 
3362 	BT_DBG("status 0x%02x", status);
3363 
3364 	hci_dev_lock(hdev);
3365 
3366 	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3367 	if (!cmd)
3368 		goto unlock;
3369 
3370 	if (status) {
3371 		mgmt_cmd_status(cmd->sk, hdev->id,
3372 				MGMT_OP_SET_PHY_CONFIGURATION,
3373 				mgmt_status(status));
3374 	} else {
3375 		mgmt_cmd_complete(cmd->sk, hdev->id,
3376 				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
3377 				  NULL, 0);
3378 
3379 		mgmt_phy_configuration_changed(hdev, cmd->sk);
3380 	}
3381 
3382 	mgmt_pending_remove(cmd);
3383 
3384 unlock:
3385 	hci_dev_unlock(hdev);
3386 }
3387 
/* Handler for MGMT_OP_SET_PHY_CONFIGURATION.
 *
 * Validates the requested PHY selection against the supported and
 * configurable PHYs, applies the BR/EDR part synchronously by updating
 * hdev->pkt_type and, if the LE part changed, issues an HCI LE Set
 * Default PHY command that completes in set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	BT_DBG("sock %p %s", sk, hdev->name);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any selection outside the supported set. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* PHYs that are not configurable must always remain selected. */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to change: answer directly. */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR part of the selection into ACL packet
	 * types. NOTE(review): the EDR bits (HCI_2DHx/HCI_3DHx) appear
	 * to be "shall not be used" bits, which is why their logic is
	 * inverted relative to the basic-rate DM/DH bits — confirm
	 * against the HCI packet type definitions.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part of the selection is unchanged, no HCI command
	 * is needed and the result can be reported immediately.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller we have no TX/RX preference. */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3542 
3543 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3544 			    u16 len)
3545 {
3546 	int err = MGMT_STATUS_SUCCESS;
3547 	struct mgmt_cp_set_blocked_keys *keys = data;
3548 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3549 				   sizeof(struct mgmt_blocked_key_info));
3550 	u16 key_count, expected_len;
3551 	int i;
3552 
3553 	BT_DBG("request for %s", hdev->name);
3554 
3555 	key_count = __le16_to_cpu(keys->key_count);
3556 	if (key_count > max_key_count) {
3557 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3558 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3559 				       MGMT_STATUS_INVALID_PARAMS);
3560 	}
3561 
3562 	expected_len = struct_size(keys, keys, key_count);
3563 	if (expected_len != len) {
3564 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3565 			   expected_len, len);
3566 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3567 				       MGMT_STATUS_INVALID_PARAMS);
3568 	}
3569 
3570 	hci_dev_lock(hdev);
3571 
3572 	hci_blocked_keys_clear(hdev);
3573 
3574 	for (i = 0; i < keys->key_count; ++i) {
3575 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3576 
3577 		if (!b) {
3578 			err = MGMT_STATUS_NO_RESOURCES;
3579 			break;
3580 		}
3581 
3582 		b->type = keys->keys[i].type;
3583 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3584 		list_add_rcu(&b->list, &hdev->blocked_keys);
3585 	}
3586 	hci_dev_unlock(hdev);
3587 
3588 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3589 				err, NULL, 0);
3590 }
3591 
/* Completion handler for the HCI Read Local OOB (Extended) Data request
 * issued by read_local_oob_data().
 *
 * Parses the controller reply from @skb and answers the pending
 * MGMT_OP_READ_LOCAL_OOB_DATA command. For the non-extended reply only
 * P-192 values exist, so the mgmt response is truncated to exclude the
 * hash256/rand256 fields.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a short reply from the controller. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* No P-256 values available: shrink the response so the
		 * trailing hash256/rand256 fields are not sent.
		 */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a short reply from the controller. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3650 
3651 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3652 			       void *data, u16 data_len)
3653 {
3654 	struct mgmt_pending_cmd *cmd;
3655 	struct hci_request req;
3656 	int err;
3657 
3658 	BT_DBG("%s", hdev->name);
3659 
3660 	hci_dev_lock(hdev);
3661 
3662 	if (!hdev_is_powered(hdev)) {
3663 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3664 				      MGMT_STATUS_NOT_POWERED);
3665 		goto unlock;
3666 	}
3667 
3668 	if (!lmp_ssp_capable(hdev)) {
3669 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3670 				      MGMT_STATUS_NOT_SUPPORTED);
3671 		goto unlock;
3672 	}
3673 
3674 	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3675 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3676 				      MGMT_STATUS_BUSY);
3677 		goto unlock;
3678 	}
3679 
3680 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3681 	if (!cmd) {
3682 		err = -ENOMEM;
3683 		goto unlock;
3684 	}
3685 
3686 	hci_req_init(&req, hdev);
3687 
3688 	if (bredr_sc_enabled(hdev))
3689 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3690 	else
3691 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3692 
3693 	err = hci_req_run_skb(&req, read_local_oob_data_complete);
3694 	if (err < 0)
3695 		mgmt_pending_remove(cmd);
3696 
3697 unlock:
3698 	hci_dev_unlock(hdev);
3699 	return err;
3700 }
3701 
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA.
 *
 * Two request sizes are accepted: the legacy form carrying only P-192
 * hash/randomizer values (BR/EDR only) and the extended form carrying
 * both P-192 and P-256 values. The request length selects the parsing.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	BT_DBG("%s ", hdev->name);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy (P-192 only) form is BR/EDR specific. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3809 
3810 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3811 				  void *data, u16 len)
3812 {
3813 	struct mgmt_cp_remove_remote_oob_data *cp = data;
3814 	u8 status;
3815 	int err;
3816 
3817 	BT_DBG("%s", hdev->name);
3818 
3819 	if (cp->addr.type != BDADDR_BREDR)
3820 		return mgmt_cmd_complete(sk, hdev->id,
3821 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3822 					 MGMT_STATUS_INVALID_PARAMS,
3823 					 &cp->addr, sizeof(cp->addr));
3824 
3825 	hci_dev_lock(hdev);
3826 
3827 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3828 		hci_remote_oob_data_clear(hdev);
3829 		status = MGMT_STATUS_SUCCESS;
3830 		goto done;
3831 	}
3832 
3833 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3834 	if (err < 0)
3835 		status = MGMT_STATUS_INVALID_PARAMS;
3836 	else
3837 		status = MGMT_STATUS_SUCCESS;
3838 
3839 done:
3840 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3841 				status, &cp->addr, sizeof(cp->addr));
3842 
3843 	hci_dev_unlock(hdev);
3844 	return err;
3845 }
3846 
3847 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3848 {
3849 	struct mgmt_pending_cmd *cmd;
3850 
3851 	BT_DBG("status %d", status);
3852 
3853 	hci_dev_lock(hdev);
3854 
3855 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3856 	if (!cmd)
3857 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3858 
3859 	if (!cmd)
3860 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3861 
3862 	if (cmd) {
3863 		cmd->cmd_complete(cmd, mgmt_status(status));
3864 		mgmt_pending_remove(cmd);
3865 	}
3866 
3867 	hci_dev_unlock(hdev);
3868 }
3869 
3870 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3871 				    uint8_t *mgmt_status)
3872 {
3873 	switch (type) {
3874 	case DISCOV_TYPE_LE:
3875 		*mgmt_status = mgmt_le_support(hdev);
3876 		if (*mgmt_status)
3877 			return false;
3878 		break;
3879 	case DISCOV_TYPE_INTERLEAVED:
3880 		*mgmt_status = mgmt_le_support(hdev);
3881 		if (*mgmt_status)
3882 			return false;
3883 		/* Intentional fall-through */
3884 	case DISCOV_TYPE_BREDR:
3885 		*mgmt_status = mgmt_bredr_support(hdev);
3886 		if (*mgmt_status)
3887 			return false;
3888 		break;
3889 	default:
3890 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
3891 		return false;
3892 	}
3893 
3894 	return true;
3895 }
3896 
3897 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
3898 				    u16 op, void *data, u16 len)
3899 {
3900 	struct mgmt_cp_start_discovery *cp = data;
3901 	struct mgmt_pending_cmd *cmd;
3902 	u8 status;
3903 	int err;
3904 
3905 	BT_DBG("%s", hdev->name);
3906 
3907 	hci_dev_lock(hdev);
3908 
3909 	if (!hdev_is_powered(hdev)) {
3910 		err = mgmt_cmd_complete(sk, hdev->id, op,
3911 					MGMT_STATUS_NOT_POWERED,
3912 					&cp->type, sizeof(cp->type));
3913 		goto failed;
3914 	}
3915 
3916 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
3917 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3918 		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
3919 					&cp->type, sizeof(cp->type));
3920 		goto failed;
3921 	}
3922 
3923 	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3924 		err = mgmt_cmd_complete(sk, hdev->id, op, status,
3925 					&cp->type, sizeof(cp->type));
3926 		goto failed;
3927 	}
3928 
3929 	/* Clear the discovery filter first to free any previously
3930 	 * allocated memory for the UUID list.
3931 	 */
3932 	hci_discovery_filter_clear(hdev);
3933 
3934 	hdev->discovery.type = cp->type;
3935 	hdev->discovery.report_invalid_rssi = false;
3936 	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
3937 		hdev->discovery.limited = true;
3938 	else
3939 		hdev->discovery.limited = false;
3940 
3941 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
3942 	if (!cmd) {
3943 		err = -ENOMEM;
3944 		goto failed;
3945 	}
3946 
3947 	cmd->cmd_complete = generic_cmd_complete;
3948 
3949 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3950 	queue_work(hdev->req_workqueue, &hdev->discov_update);
3951 	err = 0;
3952 
3953 failed:
3954 	hci_dev_unlock(hdev);
3955 	return err;
3956 }
3957 
/* Handler for MGMT_OP_START_DISCOVERY. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
3964 
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
3972 
/* Completion helper for Start Service Discovery: respond with just the
 * first byte of the stored parameters (the discovery type).
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
3979 
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY.
 *
 * Starts a discovery with result filtering: only devices matching the
 * supplied RSSI threshold and (optional) UUID list are reported. The
 * variable-length UUID list is validated against the command length
 * before being copied into hdev->discovery.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound so that sizeof(*cp) + uuid_count * 16 cannot
	 * exceed (or wrap) the u16 command length.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* The actual scanning runs from the request workqueue. */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4080 
4081 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4082 {
4083 	struct mgmt_pending_cmd *cmd;
4084 
4085 	BT_DBG("status %d", status);
4086 
4087 	hci_dev_lock(hdev);
4088 
4089 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4090 	if (cmd) {
4091 		cmd->cmd_complete(cmd, mgmt_status(status));
4092 		mgmt_pending_remove(cmd);
4093 	}
4094 
4095 	hci_dev_unlock(hdev);
4096 }
4097 
4098 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4099 			  u16 len)
4100 {
4101 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
4102 	struct mgmt_pending_cmd *cmd;
4103 	int err;
4104 
4105 	BT_DBG("%s", hdev->name);
4106 
4107 	hci_dev_lock(hdev);
4108 
4109 	if (!hci_discovery_active(hdev)) {
4110 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4111 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
4112 					sizeof(mgmt_cp->type));
4113 		goto unlock;
4114 	}
4115 
4116 	if (hdev->discovery.type != mgmt_cp->type) {
4117 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4118 					MGMT_STATUS_INVALID_PARAMS,
4119 					&mgmt_cp->type, sizeof(mgmt_cp->type));
4120 		goto unlock;
4121 	}
4122 
4123 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4124 	if (!cmd) {
4125 		err = -ENOMEM;
4126 		goto unlock;
4127 	}
4128 
4129 	cmd->cmd_complete = generic_cmd_complete;
4130 
4131 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4132 	queue_work(hdev->req_workqueue, &hdev->discov_update);
4133 	err = 0;
4134 
4135 unlock:
4136 	hci_dev_unlock(hdev);
4137 	return err;
4138 }
4139 
4140 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4141 			u16 len)
4142 {
4143 	struct mgmt_cp_confirm_name *cp = data;
4144 	struct inquiry_entry *e;
4145 	int err;
4146 
4147 	BT_DBG("%s", hdev->name);
4148 
4149 	hci_dev_lock(hdev);
4150 
4151 	if (!hci_discovery_active(hdev)) {
4152 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4153 					MGMT_STATUS_FAILED, &cp->addr,
4154 					sizeof(cp->addr));
4155 		goto failed;
4156 	}
4157 
4158 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4159 	if (!e) {
4160 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4161 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4162 					sizeof(cp->addr));
4163 		goto failed;
4164 	}
4165 
4166 	if (cp->name_known) {
4167 		e->name_state = NAME_KNOWN;
4168 		list_del(&e->list);
4169 	} else {
4170 		e->name_state = NAME_NEEDED;
4171 		hci_inquiry_cache_update_resolve(hdev, e);
4172 	}
4173 
4174 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4175 				&cp->addr, sizeof(cp->addr));
4176 
4177 failed:
4178 	hci_dev_unlock(hdev);
4179 	return err;
4180 }
4181 
4182 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4183 			u16 len)
4184 {
4185 	struct mgmt_cp_block_device *cp = data;
4186 	u8 status;
4187 	int err;
4188 
4189 	BT_DBG("%s", hdev->name);
4190 
4191 	if (!bdaddr_type_is_valid(cp->addr.type))
4192 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4193 					 MGMT_STATUS_INVALID_PARAMS,
4194 					 &cp->addr, sizeof(cp->addr));
4195 
4196 	hci_dev_lock(hdev);
4197 
4198 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4199 				  cp->addr.type);
4200 	if (err < 0) {
4201 		status = MGMT_STATUS_FAILED;
4202 		goto done;
4203 	}
4204 
4205 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4206 		   sk);
4207 	status = MGMT_STATUS_SUCCESS;
4208 
4209 done:
4210 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4211 				&cp->addr, sizeof(cp->addr));
4212 
4213 	hci_dev_unlock(hdev);
4214 
4215 	return err;
4216 }
4217 
4218 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4219 			  u16 len)
4220 {
4221 	struct mgmt_cp_unblock_device *cp = data;
4222 	u8 status;
4223 	int err;
4224 
4225 	BT_DBG("%s", hdev->name);
4226 
4227 	if (!bdaddr_type_is_valid(cp->addr.type))
4228 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4229 					 MGMT_STATUS_INVALID_PARAMS,
4230 					 &cp->addr, sizeof(cp->addr));
4231 
4232 	hci_dev_lock(hdev);
4233 
4234 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4235 				  cp->addr.type);
4236 	if (err < 0) {
4237 		status = MGMT_STATUS_INVALID_PARAMS;
4238 		goto done;
4239 	}
4240 
4241 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4242 		   sk);
4243 	status = MGMT_STATUS_SUCCESS;
4244 
4245 done:
4246 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4247 				&cp->addr, sizeof(cp->addr));
4248 
4249 	hci_dev_unlock(hdev);
4250 
4251 	return err;
4252 }
4253 
4254 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4255 			 u16 len)
4256 {
4257 	struct mgmt_cp_set_device_id *cp = data;
4258 	struct hci_request req;
4259 	int err;
4260 	__u16 source;
4261 
4262 	BT_DBG("%s", hdev->name);
4263 
4264 	source = __le16_to_cpu(cp->source);
4265 
4266 	if (source > 0x0002)
4267 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4268 				       MGMT_STATUS_INVALID_PARAMS);
4269 
4270 	hci_dev_lock(hdev);
4271 
4272 	hdev->devid_source = source;
4273 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4274 	hdev->devid_product = __le16_to_cpu(cp->product);
4275 	hdev->devid_version = __le16_to_cpu(cp->version);
4276 
4277 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4278 				NULL, 0);
4279 
4280 	hci_req_init(&req, hdev);
4281 	__hci_req_update_eir(&req);
4282 	hci_req_run(&req, NULL);
4283 
4284 	hci_dev_unlock(hdev);
4285 
4286 	return err;
4287 }
4288 
/* hci_request completion handler used when re-enabling instance
 * advertising from set_advertising_complete(); it only logs the status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	BT_DBG("status %d", status);
}
4294 
/* Completion handler for the HCI transaction started by
 * set_advertising().
 *
 * Synchronizes HCI_ADVERTISING with the controller state, answers all
 * pending MGMT_OP_SET_ADVERTISING commands, emits New Settings and, if
 * "Set Advertising" was just turned off while advertising instances
 * exist, re-schedules multi-instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the actual controller advertising state in the
	 * HCI_ADVERTISING setting flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* With no current instance, fall back to the first configured one. */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
4357 
/* Set Advertising (MGMT_OP_SET_ADVERTISING) command handler.
 *
 * cp->val: 0x00 = advertising off, 0x01 = advertising on, 0x02 =
 * advertising on and connectable irrespective of the global
 * connectable setting (tracked via HCI_ADVERTISING_CONNECTABLE).
 *
 * When no HCI communication is required (powered off, no effective
 * change, LE links up, or an active LE scan is running) the flags are
 * toggled directly and the response sent immediately; otherwise an HCI
 * request is queued and completed in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Stop any pending advertising-instance rotation timer */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4465 
4466 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4467 			      void *data, u16 len)
4468 {
4469 	struct mgmt_cp_set_static_address *cp = data;
4470 	int err;
4471 
4472 	BT_DBG("%s", hdev->name);
4473 
4474 	if (!lmp_le_capable(hdev))
4475 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4476 				       MGMT_STATUS_NOT_SUPPORTED);
4477 
4478 	if (hdev_is_powered(hdev))
4479 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4480 				       MGMT_STATUS_REJECTED);
4481 
4482 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4483 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4484 			return mgmt_cmd_status(sk, hdev->id,
4485 					       MGMT_OP_SET_STATIC_ADDRESS,
4486 					       MGMT_STATUS_INVALID_PARAMS);
4487 
4488 		/* Two most significant bits shall be set */
4489 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4490 			return mgmt_cmd_status(sk, hdev->id,
4491 					       MGMT_OP_SET_STATIC_ADDRESS,
4492 					       MGMT_STATUS_INVALID_PARAMS);
4493 	}
4494 
4495 	hci_dev_lock(hdev);
4496 
4497 	bacpy(&hdev->static_addr, &cp->bdaddr);
4498 
4499 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4500 	if (err < 0)
4501 		goto unlock;
4502 
4503 	err = new_settings(hdev, sk);
4504 
4505 unlock:
4506 	hci_dev_unlock(hdev);
4507 	return err;
4508 }
4509 
4510 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4511 			   void *data, u16 len)
4512 {
4513 	struct mgmt_cp_set_scan_params *cp = data;
4514 	__u16 interval, window;
4515 	int err;
4516 
4517 	BT_DBG("%s", hdev->name);
4518 
4519 	if (!lmp_le_capable(hdev))
4520 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4521 				       MGMT_STATUS_NOT_SUPPORTED);
4522 
4523 	interval = __le16_to_cpu(cp->interval);
4524 
4525 	if (interval < 0x0004 || interval > 0x4000)
4526 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4527 				       MGMT_STATUS_INVALID_PARAMS);
4528 
4529 	window = __le16_to_cpu(cp->window);
4530 
4531 	if (window < 0x0004 || window > 0x4000)
4532 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4533 				       MGMT_STATUS_INVALID_PARAMS);
4534 
4535 	if (window > interval)
4536 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4537 				       MGMT_STATUS_INVALID_PARAMS);
4538 
4539 	hci_dev_lock(hdev);
4540 
4541 	hdev->le_scan_interval = interval;
4542 	hdev->le_scan_window = window;
4543 
4544 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4545 				NULL, 0);
4546 
4547 	/* If background scan is running, restart it so new parameters are
4548 	 * loaded.
4549 	 */
4550 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4551 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4552 		struct hci_request req;
4553 
4554 		hci_req_init(&req, hdev);
4555 
4556 		hci_req_add_le_scan_disable(&req);
4557 		hci_req_add_le_passive_scan(&req);
4558 
4559 		hci_req_run(&req, NULL);
4560 	}
4561 
4562 	hci_dev_unlock(hdev);
4563 
4564 	return err;
4565 }
4566 
/* Completion callback for the Set Fast Connectable HCI transaction.
 *
 * Looks up the pending MGMT_OP_SET_FAST_CONNECTABLE command; on HCI
 * failure a command status is returned, otherwise HCI_FAST_CONNECTABLE
 * is toggled to match the requested mode and the settings response plus
 * New Settings event are emitted.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4600 
/* Set Fast Connectable (MGMT_OP_SET_FAST_CONNECTABLE) command handler.
 *
 * Requires BR/EDR to be enabled and a controller of at least Bluetooth
 * 1.2 (page scan parameters are not configurable before that).  When
 * powered, the page scan settings are written via an HCI request and
 * completed in fast_connectable_complete(); when powered off only the
 * HCI_FAST_CONNECTABLE flag is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Already in the requested state: just acknowledge */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: flip the flag only; hardware is programmed on power on */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4665 
/* Completion callback for the Set BR/EDR HCI transaction.
 *
 * On HCI failure the HCI_BREDR_ENABLED flag - set optimistically in
 * set_bredr() before running the request - is cleared again and a
 * command status returned; on success the settings response and New
 * Settings event are emitted.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4697 
/* Set BR/EDR (MGMT_OP_SET_BREDR) command handler for dual-mode
 * controllers.
 *
 * Requires both BR/EDR and LE support, and LE must currently be
 * enabled.  Disabling BR/EDR while powered is rejected, as is
 * re-enabling it when a static address or Secure Connections would make
 * the resulting configuration invalid.  When powered, the change is
 * applied through an HCI request completed in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Turning BR/EDR off also drops all settings that only
		 * make sense with BR/EDR enabled.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4809 
/* Completion callback for the Write Secure Connections Support HCI
 * command issued from set_secure_conn().
 *
 * On success the HCI_SC_ENABLED/HCI_SC_ONLY flags are updated to match
 * the requested mode (0x00 off, 0x01 enabled, 0x02 SC-only) and the
 * settings response plus New Settings event are sent.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
4854 
4855 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4856 			   void *data, u16 len)
4857 {
4858 	struct mgmt_mode *cp = data;
4859 	struct mgmt_pending_cmd *cmd;
4860 	struct hci_request req;
4861 	u8 val;
4862 	int err;
4863 
4864 	BT_DBG("request for %s", hdev->name);
4865 
4866 	if (!lmp_sc_capable(hdev) &&
4867 	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4868 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4869 				       MGMT_STATUS_NOT_SUPPORTED);
4870 
4871 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4872 	    lmp_sc_capable(hdev) &&
4873 	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4874 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4875 				       MGMT_STATUS_REJECTED);
4876 
4877 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4878 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4879 				  MGMT_STATUS_INVALID_PARAMS);
4880 
4881 	hci_dev_lock(hdev);
4882 
4883 	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4884 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4885 		bool changed;
4886 
4887 		if (cp->val) {
4888 			changed = !hci_dev_test_and_set_flag(hdev,
4889 							     HCI_SC_ENABLED);
4890 			if (cp->val == 0x02)
4891 				hci_dev_set_flag(hdev, HCI_SC_ONLY);
4892 			else
4893 				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4894 		} else {
4895 			changed = hci_dev_test_and_clear_flag(hdev,
4896 							      HCI_SC_ENABLED);
4897 			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4898 		}
4899 
4900 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4901 		if (err < 0)
4902 			goto failed;
4903 
4904 		if (changed)
4905 			err = new_settings(hdev, sk);
4906 
4907 		goto failed;
4908 	}
4909 
4910 	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4911 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4912 				      MGMT_STATUS_BUSY);
4913 		goto failed;
4914 	}
4915 
4916 	val = !!cp->val;
4917 
4918 	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
4919 	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4920 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4921 		goto failed;
4922 	}
4923 
4924 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4925 	if (!cmd) {
4926 		err = -ENOMEM;
4927 		goto failed;
4928 	}
4929 
4930 	hci_req_init(&req, hdev);
4931 	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4932 	err = hci_req_run(&req, sc_enable_complete);
4933 	if (err < 0) {
4934 		mgmt_pending_remove(cmd);
4935 		goto failed;
4936 	}
4937 
4938 failed:
4939 	hci_dev_unlock(hdev);
4940 	return err;
4941 }
4942 
4943 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4944 			  void *data, u16 len)
4945 {
4946 	struct mgmt_mode *cp = data;
4947 	bool changed, use_changed;
4948 	int err;
4949 
4950 	BT_DBG("request for %s", hdev->name);
4951 
4952 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4953 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4954 				       MGMT_STATUS_INVALID_PARAMS);
4955 
4956 	hci_dev_lock(hdev);
4957 
4958 	if (cp->val)
4959 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4960 	else
4961 		changed = hci_dev_test_and_clear_flag(hdev,
4962 						      HCI_KEEP_DEBUG_KEYS);
4963 
4964 	if (cp->val == 0x02)
4965 		use_changed = !hci_dev_test_and_set_flag(hdev,
4966 							 HCI_USE_DEBUG_KEYS);
4967 	else
4968 		use_changed = hci_dev_test_and_clear_flag(hdev,
4969 							  HCI_USE_DEBUG_KEYS);
4970 
4971 	if (hdev_is_powered(hdev) && use_changed &&
4972 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4973 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4974 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4975 			     sizeof(mode), &mode);
4976 	}
4977 
4978 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4979 	if (err < 0)
4980 		goto unlock;
4981 
4982 	if (changed)
4983 		err = new_settings(hdev, sk);
4984 
4985 unlock:
4986 	hci_dev_unlock(hdev);
4987 	return err;
4988 }
4989 
/* Set Privacy (MGMT_OP_SET_PRIVACY) command handler.
 *
 * cp->privacy: 0x00 = off, 0x01 = privacy (RPA) enabled, 0x02 =
 * limited privacy.  Only allowed while powered off.  Enabling privacy
 * stores the supplied IRK and marks the current RPA expired so a new
 * one gets generated on power on.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5046 
5047 static bool irk_is_valid(struct mgmt_irk_info *irk)
5048 {
5049 	switch (irk->addr.type) {
5050 	case BDADDR_LE_PUBLIC:
5051 		return true;
5052 
5053 	case BDADDR_LE_RANDOM:
5054 		/* Two most significant bits shall be set */
5055 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5056 			return false;
5057 		return true;
5058 	}
5059 
5060 	return false;
5061 }
5062 
/* Load IRKs (MGMT_OP_LOAD_IRKS) command handler.
 *
 * Validates the advertised count against both a hard upper bound and
 * the actual payload length, then atomically replaces the stored IRK
 * list with the supplied entries (skipping any blocked keys) and
 * enables RPA resolving.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Maximum number of entries that can fit in a u16-sized payload */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate all entries before touching the stored list */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5133 
5134 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5135 {
5136 	if (key->master != 0x00 && key->master != 0x01)
5137 		return false;
5138 
5139 	switch (key->addr.type) {
5140 	case BDADDR_LE_PUBLIC:
5141 		return true;
5142 
5143 	case BDADDR_LE_RANDOM:
5144 		/* Two most significant bits shall be set */
5145 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5146 			return false;
5147 		return true;
5148 	}
5149 
5150 	return false;
5151 }
5152 
/* Load Long Term Keys (MGMT_OP_LOAD_LONG_TERM_KEYS) command handler.
 *
 * Validates the declared key count against the payload length and each
 * entry via ltk_is_valid(), then atomically replaces the stored LTK
 * list.  Blocked keys and unknown key types are skipped; P-256 debug
 * keys are deliberately never loaded.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Maximum number of entries that can fit in a u16-sized payload */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate all entries before touching the stored list */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		/* Debug keys are intentionally not added: the fall
		 * through into default skips them.
		 */
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* fall through */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5248 
5249 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5250 {
5251 	struct hci_conn *conn = cmd->user_data;
5252 	struct mgmt_rp_get_conn_info rp;
5253 	int err;
5254 
5255 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5256 
5257 	if (status == MGMT_STATUS_SUCCESS) {
5258 		rp.rssi = conn->rssi;
5259 		rp.tx_power = conn->tx_power;
5260 		rp.max_tx_power = conn->max_tx_power;
5261 	} else {
5262 		rp.rssi = HCI_RSSI_INVALID;
5263 		rp.tx_power = HCI_TX_POWER_INVALID;
5264 		rp.max_tx_power = HCI_TX_POWER_INVALID;
5265 	}
5266 
5267 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5268 				status, &rp, sizeof(rp));
5269 
5270 	hci_conn_drop(conn);
5271 	hci_conn_put(conn);
5272 
5273 	return err;
5274 }
5275 
/* Completion callback for the connection-info refresh HCI request
 * queued in get_conn_info().  Recovers the connection handle from the
 * last sent command, looks up the matching pending command and
 * completes it via conn_info_cmd_complete().
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	BT_DBG("status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5329 
/* Get Connection Information (MGMT_OP_GET_CONN_INFO) command handler.
 *
 * Replies immediately with the cached RSSI / TX power values when they
 * are fresh enough; otherwise queues Read RSSI and (when needed) Read
 * Transmit Power Level HCI commands and completes asynchronously via
 * conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References released in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5450 
/* Complete a pending Get Clock Info command: build the response from
 * cached clock values and release the connection references taken when
 * the command was queued in get_clock_info().
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	/* Echo back the address from the original command parameters */
	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	/* On failure, reply with an otherwise zeroed response */
	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	/* conn is only set when a piconet clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Release the hold/get references taken in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
5486 
/* Handler for the HCI request issued by get_clock_info(). Looks up the
 * pending mgmt command that triggered the request (keyed by the
 * affected connection, if any) and completes it.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	/* Retrieve the parameters of the HCI_OP_READ_CLOCK just sent */
	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* A non-zero "which" means the piconet clock of a specific
	 * connection was requested (set in get_clock_info()).
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command is keyed by the connection (or NULL) */
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5518 
5519 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5520 			 u16 len)
5521 {
5522 	struct mgmt_cp_get_clock_info *cp = data;
5523 	struct mgmt_rp_get_clock_info rp;
5524 	struct hci_cp_read_clock hci_cp;
5525 	struct mgmt_pending_cmd *cmd;
5526 	struct hci_request req;
5527 	struct hci_conn *conn;
5528 	int err;
5529 
5530 	BT_DBG("%s", hdev->name);
5531 
5532 	memset(&rp, 0, sizeof(rp));
5533 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5534 	rp.addr.type = cp->addr.type;
5535 
5536 	if (cp->addr.type != BDADDR_BREDR)
5537 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5538 					 MGMT_STATUS_INVALID_PARAMS,
5539 					 &rp, sizeof(rp));
5540 
5541 	hci_dev_lock(hdev);
5542 
5543 	if (!hdev_is_powered(hdev)) {
5544 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5545 					MGMT_STATUS_NOT_POWERED, &rp,
5546 					sizeof(rp));
5547 		goto unlock;
5548 	}
5549 
5550 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5551 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5552 					       &cp->addr.bdaddr);
5553 		if (!conn || conn->state != BT_CONNECTED) {
5554 			err = mgmt_cmd_complete(sk, hdev->id,
5555 						MGMT_OP_GET_CLOCK_INFO,
5556 						MGMT_STATUS_NOT_CONNECTED,
5557 						&rp, sizeof(rp));
5558 			goto unlock;
5559 		}
5560 	} else {
5561 		conn = NULL;
5562 	}
5563 
5564 	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5565 	if (!cmd) {
5566 		err = -ENOMEM;
5567 		goto unlock;
5568 	}
5569 
5570 	cmd->cmd_complete = clock_info_cmd_complete;
5571 
5572 	hci_req_init(&req, hdev);
5573 
5574 	memset(&hci_cp, 0, sizeof(hci_cp));
5575 	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5576 
5577 	if (conn) {
5578 		hci_conn_hold(conn);
5579 		cmd->user_data = hci_conn_get(conn);
5580 
5581 		hci_cp.handle = cpu_to_le16(conn->handle);
5582 		hci_cp.which = 0x01; /* Piconet clock */
5583 		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5584 	}
5585 
5586 	err = hci_req_run(&req, get_clock_info_complete);
5587 	if (err < 0)
5588 		mgmt_pending_remove(cmd);
5589 
5590 unlock:
5591 	hci_dev_unlock(hdev);
5592 	return err;
5593 }
5594 
5595 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5596 {
5597 	struct hci_conn *conn;
5598 
5599 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5600 	if (!conn)
5601 		return false;
5602 
5603 	if (conn->dst_type != type)
5604 		return false;
5605 
5606 	if (conn->state != BT_CONNECTED)
5607 		return false;
5608 
5609 	return true;
5610 }
5611 
/* This function requires the caller holds hdev->lock */
/* Set (creating if necessary) the auto-connect policy of an LE
 * connection parameter entry and move it to the matching action list
 * (pend_le_conns or pend_le_reports). Returns 0 on success or -EIO if
 * the entry could not be created.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Take the entry off whichever action list it is currently on
	 * before re-queueing it below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue for connection if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
5656 
5657 static void device_added(struct sock *sk, struct hci_dev *hdev,
5658 			 bdaddr_t *bdaddr, u8 type, u8 action)
5659 {
5660 	struct mgmt_ev_device_added ev;
5661 
5662 	bacpy(&ev.addr.bdaddr, bdaddr);
5663 	ev.addr.type = type;
5664 	ev.action = action;
5665 
5666 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5667 }
5668 
5669 static int add_device(struct sock *sk, struct hci_dev *hdev,
5670 		      void *data, u16 len)
5671 {
5672 	struct mgmt_cp_add_device *cp = data;
5673 	u8 auto_conn, addr_type;
5674 	int err;
5675 
5676 	BT_DBG("%s", hdev->name);
5677 
5678 	if (!bdaddr_type_is_valid(cp->addr.type) ||
5679 	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5680 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5681 					 MGMT_STATUS_INVALID_PARAMS,
5682 					 &cp->addr, sizeof(cp->addr));
5683 
5684 	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5685 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5686 					 MGMT_STATUS_INVALID_PARAMS,
5687 					 &cp->addr, sizeof(cp->addr));
5688 
5689 	hci_dev_lock(hdev);
5690 
5691 	if (cp->addr.type == BDADDR_BREDR) {
5692 		/* Only incoming connections action is supported for now */
5693 		if (cp->action != 0x01) {
5694 			err = mgmt_cmd_complete(sk, hdev->id,
5695 						MGMT_OP_ADD_DEVICE,
5696 						MGMT_STATUS_INVALID_PARAMS,
5697 						&cp->addr, sizeof(cp->addr));
5698 			goto unlock;
5699 		}
5700 
5701 		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
5702 					  cp->addr.type);
5703 		if (err)
5704 			goto unlock;
5705 
5706 		hci_req_update_scan(hdev);
5707 
5708 		goto added;
5709 	}
5710 
5711 	addr_type = le_addr_type(cp->addr.type);
5712 
5713 	if (cp->action == 0x02)
5714 		auto_conn = HCI_AUTO_CONN_ALWAYS;
5715 	else if (cp->action == 0x01)
5716 		auto_conn = HCI_AUTO_CONN_DIRECT;
5717 	else
5718 		auto_conn = HCI_AUTO_CONN_REPORT;
5719 
5720 	/* Kernel internally uses conn_params with resolvable private
5721 	 * address, but Add Device allows only identity addresses.
5722 	 * Make sure it is enforced before calling
5723 	 * hci_conn_params_lookup.
5724 	 */
5725 	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
5726 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5727 					MGMT_STATUS_INVALID_PARAMS,
5728 					&cp->addr, sizeof(cp->addr));
5729 		goto unlock;
5730 	}
5731 
5732 	/* If the connection parameters don't exist for this device,
5733 	 * they will be created and configured with defaults.
5734 	 */
5735 	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5736 				auto_conn) < 0) {
5737 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5738 					MGMT_STATUS_FAILED, &cp->addr,
5739 					sizeof(cp->addr));
5740 		goto unlock;
5741 	}
5742 
5743 	hci_update_background_scan(hdev);
5744 
5745 added:
5746 	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5747 
5748 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5749 				MGMT_STATUS_SUCCESS, &cp->addr,
5750 				sizeof(cp->addr));
5751 
5752 unlock:
5753 	hci_dev_unlock(hdev);
5754 	return err;
5755 }
5756 
5757 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5758 			   bdaddr_t *bdaddr, u8 type)
5759 {
5760 	struct mgmt_ev_device_removed ev;
5761 
5762 	bacpy(&ev.addr.bdaddr, bdaddr);
5763 	ev.addr.type = type;
5764 
5765 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5766 }
5767 
/* Handle the Remove Device mgmt command. A specific address removes a
 * single entry (BR/EDR whitelist entry or LE connection parameters);
 * BDADDR_ANY with address type 0 clears all entries.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries in DISABLED or EXPLICIT state are not removable
		 * through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		/* Re-evaluate passive scanning now that the entry is gone */
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* For BDADDR_ANY only address type 0 is valid */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Clear the whole BR/EDR whitelist */
		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		/* Clear all LE connection parameter entries except those
		 * with a pending explicit connect, which are downgraded
		 * to explicit-only instead of being freed.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
5896 
/* Handle the Load Connection Parameters mgmt command: load the supplied
 * list of LE connection parameters. Invalid entries are skipped rather
 * than rejected, so one bad entry does not discard the rest.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound keeping expected_len below within u16 range */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must exactly match the advertised entry count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Drop stale disabled entries before loading the new set */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		/* Only LE address types are meaningful here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
5981 
/* Handle the Set External Configuration mgmt command. Toggles the
 * HCI_EXT_CONFIGURED flag and, when that changes whether the controller
 * counts as configured, moves it between the configured and
 * unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* Configuration can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	/* Only meaningful for controllers with external configuration */
	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* Re-register the index when the configured state flipped: the
	 * HCI_UNCONFIGURED flag is toggled, then the controller is either
	 * powered up for configuration or exposed as a raw device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6037 
/* Handle the Set Public Address mgmt command. Stores the address for
 * the driver's set_bdaddr hook and, if the controller thereby becomes
 * configured, kicks off its power-on configuration.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	/* The address can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only supported when the driver provides a set_bdaddr hook */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Setting the address may complete configuration: move the
	 * controller to the configured index list and power it on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6089 
/* Completion handler for the HCI request issued by
 * read_local_ssp_oob_req(). Converts the controller's OOB data reply
 * into a Read Local OOB Extended Data response and also emits it as a
 * Local OOB Data Updated event.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp;

		/* On a short reply the hash pointers stay unset, but the
		 * non-success status makes the code below skip them.
		 */
		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev (5) + C192 (18) + R192 (18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* In SC-only mode expose just the P-256 values */
			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On error reply with an empty EIR payload */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Notify the sockets subscribed via HCI_MGMT_OOB_DATA_EVENTS */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
6200 
/* Issue the HCI request backing the BR/EDR case of the Read Local OOB
 * Extended Data mgmt command. The controller's reply is handled by
 * read_local_oob_ext_data_complete().
 */
static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	hci_req_init(&req, hdev);

	/* Use the extended variant when BR/EDR Secure Connections is
	 * enabled so the P-256 values are included in the reply.
	 */
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}
6228 
/* Handle the Read Local OOB Extended Data mgmt command. For BR/EDR with
 * SSP enabled this defers to read_local_ssp_oob_req(); for LE the
 * response is assembled synchronously from local state.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	BT_DBG("%s", hdev->name);

	/* Pre-compute the worst-case EIR length for the buffer below */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Needs a controller round-trip; on success the
			 * reply is sent asynchronously from
			 * read_local_oob_ext_data_complete().
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] carries the address type: 0x01 for the static
		 * address, 0x00 for the public address.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* LE role: 0x02 while advertising, 0x01 otherwise --
		 * NOTE(review): confirm against the LE Role AD type
		 * assigned values.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Subscribe the requester to future OOB data update events */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6384 
6385 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6386 {
6387 	u32 flags = 0;
6388 
6389 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
6390 	flags |= MGMT_ADV_FLAG_DISCOV;
6391 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6392 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6393 	flags |= MGMT_ADV_FLAG_APPEARANCE;
6394 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
6395 
6396 	/* In extended adv TX_POWER returned from Set Adv Param
6397 	 * will be always valid.
6398 	 */
6399 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
6400 	    ext_adv_capable(hdev))
6401 		flags |= MGMT_ADV_FLAG_TX_POWER;
6402 
6403 	if (ext_adv_capable(hdev)) {
6404 		flags |= MGMT_ADV_FLAG_SEC_1M;
6405 
6406 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
6407 			flags |= MGMT_ADV_FLAG_SEC_2M;
6408 
6409 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
6410 			flags |= MGMT_ADV_FLAG_SEC_CODED;
6411 	}
6412 
6413 	return flags;
6414 }
6415 
/* Handle the Read Advertising Features mgmt command: report supported
 * advertising flags, data size limits and the identifiers of all
 * currently registered advertising instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered advertising instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Fill in the identifiers of all registered instances */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
6464 
6465 static u8 calculate_name_len(struct hci_dev *hdev)
6466 {
6467 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
6468 
6469 	return append_local_name(hdev, buf, 0);
6470 }
6471 
6472 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
6473 			   bool is_adv_data)
6474 {
6475 	u8 max_len = HCI_MAX_AD_LENGTH;
6476 
6477 	if (is_adv_data) {
6478 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6479 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
6480 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
6481 			max_len -= 3;
6482 
6483 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6484 			max_len -= 3;
6485 	} else {
6486 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6487 			max_len -= calculate_name_len(hdev);
6488 
6489 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
6490 			max_len -= 4;
6491 	}
6492 
6493 	return max_len;
6494 }
6495 
6496 static bool flags_managed(u32 adv_flags)
6497 {
6498 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6499 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
6500 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
6501 }
6502 
6503 static bool tx_power_managed(u32 adv_flags)
6504 {
6505 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
6506 }
6507 
6508 static bool name_managed(u32 adv_flags)
6509 {
6510 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
6511 }
6512 
6513 static bool appearance_managed(u32 adv_flags)
6514 {
6515 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
6516 }
6517 
6518 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6519 			      u8 len, bool is_adv_data)
6520 {
6521 	int i, cur_len;
6522 	u8 max_len;
6523 
6524 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
6525 
6526 	if (len > max_len)
6527 		return false;
6528 
6529 	/* Make sure that the data is correctly formatted. */
6530 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6531 		cur_len = data[i];
6532 
6533 		if (data[i + 1] == EIR_FLAGS &&
6534 		    (!is_adv_data || flags_managed(adv_flags)))
6535 			return false;
6536 
6537 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6538 			return false;
6539 
6540 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6541 			return false;
6542 
6543 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6544 			return false;
6545 
6546 		if (data[i + 1] == EIR_APPEARANCE &&
6547 		    appearance_managed(adv_flags))
6548 			return false;
6549 
6550 		/* If the current field length would exceed the total data
6551 		 * length, then it's invalid.
6552 		 */
6553 		if (i + cur_len >= len)
6554 			return false;
6555 	}
6556 
6557 	return true;
6558 }
6559 
/* Request-completion callback for MGMT_OP_ADD_ADVERTISING.
 *
 * Runs once the HCI commands queued by add_advertising() have finished.
 * On success, the "pending" mark on instances is cleared; on failure,
 * every instance still marked pending is removed again and an
 * Advertising Removed event is emitted. Finally the pending mgmt
 * command (if one is still queued) is answered with the mapped status.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	/* _safe iteration: failing instances are unlinked inside the
	 * loop by hci_remove_adv_instance().
	 */
	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the rotation timer if the instance being torn down
		 * is the one currently being advertised.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6611 
/* Handler for MGMT_OP_ADD_ADVERTISING.
 *
 * Validates the request (LE support, instance range, payload length,
 * flags, TLV data), stores the instance via hci_add_adv_instance(),
 * and - when the controller is powered and no other advertising
 * mechanism is active - queues the HCI commands to start advertising,
 * completing asynchronously via add_advertising_complete().
 *
 * Returns 0 or a negative errno; mgmt status is delivered to @sk.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags, phy_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must exactly match the advertised
	 * adv_data_len + scan_rsp_len.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	/* phy_flags & -phy_flags isolates the lowest set bit; the XOR is
	 * non-zero when more than one secondary-PHY bit is set.
	 */
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Serialize against other operations that touch advertising/LE. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Adv data and scan response are concatenated in cp->data. */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	/* On failure nothing runs the completion, so drop the pending
	 * command here.
	 */
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6754 
/* Request-completion callback for MGMT_OP_REMOVE_ADVERTISING.
 *
 * Answers the pending mgmt command, always with success: by the time
 * this runs the instance itself has been removed (see comment below).
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;

	BT_DBG("status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6784 
/* Handler for MGMT_OP_REMOVE_ADVERTISING.
 *
 * Removes one advertising instance (or all, when cp->instance is 0 -
 * hci_req_clear_adv_instance() receives the raw value) and queues the
 * HCI commands needed to update the controller, completing via
 * remove_advertising_complete() when any HCI work was required.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing instance. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Serialize against other operations that touch advertising/LE. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	/* Nothing left to advertise: turn advertising off entirely. */
	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6857 
6858 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6859 			     void *data, u16 data_len)
6860 {
6861 	struct mgmt_cp_get_adv_size_info *cp = data;
6862 	struct mgmt_rp_get_adv_size_info rp;
6863 	u32 flags, supported_flags;
6864 	int err;
6865 
6866 	BT_DBG("%s", hdev->name);
6867 
6868 	if (!lmp_le_capable(hdev))
6869 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6870 				       MGMT_STATUS_REJECTED);
6871 
6872 	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6873 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6874 				       MGMT_STATUS_INVALID_PARAMS);
6875 
6876 	flags = __le32_to_cpu(cp->flags);
6877 
6878 	/* The current implementation only supports a subset of the specified
6879 	 * flags.
6880 	 */
6881 	supported_flags = get_supported_adv_flags(hdev);
6882 	if (flags & ~supported_flags)
6883 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6884 				       MGMT_STATUS_INVALID_PARAMS);
6885 
6886 	rp.instance = cp->instance;
6887 	rp.flags = cp->flags;
6888 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
6889 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
6890 
6891 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6892 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6893 
6894 	return err;
6895 }
6896 
/* Dispatch table for HCI Management commands.
 *
 * Entries are positional: the array index is the mgmt opcode (entry 0
 * corresponds to the unused opcode 0x0000), so new commands must be
 * appended in opcode order. The second field is the parameter size the
 * dispatcher checks; HCI_MGMT_VAR_LEN marks commands whose payload may
 * be longer than that size.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
};
6994 
/* Announce a newly registered controller to mgmt listeners.
 *
 * Emits the legacy Index Added (or Unconfigured Index Added) event and
 * the Extended Index Added event. ev.type encodes the controller kind:
 * 0x00 configured primary, 0x01 unconfigured primary, 0x02 AMP.
 * Raw-only devices and unknown dev_types are not announced.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7026 
/* Announce the removal of a controller to mgmt listeners.
 *
 * Counterpart of mgmt_index_added(): for primary controllers it first
 * fails all still-pending mgmt commands with INVALID_INDEX, then emits
 * the legacy and extended Index Removed events (same ev.type encoding
 * as in mgmt_index_added()).
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 means: match every pending command. */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7061 
7062 /* This function requires the caller holds hdev->lock */
7063 static void restart_le_actions(struct hci_dev *hdev)
7064 {
7065 	struct hci_conn_params *p;
7066 
7067 	list_for_each_entry(p, &hdev->le_conn_params, list) {
7068 		/* Needed for AUTO_OFF case where might not "really"
7069 		 * have been powered off.
7070 		 */
7071 		list_del_init(&p->action);
7072 
7073 		switch (p->auto_connect) {
7074 		case HCI_AUTO_CONN_DIRECT:
7075 		case HCI_AUTO_CONN_ALWAYS:
7076 			list_add(&p->action, &hdev->pend_le_conns);
7077 			break;
7078 		case HCI_AUTO_CONN_REPORT:
7079 			list_add(&p->action, &hdev->pend_le_reports);
7080 			break;
7081 		default:
7082 			break;
7083 		}
7084 	}
7085 }
7086 
/* Called when powering the controller on has finished.
 *
 * On success, restores LE auto-connect actions and kicks the
 * background scan; in all cases answers pending Set Powered commands
 * and emits New Settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last matching socket. */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
7109 
/* Called when the controller has been powered off.
 *
 * Answers pending Set Powered commands, fails every other pending
 * command with NOT_POWERED (or INVALID_INDEX during unregistration),
 * announces a zeroed class of device if needed, and emits New
 * Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 means: match every pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last matching socket. */
	if (match.sk)
		sock_put(match.sk);
}
7143 
7144 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7145 {
7146 	struct mgmt_pending_cmd *cmd;
7147 	u8 status;
7148 
7149 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7150 	if (!cmd)
7151 		return;
7152 
7153 	if (err == -ERFKILL)
7154 		status = MGMT_STATUS_RFKILLED;
7155 	else
7156 		status = MGMT_STATUS_FAILED;
7157 
7158 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7159 
7160 	mgmt_pending_remove(cmd);
7161 }
7162 
/* Emit a New Link Key event for a BR/EDR link key, with store_hint
 * telling user space whether the key should be persisted.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
7179 
7180 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7181 {
7182 	switch (ltk->type) {
7183 	case SMP_LTK:
7184 	case SMP_LTK_SLAVE:
7185 		if (ltk->authenticated)
7186 			return MGMT_LTK_AUTHENTICATED;
7187 		return MGMT_LTK_UNAUTHENTICATED;
7188 	case SMP_LTK_P256:
7189 		if (ltk->authenticated)
7190 			return MGMT_LTK_P256_AUTH;
7191 		return MGMT_LTK_P256_UNAUTH;
7192 	case SMP_LTK_P256_DEBUG:
7193 		return MGMT_LTK_P256_DEBUG;
7194 	}
7195 
7196 	return MGMT_LTK_UNAUTHENTICATED;
7197 }
7198 
/* Emit a New Long Term Key event, suppressing the store hint for keys
 * tied to non-identity (resolvable/non-resolvable random) addresses.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7241 
/* Emit a New Identity Resolving Key event for user space. */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
7257 
/* Emit a New Signature Resolving Key event, suppressing the store hint
 * for keys tied to non-identity (resolvable/non-resolvable random)
 * addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
7287 
/* Emit a New Connection Parameter event for identity addresses only;
 * parameters for unresolvable addresses are not reported.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
7308 
/* Emit a Device Connected event, attaching either the connection's LE
 * advertising data or (for BR/EDR) name and class-of-device EIR fields.
 *
 * NOTE(review): ev->eir is filled inside a fixed 512-byte stack buffer
 * with no explicit bound check against sizeof(buf) here - presumably
 * le_adv_data_len and name_len are bounded by the callers; confirm.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append a class of device that is non-zero. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
7345 
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command with success and hand its socket (with a taken reference)
 * back to the caller through @data.
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	/* Caller is responsible for the matching sock_put(). */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
7357 
/* mgmt_pending_foreach() callback: emit Device Unpaired for a pending
 * Unpair Device command and complete it with success.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
7368 
7369 bool mgmt_powering_down(struct hci_dev *hdev)
7370 {
7371 	struct mgmt_pending_cmd *cmd;
7372 	struct mgmt_mode *cp;
7373 
7374 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7375 	if (!cmd)
7376 		return false;
7377 
7378 	cp = cmd->param;
7379 	if (!cp->val)
7380 		return true;
7381 
7382 	return false;
7383 }
7384 
/* Emit a Device Disconnected event and complete any pending Disconnect
 * and Unpair Device commands for this controller. Also finishes a
 * pending power-off once the last connection drops.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections that were announced to mgmt. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp holds a reference on the completing socket so
	 * it can be excluded from the event broadcast below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
7420 
/* Complete a pending Disconnect command with the given HCI status,
 * but only if its address and type match the failed disconnection.
 * Pending Unpair Device commands are completed regardless.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
7446 
/* Emit a Connect Failed event with the mapped HCI status. Also
 * finishes a pending power-off once the last connection drops.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
7466 
/* Emit a PIN Code Request event for a BR/EDR peer; @secure indicates
 * whether a secure (16-digit) PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
7477 
7478 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7479 				  u8 status)
7480 {
7481 	struct mgmt_pending_cmd *cmd;
7482 
7483 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7484 	if (!cmd)
7485 		return;
7486 
7487 	cmd->cmd_complete(cmd, mgmt_status(status));
7488 	mgmt_pending_remove(cmd);
7489 }
7490 
7491 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7492 				      u8 status)
7493 {
7494 	struct mgmt_pending_cmd *cmd;
7495 
7496 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7497 	if (!cmd)
7498 		return;
7499 
7500 	cmd->cmd_complete(cmd, mgmt_status(status));
7501 	mgmt_pending_remove(cmd);
7502 }
7503 
7504 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7505 			      u8 link_type, u8 addr_type, u32 value,
7506 			      u8 confirm_hint)
7507 {
7508 	struct mgmt_ev_user_confirm_request ev;
7509 
7510 	BT_DBG("%s", hdev->name);
7511 
7512 	bacpy(&ev.addr.bdaddr, bdaddr);
7513 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7514 	ev.confirm_hint = confirm_hint;
7515 	ev.value = cpu_to_le32(value);
7516 
7517 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
7518 			  NULL);
7519 }
7520 
7521 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7522 			      u8 link_type, u8 addr_type)
7523 {
7524 	struct mgmt_ev_user_passkey_request ev;
7525 
7526 	BT_DBG("%s", hdev->name);
7527 
7528 	bacpy(&ev.addr.bdaddr, bdaddr);
7529 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7530 
7531 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7532 			  NULL);
7533 }
7534 
7535 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7536 				      u8 link_type, u8 addr_type, u8 status,
7537 				      u8 opcode)
7538 {
7539 	struct mgmt_pending_cmd *cmd;
7540 
7541 	cmd = pending_find(opcode, hdev);
7542 	if (!cmd)
7543 		return -ENOENT;
7544 
7545 	cmd->cmd_complete(cmd, mgmt_status(status));
7546 	mgmt_pending_remove(cmd);
7547 
7548 	return 0;
7549 }
7550 
/* Complete a pending MGMT_OP_USER_CONFIRM_REPLY command */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
7557 
/* Complete a pending MGMT_OP_USER_CONFIRM_NEG_REPLY command */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7565 
/* Complete a pending MGMT_OP_USER_PASSKEY_REPLY command */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
7572 
/* Complete a pending MGMT_OP_USER_PASSKEY_NEG_REPLY command */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
7580 
7581 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7582 			     u8 link_type, u8 addr_type, u32 passkey,
7583 			     u8 entered)
7584 {
7585 	struct mgmt_ev_passkey_notify ev;
7586 
7587 	BT_DBG("%s", hdev->name);
7588 
7589 	bacpy(&ev.addr.bdaddr, bdaddr);
7590 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7591 	ev.passkey = __cpu_to_le32(passkey);
7592 	ev.entered = entered;
7593 
7594 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7595 }
7596 
7597 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7598 {
7599 	struct mgmt_ev_auth_failed ev;
7600 	struct mgmt_pending_cmd *cmd;
7601 	u8 status = mgmt_status(hci_status);
7602 
7603 	bacpy(&ev.addr.bdaddr, &conn->dst);
7604 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7605 	ev.status = status;
7606 
7607 	cmd = find_pairing(conn);
7608 
7609 	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7610 		    cmd ? cmd->sk : NULL);
7611 
7612 	if (cmd) {
7613 		cmd->cmd_complete(cmd, status);
7614 		mgmt_pending_remove(cmd);
7615 	}
7616 }
7617 
7618 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7619 {
7620 	struct cmd_lookup match = { NULL, hdev };
7621 	bool changed;
7622 
7623 	if (status) {
7624 		u8 mgmt_err = mgmt_status(status);
7625 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7626 				     cmd_status_rsp, &mgmt_err);
7627 		return;
7628 	}
7629 
7630 	if (test_bit(HCI_AUTH, &hdev->flags))
7631 		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7632 	else
7633 		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7634 
7635 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7636 			     &match);
7637 
7638 	if (changed)
7639 		new_settings(hdev, match.sk);
7640 
7641 	if (match.sk)
7642 		sock_put(match.sk);
7643 }
7644 
/* Clear the cached EIR data in hdev->eir and queue a Write Extended
 * Inquiry Response command with an all-zero payload.  No-op when the
 * controller does not support extended inquiry response.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
7659 
/* Handle completion of a Simple Pairing mode change.  Updates the
 * HCI_SSP_ENABLED (and dependent HCI_HS_ENABLED) flags, answers all
 * pending Set SSP commands and brings the EIR data in line with the
 * new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the flags if they were
		 * already set and announce the reverted settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also clears High Speed; "changed" must
		 * end up true if either flag flipped.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Re-sync the controller's EIR data with the new SSP state */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7712 
7713 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7714 {
7715 	struct cmd_lookup *match = data;
7716 
7717 	if (match->sk == NULL) {
7718 		match->sk = cmd->sk;
7719 		sock_hold(match->sk);
7720 	}
7721 }
7722 
/* Called when a class of device update has completed.  On success the
 * new class is broadcast via MGMT_EV_CLASS_OF_DEV_CHANGED and the
 * extended info is refreshed.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Record the socket of the first pending command of any of the
	 * class-affecting operations (reference released below).
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* The class of device is always 3 bytes */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
7741 
/* Handle completion of a local name change.  Broadcasts a Local Name
 * Changed event unless the write happened as part of powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending mgmt command, so the change originated
		 * elsewhere: store the name in hdev directly.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
7769 
7770 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7771 {
7772 	int i;
7773 
7774 	for (i = 0; i < uuid_count; i++) {
7775 		if (!memcmp(uuid, uuids[i], 16))
7776 			return true;
7777 	}
7778 
7779 	return false;
7780 }
7781 
/* Walk the EIR/advertising data in @eir and return true if any UUID in
 * a complete or partial list-of-UUIDs field matches one of the 128-bit
 * UUIDs in @uuids.  16-bit and 32-bit UUIDs are expanded into the
 * Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		/* Each field is: length byte, type byte, then
		 * (length - 1) bytes of data.
		 */
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a field that would run past the end */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* Little-endian 16-bit UUID goes into
				 * bytes 12-13 of the base UUID.
				 */
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* Little-endian 32-bit UUID goes into
				 * bytes 12-15 of the base UUID.
				 */
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
7836 
/* Schedule a delayed LE scan restart, but only while the controller is
 * actually scanning and the restart would still fall within the
 * current scan window.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when it would only fire after the scheduled
	 * end of the current scan window.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
7851 
/* Apply the service discovery result filter (RSSI threshold and UUID
 * list) to a discovery result.  Returns true when the result should be
 * reported to user space.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are checked.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
7896 
/* Emit a MGMT_EV_DEVICE_FOUND event for a device found via inquiry or
 * LE scanning.  The event is suppressed when no kernel-initiated
 * discovery is active (LE passive scanning excepted), when the result
 * fails the service discovery filter, or when limited discovery is in
 * effect and the device does not advertise limited discoverability.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* For LE results, look at the Flags AD field */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device only when the EIR data does not
	 * already carry one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
7978 
7979 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7980 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7981 {
7982 	struct mgmt_ev_device_found *ev;
7983 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7984 	u16 eir_len;
7985 
7986 	ev = (struct mgmt_ev_device_found *) buf;
7987 
7988 	memset(buf, 0, sizeof(buf));
7989 
7990 	bacpy(&ev->addr.bdaddr, bdaddr);
7991 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
7992 	ev->rssi = rssi;
7993 
7994 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7995 				  name_len);
7996 
7997 	ev->eir_len = cpu_to_le16(eir_len);
7998 
7999 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8000 }
8001 
8002 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8003 {
8004 	struct mgmt_ev_discovering ev;
8005 
8006 	BT_DBG("%s discovering %u", hdev->name, discovering);
8007 
8008 	memset(&ev, 0, sizeof(ev));
8009 	ev.type = hdev->discovery.type;
8010 	ev.discovering = discovering;
8011 
8012 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8013 }
8014 
/* The mgmt control channel registered with the HCI socket layer;
 * incoming commands are looked up in the mgmt_handlers table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8021 
/* Register the mgmt control channel; returns 0 or a negative error */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8026 
/* Unregister the mgmt control channel */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8031