xref: /openbmc/linux/net/bluetooth/mgmt.c (revision d81a494c43df66f053f7d1ec612e057eb99f34d4)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 
43 #define MGMT_VERSION	1
44 #define MGMT_REVISION	21
45 
46 static const u16 mgmt_commands[] = {
47 	MGMT_OP_READ_INDEX_LIST,
48 	MGMT_OP_READ_INFO,
49 	MGMT_OP_SET_POWERED,
50 	MGMT_OP_SET_DISCOVERABLE,
51 	MGMT_OP_SET_CONNECTABLE,
52 	MGMT_OP_SET_FAST_CONNECTABLE,
53 	MGMT_OP_SET_BONDABLE,
54 	MGMT_OP_SET_LINK_SECURITY,
55 	MGMT_OP_SET_SSP,
56 	MGMT_OP_SET_HS,
57 	MGMT_OP_SET_LE,
58 	MGMT_OP_SET_DEV_CLASS,
59 	MGMT_OP_SET_LOCAL_NAME,
60 	MGMT_OP_ADD_UUID,
61 	MGMT_OP_REMOVE_UUID,
62 	MGMT_OP_LOAD_LINK_KEYS,
63 	MGMT_OP_LOAD_LONG_TERM_KEYS,
64 	MGMT_OP_DISCONNECT,
65 	MGMT_OP_GET_CONNECTIONS,
66 	MGMT_OP_PIN_CODE_REPLY,
67 	MGMT_OP_PIN_CODE_NEG_REPLY,
68 	MGMT_OP_SET_IO_CAPABILITY,
69 	MGMT_OP_PAIR_DEVICE,
70 	MGMT_OP_CANCEL_PAIR_DEVICE,
71 	MGMT_OP_UNPAIR_DEVICE,
72 	MGMT_OP_USER_CONFIRM_REPLY,
73 	MGMT_OP_USER_CONFIRM_NEG_REPLY,
74 	MGMT_OP_USER_PASSKEY_REPLY,
75 	MGMT_OP_USER_PASSKEY_NEG_REPLY,
76 	MGMT_OP_READ_LOCAL_OOB_DATA,
77 	MGMT_OP_ADD_REMOTE_OOB_DATA,
78 	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
79 	MGMT_OP_START_DISCOVERY,
80 	MGMT_OP_STOP_DISCOVERY,
81 	MGMT_OP_CONFIRM_NAME,
82 	MGMT_OP_BLOCK_DEVICE,
83 	MGMT_OP_UNBLOCK_DEVICE,
84 	MGMT_OP_SET_DEVICE_ID,
85 	MGMT_OP_SET_ADVERTISING,
86 	MGMT_OP_SET_BREDR,
87 	MGMT_OP_SET_STATIC_ADDRESS,
88 	MGMT_OP_SET_SCAN_PARAMS,
89 	MGMT_OP_SET_SECURE_CONN,
90 	MGMT_OP_SET_DEBUG_KEYS,
91 	MGMT_OP_SET_PRIVACY,
92 	MGMT_OP_LOAD_IRKS,
93 	MGMT_OP_GET_CONN_INFO,
94 	MGMT_OP_GET_CLOCK_INFO,
95 	MGMT_OP_ADD_DEVICE,
96 	MGMT_OP_REMOVE_DEVICE,
97 	MGMT_OP_LOAD_CONN_PARAM,
98 	MGMT_OP_READ_UNCONF_INDEX_LIST,
99 	MGMT_OP_READ_CONFIG_INFO,
100 	MGMT_OP_SET_EXTERNAL_CONFIG,
101 	MGMT_OP_SET_PUBLIC_ADDRESS,
102 	MGMT_OP_START_SERVICE_DISCOVERY,
103 	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
104 	MGMT_OP_READ_EXT_INDEX_LIST,
105 	MGMT_OP_READ_ADV_FEATURES,
106 	MGMT_OP_ADD_ADVERTISING,
107 	MGMT_OP_REMOVE_ADVERTISING,
108 	MGMT_OP_GET_ADV_SIZE_INFO,
109 	MGMT_OP_START_LIMITED_DISCOVERY,
110 	MGMT_OP_READ_EXT_INFO,
111 	MGMT_OP_SET_APPEARANCE,
112 	MGMT_OP_GET_PHY_CONFIGURATION,
113 	MGMT_OP_SET_PHY_CONFIGURATION,
114 	MGMT_OP_SET_BLOCKED_KEYS,
115 	MGMT_OP_SET_WIDEBAND_SPEECH,
116 	MGMT_OP_READ_CONTROLLER_CAP,
117 	MGMT_OP_READ_EXP_FEATURES_INFO,
118 	MGMT_OP_SET_EXP_FEATURE,
119 	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
120 	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
121 	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
122 	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
123 	MGMT_OP_GET_DEVICE_FLAGS,
124 	MGMT_OP_SET_DEVICE_FLAGS,
125 	MGMT_OP_READ_ADV_MONITOR_FEATURES,
126 	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
127 	MGMT_OP_REMOVE_ADV_MONITOR,
128 	MGMT_OP_ADD_EXT_ADV_PARAMS,
129 	MGMT_OP_ADD_EXT_ADV_DATA,
130 	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
131 };
132 
133 static const u16 mgmt_events[] = {
134 	MGMT_EV_CONTROLLER_ERROR,
135 	MGMT_EV_INDEX_ADDED,
136 	MGMT_EV_INDEX_REMOVED,
137 	MGMT_EV_NEW_SETTINGS,
138 	MGMT_EV_CLASS_OF_DEV_CHANGED,
139 	MGMT_EV_LOCAL_NAME_CHANGED,
140 	MGMT_EV_NEW_LINK_KEY,
141 	MGMT_EV_NEW_LONG_TERM_KEY,
142 	MGMT_EV_DEVICE_CONNECTED,
143 	MGMT_EV_DEVICE_DISCONNECTED,
144 	MGMT_EV_CONNECT_FAILED,
145 	MGMT_EV_PIN_CODE_REQUEST,
146 	MGMT_EV_USER_CONFIRM_REQUEST,
147 	MGMT_EV_USER_PASSKEY_REQUEST,
148 	MGMT_EV_AUTH_FAILED,
149 	MGMT_EV_DEVICE_FOUND,
150 	MGMT_EV_DISCOVERING,
151 	MGMT_EV_DEVICE_BLOCKED,
152 	MGMT_EV_DEVICE_UNBLOCKED,
153 	MGMT_EV_DEVICE_UNPAIRED,
154 	MGMT_EV_PASSKEY_NOTIFY,
155 	MGMT_EV_NEW_IRK,
156 	MGMT_EV_NEW_CSRK,
157 	MGMT_EV_DEVICE_ADDED,
158 	MGMT_EV_DEVICE_REMOVED,
159 	MGMT_EV_NEW_CONN_PARAM,
160 	MGMT_EV_UNCONF_INDEX_ADDED,
161 	MGMT_EV_UNCONF_INDEX_REMOVED,
162 	MGMT_EV_NEW_CONFIG_OPTIONS,
163 	MGMT_EV_EXT_INDEX_ADDED,
164 	MGMT_EV_EXT_INDEX_REMOVED,
165 	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
166 	MGMT_EV_ADVERTISING_ADDED,
167 	MGMT_EV_ADVERTISING_REMOVED,
168 	MGMT_EV_EXT_INFO_CHANGED,
169 	MGMT_EV_PHY_CONFIGURATION_CHANGED,
170 	MGMT_EV_EXP_FEATURE_CHANGED,
171 	MGMT_EV_DEVICE_FLAGS_CHANGED,
172 	MGMT_EV_ADV_MONITOR_ADDED,
173 	MGMT_EV_ADV_MONITOR_REMOVED,
174 	MGMT_EV_CONTROLLER_SUSPEND,
175 	MGMT_EV_CONTROLLER_RESUME,
176 };
177 
178 static const u16 mgmt_untrusted_commands[] = {
179 	MGMT_OP_READ_INDEX_LIST,
180 	MGMT_OP_READ_INFO,
181 	MGMT_OP_READ_UNCONF_INDEX_LIST,
182 	MGMT_OP_READ_CONFIG_INFO,
183 	MGMT_OP_READ_EXT_INDEX_LIST,
184 	MGMT_OP_READ_EXT_INFO,
185 	MGMT_OP_READ_CONTROLLER_CAP,
186 	MGMT_OP_READ_EXP_FEATURES_INFO,
187 	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
188 	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
189 };
190 
191 static const u16 mgmt_untrusted_events[] = {
192 	MGMT_EV_INDEX_ADDED,
193 	MGMT_EV_INDEX_REMOVED,
194 	MGMT_EV_NEW_SETTINGS,
195 	MGMT_EV_CLASS_OF_DEV_CHANGED,
196 	MGMT_EV_LOCAL_NAME_CHANGED,
197 	MGMT_EV_UNCONF_INDEX_ADDED,
198 	MGMT_EV_UNCONF_INDEX_REMOVED,
199 	MGMT_EV_NEW_CONFIG_OPTIONS,
200 	MGMT_EV_EXT_INDEX_ADDED,
201 	MGMT_EV_EXT_INDEX_REMOVED,
202 	MGMT_EV_EXT_INFO_CHANGED,
203 	MGMT_EV_EXP_FEATURE_CHANGED,
204 };
205 
206 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
207 
208 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
209 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
210 
211 /* HCI to MGMT error code conversion table */
212 static const u8 mgmt_status_table[] = {
213 	MGMT_STATUS_SUCCESS,
214 	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
215 	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
216 	MGMT_STATUS_FAILED,		/* Hardware Failure */
217 	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
218 	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
219 	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
220 	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
221 	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
222 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
223 	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
224 	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
225 	MGMT_STATUS_BUSY,		/* Command Disallowed */
226 	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
227 	MGMT_STATUS_REJECTED,		/* Rejected Security */
228 	MGMT_STATUS_REJECTED,		/* Rejected Personal */
229 	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
230 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
231 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
232 	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
233 	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
234 	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
235 	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
236 	MGMT_STATUS_BUSY,		/* Repeated Attempts */
237 	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
238 	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
239 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
240 	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
241 	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
242 	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
243 	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
244 	MGMT_STATUS_FAILED,		/* Unspecified Error */
245 	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
246 	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
247 	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
248 	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
249 	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
250 	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
251 	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
252 	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
253 	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
254 	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
255 	MGMT_STATUS_FAILED,		/* Transaction Collision */
256 	MGMT_STATUS_FAILED,		/* Reserved for future use */
257 	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
258 	MGMT_STATUS_REJECTED,		/* QoS Rejected */
259 	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
260 	MGMT_STATUS_REJECTED,		/* Insufficient Security */
261 	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
262 	MGMT_STATUS_FAILED,		/* Reserved for future use */
263 	MGMT_STATUS_BUSY,		/* Role Switch Pending */
264 	MGMT_STATUS_FAILED,		/* Reserved for future use */
265 	MGMT_STATUS_FAILED,		/* Slot Violation */
266 	MGMT_STATUS_FAILED,		/* Role Switch Failed */
267 	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
268 	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
269 	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
270 	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
271 	MGMT_STATUS_BUSY,		/* Controller Busy */
272 	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
273 	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
274 	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
275 	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
276 	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
277 };
278 
279 static u8 mgmt_errno_status(int err)
280 {
281 	switch (err) {
282 	case 0:
283 		return MGMT_STATUS_SUCCESS;
284 	case -EPERM:
285 		return MGMT_STATUS_REJECTED;
286 	case -EINVAL:
287 		return MGMT_STATUS_INVALID_PARAMS;
288 	case -EOPNOTSUPP:
289 		return MGMT_STATUS_NOT_SUPPORTED;
290 	case -EBUSY:
291 		return MGMT_STATUS_BUSY;
292 	case -ETIMEDOUT:
293 		return MGMT_STATUS_AUTH_FAILED;
294 	case -ENOMEM:
295 		return MGMT_STATUS_NO_RESOURCES;
296 	case -EISCONN:
297 		return MGMT_STATUS_ALREADY_CONNECTED;
298 	case -ENOTCONN:
299 		return MGMT_STATUS_DISCONNECTED;
300 	}
301 
302 	return MGMT_STATUS_FAILED;
303 }
304 
305 static u8 mgmt_status(int err)
306 {
307 	if (err < 0)
308 		return mgmt_errno_status(err);
309 
310 	if (err < ARRAY_SIZE(mgmt_status_table))
311 		return mgmt_status_table[err];
312 
313 	return MGMT_STATUS_FAILED;
314 }
315 
/* Broadcast an index-related event on the control channel to every
 * socket matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
322 
/* Broadcast an event on the control channel to sockets matching @flag,
 * optionally skipping @skip_sk (typically the command's originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
329 
/* Broadcast an event on the control channel to all trusted sockets,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
336 
337 static u8 le_addr_type(u8 mgmt_addr_type)
338 {
339 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
340 		return ADDR_LE_DEV_PUBLIC;
341 	else
342 		return ADDR_LE_DEV_RANDOM;
343 }
344 
/* Fill in the version/revision reply used by MGMT_OP_READ_VERSION.
 * @ver must point to a struct mgmt_rp_read_version.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
352 
/* Handle MGMT_OP_READ_VERSION: report the management interface
 * version and revision.  Works without a controller index.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
365 
366 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
367 			 u16 data_len)
368 {
369 	struct mgmt_rp_read_commands *rp;
370 	u16 num_commands, num_events;
371 	size_t rp_size;
372 	int i, err;
373 
374 	bt_dev_dbg(hdev, "sock %p", sk);
375 
376 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
377 		num_commands = ARRAY_SIZE(mgmt_commands);
378 		num_events = ARRAY_SIZE(mgmt_events);
379 	} else {
380 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
381 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
382 	}
383 
384 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
385 
386 	rp = kmalloc(rp_size, GFP_KERNEL);
387 	if (!rp)
388 		return -ENOMEM;
389 
390 	rp->num_commands = cpu_to_le16(num_commands);
391 	rp->num_events = cpu_to_le16(num_events);
392 
393 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
394 		__le16 *opcode = rp->opcodes;
395 
396 		for (i = 0; i < num_commands; i++, opcode++)
397 			put_unaligned_le16(mgmt_commands[i], opcode);
398 
399 		for (i = 0; i < num_events; i++, opcode++)
400 			put_unaligned_le16(mgmt_events[i], opcode);
401 	} else {
402 		__le16 *opcode = rp->opcodes;
403 
404 		for (i = 0; i < num_commands; i++, opcode++)
405 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
406 
407 		for (i = 0; i < num_events; i++, opcode++)
408 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
409 	}
410 
411 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
412 				rp, rp_size);
413 	kfree(rp);
414 
415 	return err;
416 }
417 
418 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
419 			   u16 data_len)
420 {
421 	struct mgmt_rp_read_index_list *rp;
422 	struct hci_dev *d;
423 	size_t rp_len;
424 	u16 count;
425 	int err;
426 
427 	bt_dev_dbg(hdev, "sock %p", sk);
428 
429 	read_lock(&hci_dev_list_lock);
430 
431 	count = 0;
432 	list_for_each_entry(d, &hci_dev_list, list) {
433 		if (d->dev_type == HCI_PRIMARY &&
434 		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
435 			count++;
436 	}
437 
438 	rp_len = sizeof(*rp) + (2 * count);
439 	rp = kmalloc(rp_len, GFP_ATOMIC);
440 	if (!rp) {
441 		read_unlock(&hci_dev_list_lock);
442 		return -ENOMEM;
443 	}
444 
445 	count = 0;
446 	list_for_each_entry(d, &hci_dev_list, list) {
447 		if (hci_dev_test_flag(d, HCI_SETUP) ||
448 		    hci_dev_test_flag(d, HCI_CONFIG) ||
449 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
450 			continue;
451 
452 		/* Devices marked as raw-only are neither configured
453 		 * nor unconfigured controllers.
454 		 */
455 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
456 			continue;
457 
458 		if (d->dev_type == HCI_PRIMARY &&
459 		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
460 			rp->index[count++] = cpu_to_le16(d->id);
461 			bt_dev_dbg(hdev, "Added hci%u", d->id);
462 		}
463 	}
464 
465 	rp->num_controllers = cpu_to_le16(count);
466 	rp_len = sizeof(*rp) + (2 * count);
467 
468 	read_unlock(&hci_dev_list_lock);
469 
470 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
471 				0, rp, rp_len);
472 
473 	kfree(rp);
474 
475 	return err;
476 }
477 
478 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
479 				  void *data, u16 data_len)
480 {
481 	struct mgmt_rp_read_unconf_index_list *rp;
482 	struct hci_dev *d;
483 	size_t rp_len;
484 	u16 count;
485 	int err;
486 
487 	bt_dev_dbg(hdev, "sock %p", sk);
488 
489 	read_lock(&hci_dev_list_lock);
490 
491 	count = 0;
492 	list_for_each_entry(d, &hci_dev_list, list) {
493 		if (d->dev_type == HCI_PRIMARY &&
494 		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
495 			count++;
496 	}
497 
498 	rp_len = sizeof(*rp) + (2 * count);
499 	rp = kmalloc(rp_len, GFP_ATOMIC);
500 	if (!rp) {
501 		read_unlock(&hci_dev_list_lock);
502 		return -ENOMEM;
503 	}
504 
505 	count = 0;
506 	list_for_each_entry(d, &hci_dev_list, list) {
507 		if (hci_dev_test_flag(d, HCI_SETUP) ||
508 		    hci_dev_test_flag(d, HCI_CONFIG) ||
509 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
510 			continue;
511 
512 		/* Devices marked as raw-only are neither configured
513 		 * nor unconfigured controllers.
514 		 */
515 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
516 			continue;
517 
518 		if (d->dev_type == HCI_PRIMARY &&
519 		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
520 			rp->index[count++] = cpu_to_le16(d->id);
521 			bt_dev_dbg(hdev, "Added hci%u", d->id);
522 		}
523 	}
524 
525 	rp->num_controllers = cpu_to_le16(count);
526 	rp_len = sizeof(*rp) + (2 * count);
527 
528 	read_unlock(&hci_dev_list_lock);
529 
530 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
531 				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
532 
533 	kfree(rp);
534 
535 	return err;
536 }
537 
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: report all primary and AMP
 * controllers together with their entry type and transport bus.
 * Calling this command also switches the socket over to extended
 * index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count the eligible controllers to size the
	 * reply buffer.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in one entry per controller, skipping
	 * those still in setup/config state or bound to a user
	 * channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry type: 0x00 configured primary, 0x01
		 * unconfigured primary, 0x02 AMP controller.
		 */
		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
611 
612 static bool is_configured(struct hci_dev *hdev)
613 {
614 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
615 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
616 		return false;
617 
618 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
619 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
620 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
621 		return false;
622 
623 	return true;
624 }
625 
626 static __le32 get_missing_options(struct hci_dev *hdev)
627 {
628 	u32 options = 0;
629 
630 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
631 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
632 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
633 
634 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
635 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
636 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
637 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
638 
639 	return cpu_to_le32(options);
640 }
641 
/* Broadcast a New Configuration Options event with the currently
 * missing options, skipping @skip (the command's originator).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
649 
/* Complete @opcode towards @sk with the currently missing
 * configuration options as the response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
657 
/* Handle MGMT_OP_READ_CONFIG_INFO: report the manufacturer plus the
 * supported and still-missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External configuration is supported whenever the quirk is
	 * set; a changeable public address requires a set_bdaddr
	 * driver callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
685 
/* Build the bitmask of PHYs the controller supports, derived from the
 * BR/EDR LMP feature bits and the LE PHY feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot packets are mandatory for BR/EDR. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 3 Mbps and multi-slot EDR packets are only
		 * meaningful when EDR 2 Mbps is supported, hence the
		 * nesting.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE controller. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
737 
/* Build the bitmask of currently selected PHYs.  For BR/EDR the
 * selection is derived from hdev->pkt_type; note that the EDR bits in
 * pkt_type are "shall not be used" bits, so a cleared bit means the
 * packet type (and thus the PHY) is selected.  For LE the default TX
 * and RX PHY preferences are used.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* For EDR, HCI_2DHx/HCI_3DHx being set means the
		 * packet type is disabled, hence the negations.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
800 
801 static u32 get_configurable_phys(struct hci_dev *hdev)
802 {
803 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
804 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
805 }
806 
/* Build the bitmask of MGMT settings the controller can support,
 * based on its BR/EDR and LE capabilities and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* These settings are available on every controller. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (interlaced page scan) needs at
		 * least Bluetooth 1.2.
		 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration is only offered when external config or a
	 * driver-settable public address is available.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
853 
/* Build the bitmask of MGMT settings that are currently active,
 * mostly a direct mapping from the hdev flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
924 
/* Look up a pending mgmt command for @hdev on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
929 
930 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
931 {
932 	struct mgmt_pending_cmd *cmd;
933 
934 	/* If there's a pending mgmt command the flags will not yet have
935 	 * their final values, so check for this first.
936 	 */
937 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
938 	if (cmd) {
939 		struct mgmt_mode *cp = cmd->param;
940 		if (cp->val == 0x01)
941 			return LE_AD_GENERAL;
942 		else if (cp->val == 0x02)
943 			return LE_AD_LIMITED;
944 	} else {
945 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
946 			return LE_AD_LIMITED;
947 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
948 			return LE_AD_GENERAL;
949 	}
950 
951 	return 0;
952 }
953 
954 bool mgmt_get_connectable(struct hci_dev *hdev)
955 {
956 	struct mgmt_pending_cmd *cmd;
957 
958 	/* If there's a pending mgmt command the flag will not yet have
959 	 * it's final value, so check for this first.
960 	 */
961 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
962 	if (cmd) {
963 		struct mgmt_mode *cp = cmd->param;
964 
965 		return cp->val;
966 	}
967 
968 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
969 }
970 
/* hci_cmd_sync callback: refresh the EIR data and device class after
 * the service cache has been disabled.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
978 
/* Delayed-work handler that turns the service cache off and queues a
 * sync update of EIR/class.  Does nothing if the cache flag was
 * already cleared.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
989 
990 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
991 {
992 	/* The generation of a new RPA and programming it into the
993 	 * controller happens in the hci_req_enable_advertising()
994 	 * function.
995 	 */
996 	if (ext_adv_capable(hdev))
997 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
998 	else
999 		return hci_enable_advertising_sync(hdev);
1000 }
1001 
/* Delayed-work handler for RPA expiry: mark the RPA as expired and,
 * if advertising is active, queue a sync rotation of the address.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1016 
/* One-time per-controller mgmt initialization, performed the first
 * time a mgmt socket touches the device.  Subsequent calls are no-ops
 * thanks to the test-and-set on HCI_MGMT.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1032 
/* Handler for MGMT_OP_READ_INFO: report address, HCI version,
 * manufacturer, supported/current settings, class of device and the
 * device names to userspace.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Hold hdev->lock so the reported fields form a consistent
	 * snapshot of the controller state.
	 */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* Multi-byte fields are little endian on the mgmt wire format. */
	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1062 
/* Build the EIR payload used by the extended-info reply/event: class of
 * device (BR/EDR only), appearance (LE only) and both device names.
 * Returns the number of bytes written to @eir. The caller must supply a
 * buffer large enough for the worst case (callers here use 512 bytes);
 * no bound is enforced in this function itself.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1086 
/* Handler for MGMT_OP_READ_EXT_INFO: like read_controller_info() but
 * with class/appearance/names packed as EIR structures after the fixed
 * header. Calling this once also switches the socket over to the
 * extended info event and away from the legacy class/name events.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1126 
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (with the full EIR payload) to all
 * sockets that opted in via HCI_MGMT_EXT_INFO_EVENTS, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1142 
1143 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1144 {
1145 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1146 
1147 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1148 				 sizeof(settings));
1149 }
1150 
1151 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1152 {
1153 	struct mgmt_ev_advertising_added ev;
1154 
1155 	ev.instance = instance;
1156 
1157 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1158 }
1159 
1160 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1161 			      u8 instance)
1162 {
1163 	struct mgmt_ev_advertising_removed ev;
1164 
1165 	ev.instance = instance;
1166 
1167 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1168 }
1169 
1170 static void cancel_adv_timeout(struct hci_dev *hdev)
1171 {
1172 	if (hdev->adv_instance_timeout) {
1173 		hdev->adv_instance_timeout = 0;
1174 		cancel_delayed_work(&hdev->adv_instance_expire);
1175 	}
1176 }
1177 
/* This function requires the caller holds hdev->lock */
/* Re-populate the pending LE auto-connect and report lists from the
 * stored connection parameters; used on the power-on path.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		/* Re-queue each entry according to its auto-connect policy;
		 * entries with other policies stay off both lists.
		 */
		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1202 
1203 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1204 {
1205 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1206 
1207 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1208 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1209 }
1210 
/* Completion callback for set_powered_sync(): send the reply for the
 * pending MGMT_OP_SET_POWERED command and, on successful power on,
 * restore LE auto-connect actions and broadcast New Settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	/* cmd came from mgmt_pending_new() (not on the pending list), so a
	 * plain free is the correct teardown here.
	 */
	mgmt_pending_free(cmd);
}
1240 
/* hci_sync callback for MGMT_OP_SET_POWERED: apply the power state
 * requested in the pending command's parameters.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1250 
1251 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1252 		       u16 len)
1253 {
1254 	struct mgmt_mode *cp = data;
1255 	struct mgmt_pending_cmd *cmd;
1256 	int err;
1257 
1258 	bt_dev_dbg(hdev, "sock %p", sk);
1259 
1260 	if (cp->val != 0x00 && cp->val != 0x01)
1261 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1262 				       MGMT_STATUS_INVALID_PARAMS);
1263 
1264 	hci_dev_lock(hdev);
1265 
1266 	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1267 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1268 				      MGMT_STATUS_BUSY);
1269 		goto failed;
1270 	}
1271 
1272 	if (!!cp->val == hdev_is_powered(hdev)) {
1273 		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1274 		goto failed;
1275 	}
1276 
1277 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1278 	if (!cmd) {
1279 		err = -ENOMEM;
1280 		goto failed;
1281 	}
1282 
1283 	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1284 				 mgmt_set_powered_complete);
1285 
1286 failed:
1287 	hci_dev_unlock(hdev);
1288 	return err;
1289 }
1290 
/* Broadcast the current settings to every mgmt socket (no one skipped). */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1295 
/* Context handed to mgmt_pending_foreach() callbacks that answer a set
 * of pending commands in one sweep.
 */
struct cmd_lookup {
	struct sock *sk;	/* first responded socket (ref held), or NULL */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1301 
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings, remember the first socket responded to (with a reference
 * held so the caller can use it afterwards) and free the command.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Take the command off the pending list before freeing it. */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1317 
/* mgmt_pending_foreach() callback: fail @cmd with the status pointed to
 * by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1325 
1326 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1327 {
1328 	if (cmd->cmd_complete) {
1329 		u8 *status = data;
1330 
1331 		cmd->cmd_complete(cmd, *status);
1332 		mgmt_pending_remove(cmd);
1333 
1334 		return;
1335 	}
1336 
1337 	cmd_status_rsp(cmd, data);
1338 }
1339 
/* cmd_complete handler that echoes the command's own parameters back as
 * the command-complete payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1345 
/* cmd_complete handler that replies with just the leading
 * mgmt_addr_info portion of the command's parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1351 
1352 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1353 {
1354 	if (!lmp_bredr_capable(hdev))
1355 		return MGMT_STATUS_NOT_SUPPORTED;
1356 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1357 		return MGMT_STATUS_REJECTED;
1358 	else
1359 		return MGMT_STATUS_SUCCESS;
1360 }
1361 
1362 static u8 mgmt_le_support(struct hci_dev *hdev)
1363 {
1364 	if (!lmp_le_capable(hdev))
1365 		return MGMT_STATUS_NOT_SUPPORTED;
1366 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1367 		return MGMT_STATUS_REJECTED;
1368 	else
1369 		return MGMT_STATUS_SUCCESS;
1370 }
1371 
/* Finish a pending MGMT_OP_SET_DISCOVERABLE command once the HCI side
 * completed: reply to the caller, arm the discoverable timeout on
 * success and broadcast New Settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited flag that set_discoverable() set
		 * optimistically before queueing the HCI work.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* The timeout arming was deferred to this completion handler. */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1406 
/* Handler for MGMT_OP_SET_DISCOVERABLE (0x00 = off, 0x01 = general,
 * 0x02 = limited with mandatory timeout).
 *
 * Depending on the current state this either updates the flags directly
 * (powered off), merely refreshes the timeout (mode unchanged), or sets
 * the flags optimistically and queues hdev->discoverable_update; the
 * reply is then sent from mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Flags are set optimistically here; the completion handler rolls
	 * back the limited flag on failure.
	 */
	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1536 
/* Finish a pending MGMT_OP_SET_CONNECTABLE command once the HCI side
 * completed: reply to the caller and broadcast New Settings on success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1564 
/* Powered-off path of Set Connectable: only flip the flags, reply, and
 * refresh scanning state if anything changed. Disabling connectable
 * also clears discoverable, since the latter depends on the former.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1593 
/* Handler for MGMT_OP_SET_CONNECTABLE: when powered off only the flags
 * are updated; when powered, the flags are changed optimistically and
 * hdev->connectable_update is queued, with the reply sent later from
 * mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also tears down discoverable
		 * state, including any pending discoverable timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1650 
/* Handler for MGMT_OP_SET_BONDABLE: pure flag update, no HCI traffic,
 * except that in limited privacy mode a discoverable refresh may be
 * queued because the advertising address can depend on bondable state.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test_and_{set,clear} report whether the flag actually changed. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1693 
/* Handler for MGMT_OP_SET_LINK_SECURITY: when powered off only the flag
 * changes; when powered, HCI_OP_WRITE_AUTH_ENABLE is sent and the reply
 * is deferred to the command-complete path for the pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Link security is a BR/EDR-only setting. */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested auth state: reply without HCI traffic. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1762 
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing): when powered off
 * only the flags change (disabling SSP also drops High Speed, which
 * depends on it); when powered, HCI_OP_WRITE_SSP_MODE is sent and the
 * reply is deferred to the pending command's completion path.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also disables HS; "changed" must be
			 * true if either flag actually flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also leaves debug-key mode; best effort, the
	 * return value is intentionally not checked here.
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1843 
/* Handler for MGMT_OP_SET_HS (High Speed / AMP): flag-only update with
 * several preconditions — CONFIG_BT_HS, BR/EDR, SSP capability and SSP
 * enabled. Disabling HS while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS requires SSP to be enabled first. */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP could still disable HS underneath us. */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1904 
/* Completion callback for set_le_sync(): answer every pending
 * MGMT_OP_SET_LE command — with an error status on failure, or with the
 * new settings (plus a New Settings broadcast) on success.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* settings_rsp recorded (and held) the first responded socket so
	 * it can be skipped for the broadcast.
	 */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
1925 
/* hci_sync callback for MGMT_OP_SET_LE: tear down advertising when
 * disabling, write the LE host-supported setting, and refresh the
 * default advertising data when LE ends up enabled.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Stop advertising before LE support goes away. */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
1967 
/* Handler for MGMT_OP_SET_LE: validates that LE may be toggled at all,
 * handles the flag-only cases (powered off, or host support already in
 * the requested state), and otherwise queues set_le_sync(); the reply
 * then comes from set_le_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE drops all advertising instances. */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2059 
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		/* These opcodes may update class of device or EIR. */
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}
2082 
/* Bluetooth Base UUID 00000000-0000-1000-8000-00805f9b34fb in
 * little-endian byte order; the first 12 bytes identify 16/32-bit
 * UUIDs in get_uuid_size().
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2087 
2088 static u8 get_uuid_size(const u8 *uuid)
2089 {
2090 	u32 val;
2091 
2092 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2093 		return 128;
2094 
2095 	val = get_unaligned_le32(&uuid[12]);
2096 	if (val > 0xffff)
2097 		return 32;
2098 
2099 	return 16;
2100 }
2101 
/* Completion callback shared by the UUID/class sync operations: reply
 * to the pending command with the (possibly updated) class of device.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	/* cmd came from mgmt_pending_new(), so plain free. */
	mgmt_pending_free(cmd);
}
2113 
/* hci_sync callback for MGMT_OP_ADD_UUID: refresh class of device
 * first, then the EIR data that may reference the new UUID.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}
2124 
2125 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2126 {
2127 	struct mgmt_cp_add_uuid *cp = data;
2128 	struct mgmt_pending_cmd *cmd;
2129 	struct bt_uuid *uuid;
2130 	int err;
2131 
2132 	bt_dev_dbg(hdev, "sock %p", sk);
2133 
2134 	hci_dev_lock(hdev);
2135 
2136 	if (pending_eir_or_class(hdev)) {
2137 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2138 				      MGMT_STATUS_BUSY);
2139 		goto failed;
2140 	}
2141 
2142 	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2143 	if (!uuid) {
2144 		err = -ENOMEM;
2145 		goto failed;
2146 	}
2147 
2148 	memcpy(uuid->uuid, cp->uuid, 16);
2149 	uuid->svc_hint = cp->svc_hint;
2150 	uuid->size = get_uuid_size(cp->uuid);
2151 
2152 	list_add_tail(&uuid->list, &hdev->uuids);
2153 
2154 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2155 	if (!cmd) {
2156 		err = -ENOMEM;
2157 		goto failed;
2158 	}
2159 
2160 	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2161 	if (err < 0) {
2162 		mgmt_pending_free(cmd);
2163 		goto failed;
2164 	}
2165 
2166 failed:
2167 	hci_dev_unlock(hdev);
2168 	return err;
2169 }
2170 
2171 static bool enable_service_cache(struct hci_dev *hdev)
2172 {
2173 	if (!hdev_is_powered(hdev))
2174 		return false;
2175 
2176 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2177 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2178 				   CACHE_TIMEOUT);
2179 		return true;
2180 	}
2181 
2182 	return false;
2183 }
2184 
/* Runs on the hci_sync command queue: push the updated Class of
 * Device first and then refresh the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2195 
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or, for the all-zero
 * wildcard, every UUID) from the list and schedule a Class of
 * Device/EIR refresh.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a "remove everything" wildcard */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got (re)armed the actual
		 * class/EIR update is deferred, so reply right away
		 * with the current device class.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* _safe variant because matching entries are deleted in-loop */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* mgmt_class_complete() replies to the caller and frees cmd */
	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2261 
2262 static int set_class_sync(struct hci_dev *hdev, void *data)
2263 {
2264 	int err = 0;
2265 
2266 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2267 		cancel_delayed_work_sync(&hdev->service_cache);
2268 		err = hci_update_eir_sync(hdev);
2269 	}
2270 
2271 	if (err)
2272 		return err;
2273 
2274 	return hci_update_class_sync(hdev);
2275 }
2276 
/* MGMT_OP_SET_DEV_CLASS handler: update the cached major/minor device
 * class and, if the controller is powered, schedule the HCI update.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device only exists for BR/EDR capable controllers */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reject values with the low two minor bits or high three major
	 * bits set (reserved in the Class of Device format).
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* While powered off only the cached values are updated and the
	 * current device class is returned as-is.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* mgmt_class_complete() replies to the caller and frees cmd */
	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2328 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the complete set of stored
 * BR/EDR link keys with the list supplied by userspace and update the
 * keep-debug-keys policy. All entries are validated before the
 * existing keys get cleared.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound keeping struct_size() within the u16 length field */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Payload length must match the advertised key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before touching the stored keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify listeners if the keep-debug-keys setting flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Administratively blocked keys are skipped, not stored */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2417 
2418 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2419 			   u8 addr_type, struct sock *skip_sk)
2420 {
2421 	struct mgmt_ev_device_unpaired ev;
2422 
2423 	bacpy(&ev.addr.bdaddr, bdaddr);
2424 	ev.addr.type = addr_type;
2425 
2426 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2427 			  skip_sk);
2428 }
2429 
/* MGMT_OP_UNPAIR_DEVICE handler: remove the stored keys for a device
 * (link key for BR/EDR, LTK/IRK via SMP for LE) and optionally
 * terminate an existing connection to it.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean flag (0x00 or 0x01) */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Without an active connection the parameters can go right away */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Defer the command response until the disconnect completes;
	 * addr_cmd_complete() will send it then.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2557 
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE connection
 * to the given address with the "remote user terminated" reason. The
 * command response is deferred until the disconnect completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one DISCONNECT command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections are not (yet/anymore) active */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* generic_cmd_complete() sends the response once the
	 * disconnect event arrives.
	 */
	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2623 
2624 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2625 {
2626 	switch (link_type) {
2627 	case LE_LINK:
2628 		switch (addr_type) {
2629 		case ADDR_LE_DEV_PUBLIC:
2630 			return BDADDR_LE_PUBLIC;
2631 
2632 		default:
2633 			/* Fallback to LE Random address type */
2634 			return BDADDR_LE_RANDOM;
2635 		}
2636 
2637 	default:
2638 		/* Fallback to BR/EDR type */
2639 		return BDADDR_BREDR;
2640 	}
2641 }
2642 
/* MGMT_OP_GET_CONNECTIONS handler: return the addresses of all
 * mgmt-visible connections. SCO/eSCO links are filtered out of the
 * reply.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count candidates to size the reply allocation
	 * (an upper bound, since SCO links get filtered out below).
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. For SCO/eSCO links the
	 * slot is written but i is not advanced, so the next entry
	 * overwrites it and they never appear in the reply.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2696 
/* Send HCI_OP_PIN_CODE_NEG_REPLY for @cp and track it as a pending
 * mgmt command; addr_cmd_complete() delivers the response once the
 * HCI command finishes. Caller must hold hdev lock.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI negative reply only carries the remote address */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2717 
/* MGMT_OP_PIN_CODE_REPLY handler: forward the user-supplied PIN code
 * to the controller. If high security is pending but the PIN is not a
 * full 16 bytes, a negative reply is sent to the controller instead
 * and the command fails with INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 digit PIN; reject shorter ones by
	 * negatively replying to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* addr_cmd_complete() responds once the HCI command finishes */
	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2779 
2780 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2781 			     u16 len)
2782 {
2783 	struct mgmt_cp_set_io_capability *cp = data;
2784 
2785 	bt_dev_dbg(hdev, "sock %p", sk);
2786 
2787 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2788 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2789 				       MGMT_STATUS_INVALID_PARAMS);
2790 
2791 	hci_dev_lock(hdev);
2792 
2793 	hdev->io_capability = cp->io_capability;
2794 
2795 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2796 
2797 	hci_dev_unlock(hdev);
2798 
2799 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2800 				 NULL, 0);
2801 }
2802 
2803 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2804 {
2805 	struct hci_dev *hdev = conn->hdev;
2806 	struct mgmt_pending_cmd *cmd;
2807 
2808 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2809 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2810 			continue;
2811 
2812 		if (cmd->user_data != conn)
2813 			continue;
2814 
2815 		return cmd;
2816 	}
2817 
2818 	return NULL;
2819 }
2820 
/* Finalize a PAIR_DEVICE command: send the response, detach the
 * pairing callbacks from the connection and drop the references
 * taken when the command was set up in pair_device().
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() done when cmd->user_data was set */
	hci_conn_put(conn);

	return err;
}
2849 
2850 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2851 {
2852 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2853 	struct mgmt_pending_cmd *cmd;
2854 
2855 	cmd = find_pairing(conn);
2856 	if (cmd) {
2857 		cmd->cmd_complete(cmd, status);
2858 		mgmt_pending_remove(cmd);
2859 	}
2860 }
2861 
2862 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2863 {
2864 	struct mgmt_pending_cmd *cmd;
2865 
2866 	BT_DBG("status %u", status);
2867 
2868 	cmd = find_pairing(conn);
2869 	if (!cmd) {
2870 		BT_DBG("Unable to find a pending command");
2871 		return;
2872 	}
2873 
2874 	cmd->cmd_complete(cmd, mgmt_status(status));
2875 	mgmt_pending_remove(cmd);
2876 }
2877 
2878 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2879 {
2880 	struct mgmt_pending_cmd *cmd;
2881 
2882 	BT_DBG("status %u", status);
2883 
2884 	if (!status)
2885 		return;
2886 
2887 	cmd = find_pairing(conn);
2888 	if (!cmd) {
2889 		BT_DBG("Unable to find a pending command");
2890 		return;
2891 	}
2892 
2893 	cmd->cmd_complete(cmd, mgmt_status(status));
2894 	mgmt_pending_remove(cmd);
2895 }
2896 
/* MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding with a
 * remote device over BR/EDR or LE. The command response is deferred
 * until pairing completes (see pairing_complete()).
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	/* Map the connect error to the closest mgmt status */
	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A connect callback already in place means somebody else is
	 * pairing on this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* Already connected and secure enough: finish immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3027 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress
 * PAIR_DEVICE command, remove any keys created so far and tear down
 * the link if it only existed for the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the one the pending pairing targets */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3084 
/* Common handler for all user pairing responses (PIN code, user
 * confirm and passkey replies plus their negative variants). For LE
 * the response goes through SMP; for BR/EDR the given HCI command
 * @hci_op is sent and a pending mgmt command is tracked. @passkey is
 * only used when @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are delivered to SMP, not the controller */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* addr_cmd_complete() responds once the HCI command finishes */
	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3155 
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the matching mgmt/HCI opcodes.
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3167 
/* MGMT_OP_USER_CONFIRM_REPLY handler: thin wrapper delegating to
 * user_pairing_resp(). Unlike the other wrappers it checks the
 * payload length explicitly, since the command table allows
 * variable-length payloads for this opcode.
 * NOTE(review): the variable-length rationale is inferred from the
 * presence of this check alone — confirm against the command table.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3183 
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the matching mgmt/HCI opcodes.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3195 
/* MGMT_OP_USER_PASSKEY_REPLY handler: thin wrapper delegating to
 * user_pairing_resp(); the only wrapper that forwards a passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3207 
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the matching mgmt/HCI opcodes.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3219 
/* If the current advertising instance carries data affected by
 * @flags (e.g. MGMT_ADV_FLAG_LOCAL_NAME after a name change), cancel
 * its timeout and schedule the next instance so the advertised data
 * gets refreshed.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3248 
/* HCI request completion handler for MGMT_OP_SET_LOCAL_NAME: report
 * the result to the pending mgmt command and, on success, expire any
 * advertising instance that includes the local name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The command may already have been cancelled/consumed. */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3280 
/* MGMT_OP_SET_LOCAL_NAME: update the complete and short local names,
 * pushing them to the controller (name, EIR and scan response data)
 * when it is powered.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: store the name and notify listeners directly,
	 * without touching the controller.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	/* Powered: the reply is deferred until set_name_complete(). */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3350 
3351 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3352 			  u16 len)
3353 {
3354 	struct mgmt_cp_set_appearance *cp = data;
3355 	u16 appearance;
3356 	int err;
3357 
3358 	bt_dev_dbg(hdev, "sock %p", sk);
3359 
3360 	if (!lmp_le_capable(hdev))
3361 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3362 				       MGMT_STATUS_NOT_SUPPORTED);
3363 
3364 	appearance = le16_to_cpu(cp->appearance);
3365 
3366 	hci_dev_lock(hdev);
3367 
3368 	if (hdev->appearance != appearance) {
3369 		hdev->appearance = appearance;
3370 
3371 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3372 			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3373 
3374 		ext_info_changed(hdev, sk);
3375 	}
3376 
3377 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3378 				0);
3379 
3380 	hci_dev_unlock(hdev);
3381 
3382 	return err;
3383 }
3384 
3385 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3386 				 void *data, u16 len)
3387 {
3388 	struct mgmt_rp_get_phy_configuration rp;
3389 
3390 	bt_dev_dbg(hdev, "sock %p", sk);
3391 
3392 	hci_dev_lock(hdev);
3393 
3394 	memset(&rp, 0, sizeof(rp));
3395 
3396 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3397 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3398 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3399 
3400 	hci_dev_unlock(hdev);
3401 
3402 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3403 				 &rp, sizeof(rp));
3404 }
3405 
/* Broadcast an MGMT PHY Configuration Changed event carrying the
 * currently selected PHYs to all mgmt sockets except @skip.
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3417 
/* Completion handler for the HCI LE Set Default PHY request issued by
 * set_phy_configuration(): report the result to the pending command
 * and broadcast the configuration change on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already have been removed. */
	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Skip the originator; it already got the reply above. */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3448 
/* MGMT_OP_SET_PHY_CONFIGURATION: select the BR/EDR packet types and
 * LE PHYs to use. BR/EDR selections take effect immediately via
 * hdev->pkt_type; LE selections are sent to the controller with the
 * HCI LE Set Default PHY command and completed asynchronously.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject selections outside the supported set. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* All non-configurable PHYs must remain selected. */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selection onto the ACL packet type mask.
	 * Note the basic-rate multi-slot bits are set when selected,
	 * while the EDR bits are handled inverted (set when the PHY is
	 * deselected).
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part did not change, there is nothing to send to
	 * the controller; complete the command directly.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller we have no TX/RX
	 * preference, respectively.
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3603 
3604 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3605 			    u16 len)
3606 {
3607 	int err = MGMT_STATUS_SUCCESS;
3608 	struct mgmt_cp_set_blocked_keys *keys = data;
3609 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3610 				   sizeof(struct mgmt_blocked_key_info));
3611 	u16 key_count, expected_len;
3612 	int i;
3613 
3614 	bt_dev_dbg(hdev, "sock %p", sk);
3615 
3616 	key_count = __le16_to_cpu(keys->key_count);
3617 	if (key_count > max_key_count) {
3618 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3619 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3620 				       MGMT_STATUS_INVALID_PARAMS);
3621 	}
3622 
3623 	expected_len = struct_size(keys, keys, key_count);
3624 	if (expected_len != len) {
3625 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3626 			   expected_len, len);
3627 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3628 				       MGMT_STATUS_INVALID_PARAMS);
3629 	}
3630 
3631 	hci_dev_lock(hdev);
3632 
3633 	hci_blocked_keys_clear(hdev);
3634 
3635 	for (i = 0; i < keys->key_count; ++i) {
3636 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3637 
3638 		if (!b) {
3639 			err = MGMT_STATUS_NO_RESOURCES;
3640 			break;
3641 		}
3642 
3643 		b->type = keys->keys[i].type;
3644 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3645 		list_add_rcu(&b->list, &hdev->blocked_keys);
3646 	}
3647 	hci_dev_unlock(hdev);
3648 
3649 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3650 				err, NULL, 0);
3651 }
3652 
/* MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband-speech setting.
 * A change of value is only accepted while the controller is powered
 * off (a powered controller with a differing value is rejected).
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The driver must declare wideband-speech support via quirk. */
	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Changing the value on a powered controller is not allowed. */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3708 
/* MGMT_OP_READ_CONTROLLER_CAP: build a reply listing the controller's
 * security capabilities as a sequence of EIR-style TLV entries.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* NOTE(review): 20 bytes must hold the fixed reply header plus
	 * every TLV appended below -- keep in sync when adding entries.
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3775 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c
 * UUIDs are stored in reversed (little-endian) byte order relative
 * to their textual form.
 */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif
3783 
/* Experimental feature UUIDs; bytes are stored in reversed
 * (little-endian) order relative to the textual form.
 */

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3807 
/* MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental features
 * available for the given index (or globally when hdev is NULL),
 * each as a 16-byte UUID plus 32-bit flags (BIT(0) = enabled).
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[102];   /* Enough space for 5 features: 2 + 20 * 5 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature is global, i.e. only listed for the non-controller
	 * index.
	 */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->set_quality_report) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3888 
3889 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3890 					  struct sock *skip)
3891 {
3892 	struct mgmt_ev_exp_feature_changed ev;
3893 
3894 	memset(&ev, 0, sizeof(ev));
3895 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
3896 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3897 
3898 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3899 				  &ev, sizeof(ev),
3900 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3901 
3902 }
3903 
#ifdef CONFIG_BT_FEATURE_DEBUG
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = enabled ? BIT(0) : 0;

	/* Debug is a global feature, hence the NULL index. */
	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
3918 
3919 static int exp_quality_report_feature_changed(bool enabled,
3920 					      struct hci_dev *hdev,
3921 					      struct sock *skip)
3922 {
3923 	struct mgmt_ev_exp_feature_changed ev;
3924 
3925 	memset(&ev, 0, sizeof(ev));
3926 	memcpy(ev.uuid, quality_report_uuid, 16);
3927 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3928 
3929 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3930 				  &ev, sizeof(ev),
3931 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3932 }
3933 
/* Helper to build one entry of the exp_features[] dispatch table. */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
3939 
/* The zero key uuid is special. Multiple exp features are set through it.
 * Writing it disables every toggleable experimental feature: the debug
 * feature (global index) and LL privacy (per-controller, only while
 * powered off).
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_debug_feature_changed(false, sk);
	}
#endif

	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);

		hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

		if (changed)
			exp_ll_privacy_feature_changed(false, hdev, sk);
	}

	/* Opt the caller in to future experimental feature events. */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
3975 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Setter for the global debug experimental feature: toggles the
 * bluetooth subsystem debug output on or off.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other listeners after replying to the caller. */
	if (changed)
		exp_debug_feature_changed(val, sk);

	return err;
}
#endif
4022 
/* Setter for the LL privacy (RPA resolution) experimental feature.
 * The flag can only be flipped while the controller is powered off.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
		hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other listeners after replying to the caller. */
	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4087 
/* Setter for the quality-report experimental feature: toggles the
 * driver's quality report hook via hdev->set_quality_report, which is
 * called under the request-sync lock.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* The driver must provide the quality report hook. */
	if (!hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		err = hdev->set_quality_report(hdev, val);
		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other listeners after replying to the caller. */
	if (changed)
		exp_quality_report_feature_changed(val, hdev, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4156 
4157 static int exp_offload_codec_feature_changed(bool enabled, struct hci_dev *hdev,
4158 					     struct sock *skip)
4159 {
4160 	struct mgmt_ev_exp_feature_changed ev;
4161 
4162 	memset(&ev, 0, sizeof(ev));
4163 	memcpy(ev.uuid, offload_codecs_uuid, 16);
4164 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4165 
4166 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4167 				  &ev, sizeof(ev),
4168 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4169 }
4170 
4171 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4172 				  struct mgmt_cp_set_exp_feature *cp,
4173 				  u16 data_len)
4174 {
4175 	bool val, changed;
4176 	int err;
4177 	struct mgmt_rp_set_exp_feature rp;
4178 
4179 	/* Command requires to use a valid controller index */
4180 	if (!hdev)
4181 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4182 				       MGMT_OP_SET_EXP_FEATURE,
4183 				       MGMT_STATUS_INVALID_INDEX);
4184 
4185 	/* Parameters are limited to a single octet */
4186 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4187 		return mgmt_cmd_status(sk, hdev->id,
4188 				       MGMT_OP_SET_EXP_FEATURE,
4189 				       MGMT_STATUS_INVALID_PARAMS);
4190 
4191 	/* Only boolean on/off is supported */
4192 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4193 		return mgmt_cmd_status(sk, hdev->id,
4194 				       MGMT_OP_SET_EXP_FEATURE,
4195 				       MGMT_STATUS_INVALID_PARAMS);
4196 
4197 	val = !!cp->param[0];
4198 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4199 
4200 	if (!hdev->get_data_path_id) {
4201 		return mgmt_cmd_status(sk, hdev->id,
4202 				       MGMT_OP_SET_EXP_FEATURE,
4203 				       MGMT_STATUS_NOT_SUPPORTED);
4204 	}
4205 
4206 	if (changed) {
4207 		if (val)
4208 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4209 		else
4210 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4211 	}
4212 
4213 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4214 		    val, changed);
4215 
4216 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4217 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4218 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4219 	err = mgmt_cmd_complete(sk, hdev->id,
4220 				MGMT_OP_SET_EXP_FEATURE, 0,
4221 				&rp, sizeof(rp));
4222 
4223 	if (changed)
4224 		exp_offload_codec_feature_changed(val, hdev, sk);
4225 
4226 	return err;
4227 }
4228 
/* Dispatch table mapping experimental feature UUIDs to their setter
 * functions; terminated by a NULL entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4245 
4246 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4247 			   void *data, u16 data_len)
4248 {
4249 	struct mgmt_cp_set_exp_feature *cp = data;
4250 	size_t i = 0;
4251 
4252 	bt_dev_dbg(hdev, "sock %p", sk);
4253 
4254 	for (i = 0; exp_features[i].uuid; i++) {
4255 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4256 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4257 	}
4258 
4259 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4260 			       MGMT_OP_SET_EXP_FEATURE,
4261 			       MGMT_STATUS_NOT_SUPPORTED);
4262 }
4263 
/* Mask of every per-device flag the kernel supports:
 * bits 0 .. (HCI_CONN_FLAG_MAX - 1) set.
 */
#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4265 
/* MGMT_OP_GET_DEVICE_FLAGS handler: look up the stored per-device flags
 * for either a BR/EDR accept-list entry or an LE connection-parameter
 * entry and return them along with the mask of supported flags.  Replies
 * INVALID_PARAMS (with a zeroed payload) when the device is unknown.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	/* NOTE(review): other bt_dev_dbg calls in this file omit the
	 * trailing "\n"; this one looks inconsistent — confirm.
	 */
	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	/* Zeroed reply is what an unknown device gets back */
	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->current_flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		if (!params)
			goto done;

		current_flags = params->current_flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	/* Only reached when the device was found above */
	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
4315 
4316 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4317 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4318 				 u32 supported_flags, u32 current_flags)
4319 {
4320 	struct mgmt_ev_device_flags_changed ev;
4321 
4322 	bacpy(&ev.addr.bdaddr, bdaddr);
4323 	ev.addr.type = bdaddr_type;
4324 	ev.supported_flags = cpu_to_le32(supported_flags);
4325 	ev.current_flags = cpu_to_le32(current_flags);
4326 
4327 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4328 }
4329 
4330 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4331 			    u16 len)
4332 {
4333 	struct mgmt_cp_set_device_flags *cp = data;
4334 	struct bdaddr_list_with_flags *br_params;
4335 	struct hci_conn_params *params;
4336 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4337 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4338 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4339 
4340 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4341 		   &cp->addr.bdaddr, cp->addr.type,
4342 		   __le32_to_cpu(current_flags));
4343 
4344 	if ((supported_flags | current_flags) != supported_flags) {
4345 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4346 			    current_flags, supported_flags);
4347 		goto done;
4348 	}
4349 
4350 	hci_dev_lock(hdev);
4351 
4352 	if (cp->addr.type == BDADDR_BREDR) {
4353 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4354 							      &cp->addr.bdaddr,
4355 							      cp->addr.type);
4356 
4357 		if (br_params) {
4358 			br_params->current_flags = current_flags;
4359 			status = MGMT_STATUS_SUCCESS;
4360 		} else {
4361 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4362 				    &cp->addr.bdaddr, cp->addr.type);
4363 		}
4364 	} else {
4365 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4366 						le_addr_type(cp->addr.type));
4367 		if (params) {
4368 			params->current_flags = current_flags;
4369 			status = MGMT_STATUS_SUCCESS;
4370 		} else {
4371 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4372 				    &cp->addr.bdaddr,
4373 				    le_addr_type(cp->addr.type));
4374 		}
4375 	}
4376 
4377 done:
4378 	hci_dev_unlock(hdev);
4379 
4380 	if (status == MGMT_STATUS_SUCCESS)
4381 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4382 				     supported_flags, current_flags);
4383 
4384 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4385 				 &cp->addr, sizeof(cp->addr));
4386 }
4387 
4388 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4389 				   u16 handle)
4390 {
4391 	struct mgmt_ev_adv_monitor_added ev;
4392 
4393 	ev.monitor_handle = cpu_to_le16(handle);
4394 
4395 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4396 }
4397 
/* Notify mgmt sockets that advertisement monitor @handle was removed.
 * When the removal was requested over mgmt for a specific handle, the
 * requesting socket is skipped: it receives a dedicated command reply
 * instead of this event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* Non-zero handle means a targeted remove; zero means
		 * "remove all", for which everyone gets the event.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
4417 
4418 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4419 				 void *data, u16 len)
4420 {
4421 	struct adv_monitor *monitor = NULL;
4422 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4423 	int handle, err;
4424 	size_t rp_size = 0;
4425 	__u32 supported = 0;
4426 	__u32 enabled = 0;
4427 	__u16 num_handles = 0;
4428 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4429 
4430 	BT_DBG("request for %s", hdev->name);
4431 
4432 	hci_dev_lock(hdev);
4433 
4434 	if (msft_monitor_supported(hdev))
4435 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4436 
4437 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4438 		handles[num_handles++] = monitor->handle;
4439 
4440 	hci_dev_unlock(hdev);
4441 
4442 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4443 	rp = kmalloc(rp_size, GFP_KERNEL);
4444 	if (!rp)
4445 		return -ENOMEM;
4446 
4447 	/* All supported features are currently enabled */
4448 	enabled = supported;
4449 
4450 	rp->supported_features = cpu_to_le32(supported);
4451 	rp->enabled_features = cpu_to_le32(enabled);
4452 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4453 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4454 	rp->num_handles = cpu_to_le16(num_handles);
4455 	if (num_handles)
4456 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4457 
4458 	err = mgmt_cmd_complete(sk, hdev->id,
4459 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
4460 				MGMT_STATUS_SUCCESS, rp, rp_size);
4461 
4462 	kfree(rp);
4463 
4464 	return err;
4465 }
4466 
4467 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4468 {
4469 	struct mgmt_rp_add_adv_patterns_monitor rp;
4470 	struct mgmt_pending_cmd *cmd;
4471 	struct adv_monitor *monitor;
4472 	int err = 0;
4473 
4474 	hci_dev_lock(hdev);
4475 
4476 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4477 	if (!cmd) {
4478 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4479 		if (!cmd)
4480 			goto done;
4481 	}
4482 
4483 	monitor = cmd->user_data;
4484 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4485 
4486 	if (!status) {
4487 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4488 		hdev->adv_monitors_cnt++;
4489 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4490 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4491 		hci_update_passive_scan(hdev);
4492 	}
4493 
4494 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4495 				mgmt_status(status), &rp, sizeof(rp));
4496 	mgmt_pending_remove(cmd);
4497 
4498 done:
4499 	hci_dev_unlock(hdev);
4500 	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4501 		   rp.monitor_handle, status);
4502 
4503 	return err;
4504 }
4505 
/* Common tail for MGMT_OP_ADD_ADV_PATTERNS_MONITOR[_RSSI]: registers the
 * parsed monitor @m with the HCI core and replies to the request.
 * Ownership of @m always leaves the caller: on any failure path it is
 * freed via hci_free_adv_monitor(); otherwise it is handed to the core.
 * A non-zero @status from the caller's parsing short-circuits straight
 * to the error reply.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Serialize against other operations that touch monitor/LE state */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	/* pending == true: the controller round-trip completes the command
	 * later via mgmt_add_adv_patterns_monitor_complete().
	 */
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		/* Map internal errno to a mgmt status code */
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	if (!pending) {
		/* Registered synchronously; reply right away */
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4569 
4570 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4571 				   struct mgmt_adv_rssi_thresholds *rssi)
4572 {
4573 	if (rssi) {
4574 		m->rssi.low_threshold = rssi->low_threshold;
4575 		m->rssi.low_threshold_timeout =
4576 		    __le16_to_cpu(rssi->low_threshold_timeout);
4577 		m->rssi.high_threshold = rssi->high_threshold;
4578 		m->rssi.high_threshold_timeout =
4579 		    __le16_to_cpu(rssi->high_threshold_timeout);
4580 		m->rssi.sampling_period = rssi->sampling_period;
4581 	} else {
4582 		/* Default values. These numbers are the least constricting
4583 		 * parameters for MSFT API to work, so it behaves as if there
4584 		 * are no rssi parameter to consider. May need to be changed
4585 		 * if other API are to be supported.
4586 		 */
4587 		m->rssi.low_threshold = -127;
4588 		m->rssi.low_threshold_timeout = 60;
4589 		m->rssi.high_threshold = -127;
4590 		m->rssi.high_threshold_timeout = 0;
4591 		m->rssi.sampling_period = 0;
4592 	}
4593 }
4594 
4595 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4596 				    struct mgmt_adv_pattern *patterns)
4597 {
4598 	u8 offset = 0, length = 0;
4599 	struct adv_pattern *p = NULL;
4600 	int i;
4601 
4602 	for (i = 0; i < pattern_count; i++) {
4603 		offset = patterns[i].offset;
4604 		length = patterns[i].length;
4605 		if (offset >= HCI_MAX_AD_LENGTH ||
4606 		    length > HCI_MAX_AD_LENGTH ||
4607 		    (offset + length) > HCI_MAX_AD_LENGTH)
4608 			return MGMT_STATUS_INVALID_PARAMS;
4609 
4610 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4611 		if (!p)
4612 			return MGMT_STATUS_NO_RESOURCES;
4613 
4614 		p->ad_type = patterns[i].ad_type;
4615 		p->offset = patterns[i].offset;
4616 		p->length = patterns[i].length;
4617 		memcpy(p->value, patterns[i].value, p->length);
4618 
4619 		INIT_LIST_HEAD(&p->list);
4620 		list_add(&p->list, &m->patterns);
4621 	}
4622 
4623 	return MGMT_STATUS_SUCCESS;
4624 }
4625 
4626 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4627 				    void *data, u16 len)
4628 {
4629 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4630 	struct adv_monitor *m = NULL;
4631 	u8 status = MGMT_STATUS_SUCCESS;
4632 	size_t expected_size = sizeof(*cp);
4633 
4634 	BT_DBG("request for %s", hdev->name);
4635 
4636 	if (len <= sizeof(*cp)) {
4637 		status = MGMT_STATUS_INVALID_PARAMS;
4638 		goto done;
4639 	}
4640 
4641 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4642 	if (len != expected_size) {
4643 		status = MGMT_STATUS_INVALID_PARAMS;
4644 		goto done;
4645 	}
4646 
4647 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4648 	if (!m) {
4649 		status = MGMT_STATUS_NO_RESOURCES;
4650 		goto done;
4651 	}
4652 
4653 	INIT_LIST_HEAD(&m->patterns);
4654 
4655 	parse_adv_monitor_rssi(m, NULL);
4656 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4657 
4658 done:
4659 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4660 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4661 }
4662 
4663 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4664 					 void *data, u16 len)
4665 {
4666 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4667 	struct adv_monitor *m = NULL;
4668 	u8 status = MGMT_STATUS_SUCCESS;
4669 	size_t expected_size = sizeof(*cp);
4670 
4671 	BT_DBG("request for %s", hdev->name);
4672 
4673 	if (len <= sizeof(*cp)) {
4674 		status = MGMT_STATUS_INVALID_PARAMS;
4675 		goto done;
4676 	}
4677 
4678 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4679 	if (len != expected_size) {
4680 		status = MGMT_STATUS_INVALID_PARAMS;
4681 		goto done;
4682 	}
4683 
4684 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4685 	if (!m) {
4686 		status = MGMT_STATUS_NO_RESOURCES;
4687 		goto done;
4688 	}
4689 
4690 	INIT_LIST_HEAD(&m->patterns);
4691 
4692 	parse_adv_monitor_rssi(m, &cp->rssi);
4693 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4694 
4695 done:
4696 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4697 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4698 }
4699 
4700 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4701 {
4702 	struct mgmt_rp_remove_adv_monitor rp;
4703 	struct mgmt_cp_remove_adv_monitor *cp;
4704 	struct mgmt_pending_cmd *cmd;
4705 	int err = 0;
4706 
4707 	hci_dev_lock(hdev);
4708 
4709 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4710 	if (!cmd)
4711 		goto done;
4712 
4713 	cp = cmd->param;
4714 	rp.monitor_handle = cp->monitor_handle;
4715 
4716 	if (!status)
4717 		hci_update_passive_scan(hdev);
4718 
4719 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4720 				mgmt_status(status), &rp, sizeof(rp));
4721 	mgmt_pending_remove(cmd);
4722 
4723 done:
4724 	hci_dev_unlock(hdev);
4725 	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4726 		   rp.monitor_handle, status);
4727 
4728 	return err;
4729 }
4730 
/* MGMT_OP_REMOVE_ADV_MONITOR handler: remove one monitor (non-zero
 * handle) or all monitors (handle 0).  When the HCI core needs a
 * controller round-trip, the reply is deferred to
 * mgmt_remove_adv_monitor_complete(); otherwise it is sent immediately.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Serialize against other operations that touch monitor/LE state */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Handle 0 means "remove all monitors" */
	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		/* Map internal errno to a mgmt status code */
		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4795 
/* HCI request callback for Read Local OOB (Ext) Data: translate the
 * controller response into a MGMT_OP_READ_LOCAL_OOB_DATA reply for the
 * pending command, then drop the pending entry.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy response: P-192 values only */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a short controller response */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the reply so the unused P-256 fields are not sent */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended response: both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4854 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: queue a Read Local OOB (Ext)
 * Data HCI request; the reply is sent asynchronously from
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be in flight at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Extended variant also returns the P-256 values */
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4905 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store out-of-band pairing data
 * (hash/randomizer values) for a remote device.  Accepts either the
 * legacy P-192-only payload or the extended P-192+P-256 payload; the
 * two are distinguished purely by command length.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy payload: P-192 hash/randomizer only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* Legacy format is BR/EDR only */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended payload: P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5013 
5014 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5015 				  void *data, u16 len)
5016 {
5017 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5018 	u8 status;
5019 	int err;
5020 
5021 	bt_dev_dbg(hdev, "sock %p", sk);
5022 
5023 	if (cp->addr.type != BDADDR_BREDR)
5024 		return mgmt_cmd_complete(sk, hdev->id,
5025 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5026 					 MGMT_STATUS_INVALID_PARAMS,
5027 					 &cp->addr, sizeof(cp->addr));
5028 
5029 	hci_dev_lock(hdev);
5030 
5031 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5032 		hci_remote_oob_data_clear(hdev);
5033 		status = MGMT_STATUS_SUCCESS;
5034 		goto done;
5035 	}
5036 
5037 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5038 	if (err < 0)
5039 		status = MGMT_STATUS_INVALID_PARAMS;
5040 	else
5041 		status = MGMT_STATUS_SUCCESS;
5042 
5043 done:
5044 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5045 				status, &cp->addr, sizeof(cp->addr));
5046 
5047 	hci_dev_unlock(hdev);
5048 	return err;
5049 }
5050 
5051 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5052 {
5053 	struct mgmt_pending_cmd *cmd;
5054 
5055 	bt_dev_dbg(hdev, "status %u", status);
5056 
5057 	hci_dev_lock(hdev);
5058 
5059 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5060 	if (!cmd)
5061 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5062 
5063 	if (!cmd)
5064 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5065 
5066 	if (cmd) {
5067 		cmd->cmd_complete(cmd, mgmt_status(status));
5068 		mgmt_pending_remove(cmd);
5069 	}
5070 
5071 	hci_dev_unlock(hdev);
5072 
5073 	/* Handle suspend notifier */
5074 	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
5075 			       hdev->suspend_tasks)) {
5076 		bt_dev_dbg(hdev, "Unpaused discovery");
5077 		wake_up(&hdev->suspend_wait_q);
5078 	}
5079 }
5080 
5081 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5082 				    uint8_t *mgmt_status)
5083 {
5084 	switch (type) {
5085 	case DISCOV_TYPE_LE:
5086 		*mgmt_status = mgmt_le_support(hdev);
5087 		if (*mgmt_status)
5088 			return false;
5089 		break;
5090 	case DISCOV_TYPE_INTERLEAVED:
5091 		*mgmt_status = mgmt_le_support(hdev);
5092 		if (*mgmt_status)
5093 			return false;
5094 		fallthrough;
5095 	case DISCOV_TYPE_BREDR:
5096 		*mgmt_status = mgmt_bredr_support(hdev);
5097 		if (*mgmt_status)
5098 			return false;
5099 		break;
5100 	default:
5101 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5102 		return false;
5103 	}
5104 
5105 	return true;
5106 }
5107 
/* hci_cmd_sync completion for start-discovery: reply to the originating
 * command, wake any suspend waiter, and update the discovery state.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* The single-byte parameter echoed back is the discovery type */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
				DISCOVERY_FINDING);
}
5128 
/* hci_cmd_sync work function: kick off discovery on the sync context */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5133 
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validate state, record discovery
 * parameters and queue the start on the cmd_sync context.  The reply is
 * deferred to start_discovery_complete() on success.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if a discovery is already running or periodic inquiry on */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_new(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	/* Note: the success path falls through to "failed" too; the label
	 * only marks the common unlock-and-return tail.
	 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
5204 
/* MGMT_OP_START_DISCOVERY handler: thin wrapper over the shared
 * start_discovery_internal() implementation.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5211 
/* MGMT_OP_START_LIMITED_DISCOVERY handler: thin wrapper over the shared
 * start_discovery_internal() implementation.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5219 
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * result filtering by RSSI and a variable-length list of 128-bit service
 * UUIDs appended to the request.  The reply is deferred to
 * start_discovery_complete() on success.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound keeping sizeof(*cp) + uuid_count * 16 within u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if a discovery is already running or periodic inquiry on */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Command length must exactly match header plus UUID list */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	/* Success also falls through to the common unlock tail */
failed:
	hci_dev_unlock(hdev);
	return err;
}
5331 
/* Called when an HCI stop-discovery sequence finishes: complete any
 * pending MGMT_OP_STOP_DISCOVERY command with the translated status and
 * wake up the suspend machinery if discovery was being paused for
 * suspend.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* A pending command only exists if user space requested the stop */
	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
5354 
/* Completion callback for the queued stop_discovery_sync work: reply to
 * the originating socket (the single response byte is the discovery type
 * stored at the start of cmd->param) and, on success, move the discovery
 * state machine to DISCOVERY_STOPPED.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
5374 
/* hci_cmd_sync work item: issue the HCI commands that stop an ongoing
 * discovery.  The pending command in @data is handled by the completion
 * callback, not here.
 */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_stop_discovery_sync(hdev);

	return err;
}
5379 
/* Handler for MGMT_OP_STOP_DISCOVERY: verify that discovery of the
 * requested type is currently active, queue the HCI stop sequence and
 * move the discovery state machine to DISCOVERY_STOPPING.  The command
 * itself is completed asynchronously from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Nothing to stop */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match the one used to start discovery */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5424 
/* Handler for MGMT_OP_CONFIRM_NAME: user space tells the kernel whether
 * the name of a discovered device is already known.  Known entries are
 * removed from the resolve list; unknown ones are marked NAME_NEEDED so
 * name resolution is (re)scheduled for them.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only meaningful while discovery is in progress */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* The address must refer to a cached entry with unknown name state */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
5466 
/* Handler for MGMT_OP_BLOCK_DEVICE: add the given address to the
 * controller's reject list and notify other management sockets with
 * MGMT_EV_DEVICE_BLOCKED.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* Fails (e.g.) when the entry already exists */
	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	/* Skip the originating socket when broadcasting the event */
	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5502 
/* Handler for MGMT_OP_UNBLOCK_DEVICE: remove the given address from the
 * reject list and notify other management sockets with
 * MGMT_EV_DEVICE_UNBLOCKED.  Counterpart of block_device().
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* Deleting a non-existent entry is an invalid-parameters error */
	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Skip the originating socket when broadcasting the event */
	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5538 
/* hci_cmd_sync work item: refresh the EIR data so it reflects the newly
 * configured Device ID values.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_eir_sync(hdev);

	return err;
}
5543 
/* Handler for MGMT_OP_SET_DEVICE_ID: store the Device ID source, vendor,
 * product and version values and schedule an EIR update so the new
 * record is exposed.  Only source values 0x0000-0x0002 are accepted.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* EIR update is best-effort; its result does not affect the reply */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
5575 
/* hci_request completion callback used when re-enabling instance
 * advertising from set_advertising_complete(); it only logs the status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %u", status);
}
5581 
/* Completion callback (legacy hci_request style) for Set Advertising:
 * sync the HCI_ADVERTISING flag with the controller state, answer all
 * pending MGMT_OP_SET_ADVERTISING commands, emit New Settings, handle
 * suspend pause/unpause and, when "Set Advertising" was just turned off
 * while advertising instances exist, reschedule instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	/* On HCI failure just fail all pending commands and bail out */
	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the actual controller advertising state into the flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* With no current instance, fall back to the first configured one */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5655 
/* Handler for MGMT_OP_SET_ADVERTISING: enable (0x01), enable connectable
 * (0x02) or disable (0x00) LE advertising.  When no HCI traffic is
 * required (powered off, no effective change, LE connections present, or
 * active scanning) only the flags are toggled and the settings response
 * is sent directly; otherwise an hci_request is built and completed
 * asynchronously via set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Refuse to touch advertising while it is paused for suspend */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only emit New Settings if a flag actually flipped */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one advertising/LE toggle may be in flight at a time */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5767 
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: configure the LE static random
 * address.  Only allowed while the controller is powered off; a
 * non-"any" address must not be BDADDR_NONE and must have the two most
 * significant bits set.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY clears the static address and needs no validation */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5811 
5812 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5813 			   void *data, u16 len)
5814 {
5815 	struct mgmt_cp_set_scan_params *cp = data;
5816 	__u16 interval, window;
5817 	int err;
5818 
5819 	bt_dev_dbg(hdev, "sock %p", sk);
5820 
5821 	if (!lmp_le_capable(hdev))
5822 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5823 				       MGMT_STATUS_NOT_SUPPORTED);
5824 
5825 	interval = __le16_to_cpu(cp->interval);
5826 
5827 	if (interval < 0x0004 || interval > 0x4000)
5828 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5829 				       MGMT_STATUS_INVALID_PARAMS);
5830 
5831 	window = __le16_to_cpu(cp->window);
5832 
5833 	if (window < 0x0004 || window > 0x4000)
5834 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5835 				       MGMT_STATUS_INVALID_PARAMS);
5836 
5837 	if (window > interval)
5838 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5839 				       MGMT_STATUS_INVALID_PARAMS);
5840 
5841 	hci_dev_lock(hdev);
5842 
5843 	hdev->le_scan_interval = interval;
5844 	hdev->le_scan_window = window;
5845 
5846 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5847 				NULL, 0);
5848 
5849 	/* If background scan is running, restart it so new parameters are
5850 	 * loaded.
5851 	 */
5852 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5853 	    hdev->discovery.state == DISCOVERY_STOPPED)
5854 		hci_update_passive_scan(hdev);
5855 
5856 	hci_dev_unlock(hdev);
5857 
5858 	return err;
5859 }
5860 
/* Completion callback for write_fast_connectable_sync: on success sync
 * the HCI_FAST_CONNECTABLE flag with the requested value and send the
 * settings response plus New Settings; on failure report the error.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
5884 
5885 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
5886 {
5887 	struct mgmt_pending_cmd *cmd = data;
5888 	struct mgmt_mode *cp = cmd->param;
5889 
5890 	return hci_write_fast_connectable_sync(hdev, cp->val);
5891 }
5892 
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: requires BR/EDR enabled and
 * controller version 1.2 or later.  When powered the change is queued to
 * the controller and completed from fast_connectable_complete();
 * otherwise only the flag is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just acknowledge the current setting */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag needs to be flipped */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5948 
/* Completion callback for set_bredr_sync: on failure roll back the
 * HCI_BREDR_ENABLED flag that set_bredr() optimistically set; on success
 * send the settings response and New Settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
5971 
5972 static int set_bredr_sync(struct hci_dev *hdev, void *data)
5973 {
5974 	int status;
5975 
5976 	status = hci_write_fast_connectable_sync(hdev, false);
5977 
5978 	if (!status)
5979 		status = hci_update_scan_sync(hdev);
5980 
5981 	/* Since only the advertising data flags will change, there
5982 	 * is no need to update the scan response data.
5983 	 */
5984 	if (!status)
5985 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
5986 
5987 	return status;
5988 }
5989 
/* Handler for MGMT_OP_SET_BREDR: toggle BR/EDR support on a dual-mode
 * controller.  Disabling is only possible while powered off; re-enabling
 * is rejected if a static address or secure connections would make the
 * resulting configuration invalid.  When powered, the HCI work is queued
 * and finished by set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just acknowledge the current setting */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off only flags need to be adjusted; disabling
	 * BR/EDR also clears every BR/EDR-specific setting.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6090 
/* Completion callback for set_secure_conn_sync: on success translate the
 * requested mode (0x00 off, 0x01 on, 0x02 SC-only) into the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags and notify user space; on failure
 * just report the error.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6128 
6129 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6130 {
6131 	struct mgmt_pending_cmd *cmd = data;
6132 	struct mgmt_mode *cp = cmd->param;
6133 	u8 val = !!cp->val;
6134 
6135 	/* Force write of val */
6136 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6137 
6138 	return hci_write_sc_support_sync(hdev, val);
6139 }
6140 
/* Handler for MGMT_OP_SET_SECURE_CONN: configure secure connections
 * (0x00 off, 0x01 on, 0x02 SC-only).  When no controller write is needed
 * (powered off, no SC support, or BR/EDR disabled) only the flags are
 * toggled; otherwise the write is queued and finished by
 * set_secure_conn_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested: just acknowledge the current setting */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6221 
/* Handler for MGMT_OP_SET_DEBUG_KEYS: 0x00 discard debug keys, 0x01 keep
 * them, 0x02 additionally generate/use debug keys.  When the use-flag
 * changes on a powered controller with SSP enabled, the SSP debug mode
 * is written to the controller as well.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Keep the controller's SSP debug mode in sync with the use-flag */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6268 
/* Handler for MGMT_OP_SET_PRIVACY: enable (0x01), enable limited (0x02)
 * or disable (0x00) LE privacy, storing/clearing the local IRK.  Only
 * allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the current RPA as expired so a fresh one is used */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6325 
6326 static bool irk_is_valid(struct mgmt_irk_info *irk)
6327 {
6328 	switch (irk->addr.type) {
6329 	case BDADDR_LE_PUBLIC:
6330 		return true;
6331 
6332 	case BDADDR_LE_RANDOM:
6333 		/* Two most significant bits shall be set */
6334 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6335 			return false;
6336 		return true;
6337 	}
6338 
6339 	return false;
6340 }
6341 
/* Handler for MGMT_OP_LOAD_IRKS: replace the stored identity resolving
 * keys with the list supplied by user space, skipping any blocked keys,
 * and enable RPA resolving.  The whole list is validated before any
 * existing key is discarded.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate everything up front so no partial load can happen */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6412 
6413 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6414 {
6415 	if (key->initiator != 0x00 && key->initiator != 0x01)
6416 		return false;
6417 
6418 	switch (key->addr.type) {
6419 	case BDADDR_LE_PUBLIC:
6420 		return true;
6421 
6422 	case BDADDR_LE_RANDOM:
6423 		/* Two most significant bits shall be set */
6424 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6425 			return false;
6426 		return true;
6427 	}
6428 
6429 	return false;
6430 }
6431 
/* Handle the MGMT_OP_LOAD_LONG_TERM_KEYS command: replace the whole set
 * of stored SMP Long Term Keys with the list supplied by userspace.
 *
 * Entries are fully validated before existing keys are cleared; blocked
 * key values and debug keys are skipped individually without failing the
 * command.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest count that still fits a command payload in a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* First pass: validate every entry before mutating any state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* This is a full replace operation: drop all existing LTKs first */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked key values are skipped with a
		 * warning rather than failing the whole command.
		 */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto SMP key type + authentication
		 * level. For legacy (non-P256) keys the role decides
		 * initiator vs responder variant.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys deliberately fall through into the
			 * default "continue": they are never stored. The
			 * assignments above only document the mapping.
			 */
			fallthrough;
		default:
			/* Unknown key types are skipped, not rejected */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6527 
6528 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
6529 {
6530 	struct mgmt_pending_cmd *cmd = data;
6531 	struct hci_conn *conn = cmd->user_data;
6532 	struct mgmt_cp_get_conn_info *cp = cmd->param;
6533 	struct mgmt_rp_get_conn_info rp;
6534 	u8 status;
6535 
6536 	bt_dev_dbg(hdev, "err %d", err);
6537 
6538 	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
6539 
6540 	status = mgmt_status(err);
6541 	if (status == MGMT_STATUS_SUCCESS) {
6542 		rp.rssi = conn->rssi;
6543 		rp.tx_power = conn->tx_power;
6544 		rp.max_tx_power = conn->max_tx_power;
6545 	} else {
6546 		rp.rssi = HCI_RSSI_INVALID;
6547 		rp.tx_power = HCI_TX_POWER_INVALID;
6548 		rp.max_tx_power = HCI_TX_POWER_INVALID;
6549 	}
6550 
6551 	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
6552 			  &rp, sizeof(rp));
6553 
6554 	if (conn) {
6555 		hci_conn_drop(conn);
6556 		hci_conn_put(conn);
6557 	}
6558 
6559 	mgmt_pending_free(cmd);
6560 }
6561 
/* hci_cmd_sync work for Get Connection Information.
 *
 * Re-validates that the connection recorded in cmd->user_data still
 * exists and is connected, then refreshes RSSI and (when needed) TX
 * power values via synchronous HCI commands. Returns 0 on success, an
 * MGMT status or negative errno otherwise; the result is delivered to
 * get_conn_info_complete().
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* The looked-up conn must be the very one the command was queued
	 * for; otherwise release our reference and report disconnection.
	 */
	if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
		if (cmd->user_data) {
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
		return MGMT_STATUS_NOT_CONNECTED;
	}

	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
6604 
/* Handle the MGMT_OP_GET_CONN_INFO command.
 *
 * Replies with cached RSSI/TX-power values if they are recent enough,
 * otherwise queues get_conn_info_sync() to refresh them from the
 * controller before replying asynchronously.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply address so error replies carry it too */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd)
			err = -ENOMEM;
		else
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		/* Keep the connection alive until the queued work replies;
		 * released in get_conn_info_sync/_complete.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6698 
/* Completion callback for Get Clock Information.
 *
 * On success fills in the local clock and, for an ACL connection, the
 * piconet clock and accuracy; on error only the address is reported.
 * Drops the connection reference (if any) and frees the pending cmd.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On failure skip straight to the (zeroed) reply */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	/* conn is only set when a piconet clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
6731 
/* hci_cmd_sync work for Get Clock Information.
 *
 * Always reads the local clock; additionally reads the piconet clock
 * when the command targeted a specific connection that still exists.
 * If the connection went away, the reference held in cmd->user_data is
 * released here so get_clock_info_complete() skips the piconet fields.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	/* handle == 0 / which == 0x00: read the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	err = hci_read_clock_sync(hdev, &hci_cp);

	if (conn) {
		/* Make sure connection still exists */
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);

		if (conn && conn == cmd->user_data &&
		    conn->state == BT_CONNECTED) {
			hci_cp.handle = cpu_to_le16(conn->handle);
			hci_cp.which = 0x01; /* Piconet clock */
			err = hci_read_clock_sync(hdev, &hci_cp);
		} else if (cmd->user_data) {
			/* Connection gone: drop the reference taken when
			 * the command was queued.
			 */
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
	}

	return err;
}
6762 
/* Handle the MGMT_OP_GET_CLOCK_INFO command (BR/EDR only).
 *
 * BDADDR_ANY requests just the local clock; a specific address also
 * requests the piconet clock of that ACL connection. The actual HCI
 * reads are done asynchronously in get_clock_info_sync().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply address so error replies carry it too */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);

	} else if (conn) {
		/* Keep the connection alive until the queued work replies;
		 * released in get_clock_info_sync/_complete.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
6830 
6831 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6832 {
6833 	struct hci_conn *conn;
6834 
6835 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6836 	if (!conn)
6837 		return false;
6838 
6839 	if (conn->dst_type != type)
6840 		return false;
6841 
6842 	if (conn->state != BT_CONNECTED)
6843 		return false;
6844 
6845 	return true;
6846 }
6847 
6848 /* This function requires the caller holds hdev->lock */
6849 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
6850 			       u8 addr_type, u8 auto_connect)
6851 {
6852 	struct hci_conn_params *params;
6853 
6854 	params = hci_conn_params_add(hdev, addr, addr_type);
6855 	if (!params)
6856 		return -EIO;
6857 
6858 	if (params->auto_connect == auto_connect)
6859 		return 0;
6860 
6861 	list_del_init(&params->action);
6862 
6863 	switch (auto_connect) {
6864 	case HCI_AUTO_CONN_DISABLED:
6865 	case HCI_AUTO_CONN_LINK_LOSS:
6866 		/* If auto connect is being disabled when we're trying to
6867 		 * connect to device, keep connecting.
6868 		 */
6869 		if (params->explicit_connect)
6870 			list_add(&params->action, &hdev->pend_le_conns);
6871 		break;
6872 	case HCI_AUTO_CONN_REPORT:
6873 		if (params->explicit_connect)
6874 			list_add(&params->action, &hdev->pend_le_conns);
6875 		else
6876 			list_add(&params->action, &hdev->pend_le_reports);
6877 		break;
6878 	case HCI_AUTO_CONN_DIRECT:
6879 	case HCI_AUTO_CONN_ALWAYS:
6880 		if (!is_connected(hdev, addr, addr_type))
6881 			list_add(&params->action, &hdev->pend_le_conns);
6882 		break;
6883 	}
6884 
6885 	params->auto_connect = auto_connect;
6886 
6887 	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
6888 		   addr, addr_type, auto_connect);
6889 
6890 	return 0;
6891 }
6892 
6893 static void device_added(struct sock *sk, struct hci_dev *hdev,
6894 			 bdaddr_t *bdaddr, u8 type, u8 action)
6895 {
6896 	struct mgmt_ev_device_added ev;
6897 
6898 	bacpy(&ev.addr.bdaddr, bdaddr);
6899 	ev.addr.type = type;
6900 	ev.action = action;
6901 
6902 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
6903 }
6904 
/* hci_cmd_sync work queued by add_device(): re-evaluate passive
 * scanning so the newly added device takes effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
6909 
/* Handle the MGMT_OP_ADD_DEVICE command.
 *
 * For BR/EDR, adds the device to the accept list (only action 0x01,
 * incoming connections, is supported). For LE, creates/updates the
 * connection parameters with the auto-connect policy derived from
 * cp->action and schedules a passive scan update.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* BDADDR_ANY is not a valid device address here */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Valid actions: 0x00 background scan, 0x01 allow incoming,
	 * 0x02 auto-connect.
	 */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Page scan settings may need updating for the new entry */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	/* Broadcast events to other mgmt sockets; the issuing socket gets
	 * the command complete below instead.
	 */
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7009 
7010 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7011 			   bdaddr_t *bdaddr, u8 type)
7012 {
7013 	struct mgmt_ev_device_removed ev;
7014 
7015 	bacpy(&ev.addr.bdaddr, bdaddr);
7016 	ev.addr.type = type;
7017 
7018 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7019 }
7020 
/* hci_cmd_sync work queued by remove_device(): re-evaluate passive
 * scanning now that device entries were removed.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7025 
/* Handle the MGMT_OP_REMOVE_DEVICE command.
 *
 * With a specific address, removes that device from the accept list
 * (BR/EDR) or removes its LE connection parameters. With BDADDR_ANY
 * (and type 0), wipes the whole accept list and every non-disabled LE
 * connection parameter entry.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Remove one specific device */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Not on the accept list: invalid request */
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed by
		 * Remove Device (disabled/explicit-connect entries are
		 * kernel-internal).
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove all devices */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires addr.type == 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			/* Disabled entries are kernel-internal; keep them */
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a pending explicit connect, but
			 * downgrade them so they are cleaned up afterwards.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7153 
/* Handle the MGMT_OP_LOAD_CONN_PARAM command: replace the stored LE
 * connection parameters for the listed devices.
 *
 * Unlike the key-loading commands, invalid individual entries are
 * skipped (with an error log) rather than failing the whole command;
 * only a malformed payload is rejected outright.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest count that still fits a command payload in a u16 length */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Drop entries with disabled auto-connect before re-loading */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			/* Non-LE address types are skipped, not rejected */
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		/* Range/consistency check per the Core Specification */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7238 
/* Handle the MGMT_OP_SET_EXTERNAL_CONFIG command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on controllers that declare the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. May only be used while the device is
 * powered off. If the change flips the device between configured and
 * unconfigured state, the mgmt index is re-registered accordingly.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	/* The config parameter is a boolean */
	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed tracks whether the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured/unconfigured state changed, move the device
	 * between the configured and unconfigured index lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: run power-on setup */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose as raw device */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7294 
/* Handle the MGMT_OP_SET_PUBLIC_ADDRESS command.
 *
 * Stores a public address to be programmed into the controller via the
 * driver's set_bdaddr callback. Only allowed while powered off and on
 * drivers that provide set_bdaddr. If the address completes the
 * device's configuration, kick off the power-on/config sequence.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY cannot be a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	/* Notify unconfigured-index listeners of the new option state */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Configuration is now complete: re-register the index as
		 * configured and bring the device up.
		 */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7346 
/* HCI request callback for Read Local OOB Extended Data.
 *
 * Parses either the legacy (P-192 only) or the extended (P-192 + P-256)
 * OOB response, packs the hash/randomizer values into EIR format and
 * sends both the command reply and, on success, the Local OOB Data
 * Updated event to subscribed sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* Controller failure: report no OOB data */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy response: P-192 values only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-device + two 16-byte values w/ headers */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended response: P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only: suppress the P-192 values */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* eir_len == 0 means a failure reply with empty EIR data */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly subscribes to OOB data events */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7457 
7458 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7459 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7460 {
7461 	struct mgmt_pending_cmd *cmd;
7462 	struct hci_request req;
7463 	int err;
7464 
7465 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7466 			       cp, sizeof(*cp));
7467 	if (!cmd)
7468 		return -ENOMEM;
7469 
7470 	hci_req_init(&req, hdev);
7471 
7472 	if (bredr_sc_enabled(hdev))
7473 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7474 	else
7475 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7476 
7477 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7478 	if (err < 0) {
7479 		mgmt_pending_remove(cmd);
7480 		return err;
7481 	}
7482 
7483 	return 0;
7484 }
7485 
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 *
 * Returns EIR-formatted local out-of-band pairing data for either the
 * BR/EDR transport or the LE transport, selected by cp->type. For BR/EDR
 * with SSP enabled the values must be fetched from the controller
 * asynchronously (see read_local_ssp_oob_req()); all other cases are
 * answered synchronously from host state.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-compute the worst-case EIR length for the requested address
	 * type(s) so the reply buffer can be sized before filling it in.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;	/* class-of-device field */
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* address(9) + role(3) + confirm(18) +
				 * random(18) + flags(3), matching the
				 * eir_append_data() calls below
				 */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* From here on eir_len tracks how many bytes were actually used */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Hash/randomizer come from the controller; the
			 * reply is sent from the request's completion
			 * callback, so nothing more to do here on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the static random address (type marker 0x01) over
		 * the public one (0x00) when forced, when no public address
		 * exists, or when LE-only with a static address configured.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* NOTE(review): role 0x02 while advertising, 0x01 otherwise;
		 * presumably per the LE Role AD type encoding — confirm
		 * against the Core Spec Supplement.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Opt this socket in for future OOB data update events */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the fresh OOB data to other subscribed sockets */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7646 
7647 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7648 {
7649 	u32 flags = 0;
7650 
7651 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7652 	flags |= MGMT_ADV_FLAG_DISCOV;
7653 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7654 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7655 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7656 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7657 	flags |= MGMT_ADV_PARAM_DURATION;
7658 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7659 	flags |= MGMT_ADV_PARAM_INTERVALS;
7660 	flags |= MGMT_ADV_PARAM_TX_POWER;
7661 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7662 
7663 	/* In extended adv TX_POWER returned from Set Adv Param
7664 	 * will be always valid.
7665 	 */
7666 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7667 	    ext_adv_capable(hdev))
7668 		flags |= MGMT_ADV_FLAG_TX_POWER;
7669 
7670 	if (ext_adv_capable(hdev)) {
7671 		flags |= MGMT_ADV_FLAG_SEC_1M;
7672 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7673 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7674 
7675 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7676 			flags |= MGMT_ADV_FLAG_SEC_2M;
7677 
7678 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7679 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7680 	}
7681 
7682 	return flags;
7683 }
7684 
/* MGMT_OP_READ_ADV_FEATURES handler.
 *
 * Reports supported advertising flags, maximum data lengths, the maximum
 * and current number of instances, and the list of configured instance
 * identifiers (one trailing byte per instance).
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Variable-length tail: one byte per configured instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);	/* GFP_ATOMIC: inside hdev lock */
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Fill the instance id list; adv_instance_cnt is expected to match
	 * the list length, so the buffer sized above is exactly filled.
	 */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7733 
/* Number of EIR bytes the local name occupies when appended to adv data.
 * Uses a throwaway buffer sized for the longest short name plus header
 * bytes; only the returned length is of interest.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return eir_append_local_name(hdev, buf, 0);
}
7740 
7741 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7742 			   bool is_adv_data)
7743 {
7744 	u8 max_len = HCI_MAX_AD_LENGTH;
7745 
7746 	if (is_adv_data) {
7747 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7748 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7749 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7750 			max_len -= 3;
7751 
7752 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7753 			max_len -= 3;
7754 	} else {
7755 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7756 			max_len -= calculate_name_len(hdev);
7757 
7758 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7759 			max_len -= 4;
7760 	}
7761 
7762 	return max_len;
7763 }
7764 
7765 static bool flags_managed(u32 adv_flags)
7766 {
7767 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7768 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7769 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7770 }
7771 
7772 static bool tx_power_managed(u32 adv_flags)
7773 {
7774 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7775 }
7776 
7777 static bool name_managed(u32 adv_flags)
7778 {
7779 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7780 }
7781 
7782 static bool appearance_managed(u32 adv_flags)
7783 {
7784 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7785 }
7786 
7787 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7788 			      u8 len, bool is_adv_data)
7789 {
7790 	int i, cur_len;
7791 	u8 max_len;
7792 
7793 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7794 
7795 	if (len > max_len)
7796 		return false;
7797 
7798 	/* Make sure that the data is correctly formatted. */
7799 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7800 		cur_len = data[i];
7801 
7802 		if (!cur_len)
7803 			continue;
7804 
7805 		if (data[i + 1] == EIR_FLAGS &&
7806 		    (!is_adv_data || flags_managed(adv_flags)))
7807 			return false;
7808 
7809 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7810 			return false;
7811 
7812 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7813 			return false;
7814 
7815 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7816 			return false;
7817 
7818 		if (data[i + 1] == EIR_APPEARANCE &&
7819 		    appearance_managed(adv_flags))
7820 			return false;
7821 
7822 		/* If the current field length would exceed the total data
7823 		 * length, then it's invalid.
7824 		 */
7825 		if (i + cur_len >= len)
7826 			return false;
7827 	}
7828 
7829 	return true;
7830 }
7831 
7832 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7833 {
7834 	u32 supported_flags, phy_flags;
7835 
7836 	/* The current implementation only supports a subset of the specified
7837 	 * flags. Also need to check mutual exclusiveness of sec flags.
7838 	 */
7839 	supported_flags = get_supported_adv_flags(hdev);
7840 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7841 	if (adv_flags & ~supported_flags ||
7842 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7843 		return false;
7844 
7845 	return true;
7846 }
7847 
7848 static bool adv_busy(struct hci_dev *hdev)
7849 {
7850 	return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7851 		pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7852 		pending_find(MGMT_OP_SET_LE, hdev) ||
7853 		pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7854 		pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
7855 }
7856 
7857 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
7858 			     int err)
7859 {
7860 	struct adv_info *adv, *n;
7861 
7862 	bt_dev_dbg(hdev, "err %d", err);
7863 
7864 	hci_dev_lock(hdev);
7865 
7866 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
7867 		u8 instance;
7868 
7869 		if (!adv->pending)
7870 			continue;
7871 
7872 		if (!err) {
7873 			adv->pending = false;
7874 			continue;
7875 		}
7876 
7877 		instance = adv->instance;
7878 
7879 		if (hdev->cur_adv_instance == instance)
7880 			cancel_adv_timeout(hdev);
7881 
7882 		hci_remove_adv_instance(hdev, instance);
7883 		mgmt_advertising_removed(sk, hdev, instance);
7884 	}
7885 
7886 	hci_dev_unlock(hdev);
7887 }
7888 
7889 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
7890 {
7891 	struct mgmt_pending_cmd *cmd = data;
7892 	struct mgmt_cp_add_advertising *cp = cmd->param;
7893 	struct mgmt_rp_add_advertising rp;
7894 
7895 	memset(&rp, 0, sizeof(rp));
7896 
7897 	rp.instance = cp->instance;
7898 
7899 	if (err)
7900 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7901 				mgmt_status(err));
7902 	else
7903 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7904 				  mgmt_status(err), &rp, sizeof(rp));
7905 
7906 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
7907 
7908 	mgmt_pending_free(cmd);
7909 }
7910 
/* hci_cmd_sync work item: program and schedule the advertising instance
 * that add_advertising() selected (cmd->param holds the command with the
 * instance already rewritten to the one to schedule).
 */
static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
7918 
/* MGMT_OP_ADD_ADVERTISING handler.
 *
 * Validates the request, registers (or replaces) the advertising
 * instance, and — when the controller is powered and software rotation
 * applies — queues the HCI work to actually start advertising. The MGMT
 * response is then sent from add_advertising_complete(); otherwise it is
 * sent synchronously here.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length tail must hold exactly adv + scan rsp data */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Adv data and scan rsp data are validated separately; scan rsp
	 * data starts right after the adv data in the payload.
	 */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Rewrite the instance so add_advertising_sync() schedules the
	 * right one (may differ from the instance just added).
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8051 
/* Completion handler for the extended advertising parameter request.
 *
 * Sends the MGMT response carrying the instance's selected TX power and
 * the data space available given the chosen flags. On failure the
 * half-created instance is torn down again.
 *
 * NOTE(review): if the instance has already disappeared, no MGMT
 * response is sent for this command at all — confirm that is intended.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	if (cmd)	/* cmd was dereferenced above; NULL check is redundant */
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8102 
/* hci_cmd_sync work item: push the newly created instance's parameters
 * to an extended-advertising capable controller.
 */
static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}
8110 
8111 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8112 			      void *data, u16 data_len)
8113 {
8114 	struct mgmt_cp_add_ext_adv_params *cp = data;
8115 	struct mgmt_rp_add_ext_adv_params rp;
8116 	struct mgmt_pending_cmd *cmd = NULL;
8117 	u32 flags, min_interval, max_interval;
8118 	u16 timeout, duration;
8119 	u8 status;
8120 	s8 tx_power;
8121 	int err;
8122 
8123 	BT_DBG("%s", hdev->name);
8124 
8125 	status = mgmt_le_support(hdev);
8126 	if (status)
8127 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8128 				       status);
8129 
8130 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8131 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8132 				       MGMT_STATUS_INVALID_PARAMS);
8133 
8134 	/* The purpose of breaking add_advertising into two separate MGMT calls
8135 	 * for params and data is to allow more parameters to be added to this
8136 	 * structure in the future. For this reason, we verify that we have the
8137 	 * bare minimum structure we know of when the interface was defined. Any
8138 	 * extra parameters we don't know about will be ignored in this request.
8139 	 */
8140 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8141 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8142 				       MGMT_STATUS_INVALID_PARAMS);
8143 
8144 	flags = __le32_to_cpu(cp->flags);
8145 
8146 	if (!requested_adv_flags_are_valid(hdev, flags))
8147 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8148 				       MGMT_STATUS_INVALID_PARAMS);
8149 
8150 	hci_dev_lock(hdev);
8151 
8152 	/* In new interface, we require that we are powered to register */
8153 	if (!hdev_is_powered(hdev)) {
8154 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8155 				      MGMT_STATUS_REJECTED);
8156 		goto unlock;
8157 	}
8158 
8159 	if (adv_busy(hdev)) {
8160 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8161 				      MGMT_STATUS_BUSY);
8162 		goto unlock;
8163 	}
8164 
8165 	/* Parse defined parameters from request, use defaults otherwise */
8166 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8167 		  __le16_to_cpu(cp->timeout) : 0;
8168 
8169 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8170 		   __le16_to_cpu(cp->duration) :
8171 		   hdev->def_multi_adv_rotation_duration;
8172 
8173 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8174 		       __le32_to_cpu(cp->min_interval) :
8175 		       hdev->le_adv_min_interval;
8176 
8177 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8178 		       __le32_to_cpu(cp->max_interval) :
8179 		       hdev->le_adv_max_interval;
8180 
8181 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8182 		   cp->tx_power :
8183 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8184 
8185 	/* Create advertising instance with no advertising or response data */
8186 	err = hci_add_adv_instance(hdev, cp->instance, flags,
8187 				   0, NULL, 0, NULL, timeout, duration,
8188 				   tx_power, min_interval, max_interval);
8189 
8190 	if (err < 0) {
8191 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8192 				      MGMT_STATUS_FAILED);
8193 		goto unlock;
8194 	}
8195 
8196 	/* Submit request for advertising params if ext adv available */
8197 	if (ext_adv_capable(hdev)) {
8198 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8199 				       data, data_len);
8200 		if (!cmd) {
8201 			err = -ENOMEM;
8202 			hci_remove_adv_instance(hdev, cp->instance);
8203 			goto unlock;
8204 		}
8205 
8206 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8207 					 add_ext_adv_params_complete);
8208 		if (err < 0)
8209 			mgmt_pending_free(cmd);
8210 	} else {
8211 		rp.instance = cp->instance;
8212 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8213 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8214 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8215 		err = mgmt_cmd_complete(sk, hdev->id,
8216 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8217 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8218 	}
8219 
8220 unlock:
8221 	hci_dev_unlock(hdev);
8222 
8223 	return err;
8224 }
8225 
8226 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8227 {
8228 	struct mgmt_pending_cmd *cmd = data;
8229 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8230 	struct mgmt_rp_add_advertising rp;
8231 
8232 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8233 
8234 	memset(&rp, 0, sizeof(rp));
8235 
8236 	rp.instance = cp->instance;
8237 
8238 	if (err)
8239 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8240 				mgmt_status(err));
8241 	else
8242 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8243 				  mgmt_status(err), &rp, sizeof(rp));
8244 
8245 	mgmt_pending_free(cmd);
8246 }
8247 
/* hci_cmd_sync work item for MGMT_OP_ADD_EXT_ADV_DATA.
 *
 * On extended-advertising controllers the adv data, scan response data
 * and enable are programmed as individual steps; otherwise the software
 * instance scheduler handles everything.
 */
static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
	int err;

	if (ext_adv_capable(hdev)) {
		err = hci_update_adv_data_sync(hdev, cp->instance);
		if (err)
			return err;

		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
		if (err)
			return err;

		return hci_enable_ext_advertising_sync(hdev, cp->instance);
	}

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
8268 
/* MGMT_OP_ADD_EXT_ADV_DATA handler.
 *
 * Second half of extended advertising registration: attach advertising
 * and scan response data to an instance created by
 * MGMT_OP_ADD_EXT_ADV_PARAMS, then start it. On any validation or
 * queueing failure the half-registered instance is removed again
 * (clear_new_instance).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by a prior params call */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8387 
8388 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8389 					int err)
8390 {
8391 	struct mgmt_pending_cmd *cmd = data;
8392 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8393 	struct mgmt_rp_remove_advertising rp;
8394 
8395 	bt_dev_dbg(hdev, "err %d", err);
8396 
8397 	memset(&rp, 0, sizeof(rp));
8398 	rp.instance = cp->instance;
8399 
8400 	if (err)
8401 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8402 				mgmt_status(err));
8403 	else
8404 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8405 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8406 
8407 	mgmt_pending_free(cmd);
8408 }
8409 
/* hci_cmd_sync work item: remove the requested advertising instance
 * (cp->instance of 0 is accepted by remove_advertising(), presumably
 * meaning "all instances" — handled inside hci_remove_advertising_sync)
 * and turn advertising off entirely once no instances remain.
 */
static int remove_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_advertising *cp = cmd->param;
	int err;

	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
	if (err)
		return err;

	if (list_empty(&hdev->adv_instances))
		err = hci_disable_advertising_sync(hdev);

	return err;
}
8425 
/* MGMT_OP_REMOVE_ADVERTISING handler.
 *
 * Validates the instance (non-zero instances must exist; see the
 * cp->instance check below), rejects the request while conflicting
 * operations are pending, then queues remove_advertising_sync(). The
 * MGMT response is sent from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* instance 0 is passed through; any other id must exist */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove at all */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8475 
8476 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8477 			     void *data, u16 data_len)
8478 {
8479 	struct mgmt_cp_get_adv_size_info *cp = data;
8480 	struct mgmt_rp_get_adv_size_info rp;
8481 	u32 flags, supported_flags;
8482 	int err;
8483 
8484 	bt_dev_dbg(hdev, "sock %p", sk);
8485 
8486 	if (!lmp_le_capable(hdev))
8487 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8488 				       MGMT_STATUS_REJECTED);
8489 
8490 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8491 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8492 				       MGMT_STATUS_INVALID_PARAMS);
8493 
8494 	flags = __le32_to_cpu(cp->flags);
8495 
8496 	/* The current implementation only supports a subset of the specified
8497 	 * flags.
8498 	 */
8499 	supported_flags = get_supported_adv_flags(hdev);
8500 	if (flags & ~supported_flags)
8501 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8502 				       MGMT_STATUS_INVALID_PARAMS);
8503 
8504 	rp.instance = cp->instance;
8505 	rp.flags = cp->flags;
8506 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8507 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8508 
8509 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8510 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8511 
8512 	return err;
8513 }
8514 
/* Dispatch table for management commands, indexed by command opcode
 * (entry 0 is unused).  Each entry gives the handler, the expected
 * parameter size (a minimum when HCI_MGMT_VAR_LEN is set) and flags
 * describing hdev and trust requirements.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8642 
8643 void mgmt_index_added(struct hci_dev *hdev)
8644 {
8645 	struct mgmt_ev_ext_index ev;
8646 
8647 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8648 		return;
8649 
8650 	switch (hdev->dev_type) {
8651 	case HCI_PRIMARY:
8652 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8653 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8654 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8655 			ev.type = 0x01;
8656 		} else {
8657 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8658 					 HCI_MGMT_INDEX_EVENTS);
8659 			ev.type = 0x00;
8660 		}
8661 		break;
8662 	case HCI_AMP:
8663 		ev.type = 0x02;
8664 		break;
8665 	default:
8666 		return;
8667 	}
8668 
8669 	ev.bus = hdev->bus;
8670 
8671 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8672 			 HCI_MGMT_EXT_INDEX_EVENTS);
8673 }
8674 
/* Announce that a controller index is going away.  For primary
 * controllers any commands still pending on the index are first
 * completed with INVALID_INDEX, then the legacy and extended "index
 * removed" events are sent.  Raw devices and unknown dev_types are
 * silently ignored.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 flushes every pending command on this hdev. */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8709 
/* Called when the power-on sequence for @hdev has finished (@err is 0
 * on success).  Completes pending MGMT_OP_SET_POWERED commands and
 * broadcasts the resulting settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		/* Re-arm connection actions and passive scanning that were
		 * configured while the controller was down.
		 */
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the socket reference stashed in match.sk by settings_rsp */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
8732 
/* Core power-off handling: respond to pending commands, reset the
 * advertised class of device and broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail every remaining pending command (opcode 0 matches all). */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* A powered-off controller has no class of device; tell listeners
	 * only when it was actually non-zero before.
	 */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* Drop the socket reference stashed in match.sk by settings_rsp */
	if (match.sk)
		sock_put(match.sk);
}
8766 
8767 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8768 {
8769 	struct mgmt_pending_cmd *cmd;
8770 	u8 status;
8771 
8772 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8773 	if (!cmd)
8774 		return;
8775 
8776 	if (err == -ERFKILL)
8777 		status = MGMT_STATUS_RFKILLED;
8778 	else
8779 		status = MGMT_STATUS_FAILED;
8780 
8781 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8782 
8783 	mgmt_pending_remove(cmd);
8784 }
8785 
8786 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8787 		       bool persistent)
8788 {
8789 	struct mgmt_ev_new_link_key ev;
8790 
8791 	memset(&ev, 0, sizeof(ev));
8792 
8793 	ev.store_hint = persistent;
8794 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8795 	ev.key.addr.type = BDADDR_BREDR;
8796 	ev.key.type = key->type;
8797 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8798 	ev.key.pin_len = key->pin_len;
8799 
8800 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8801 }
8802 
8803 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8804 {
8805 	switch (ltk->type) {
8806 	case SMP_LTK:
8807 	case SMP_LTK_RESPONDER:
8808 		if (ltk->authenticated)
8809 			return MGMT_LTK_AUTHENTICATED;
8810 		return MGMT_LTK_UNAUTHENTICATED;
8811 	case SMP_LTK_P256:
8812 		if (ltk->authenticated)
8813 			return MGMT_LTK_P256_AUTH;
8814 		return MGMT_LTK_P256_UNAUTH;
8815 	case SMP_LTK_P256_DEBUG:
8816 		return MGMT_LTK_P256_DEBUG;
8817 	}
8818 
8819 	return MGMT_LTK_UNAUTHENTICATED;
8820 }
8821 
/* Forward a newly distributed SMP Long Term Key to userspace via the
 * New Long Term Key event.  @persistent suggests whether userspace
 * should store the key; it is overridden to 0 for non-identity random
 * addresses (see below).
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK is the key distributed by the initiator */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8864 
8865 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8866 {
8867 	struct mgmt_ev_new_irk ev;
8868 
8869 	memset(&ev, 0, sizeof(ev));
8870 
8871 	ev.store_hint = persistent;
8872 
8873 	bacpy(&ev.rpa, &irk->rpa);
8874 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8875 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8876 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8877 
8878 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
8879 }
8880 
/* Forward a newly distributed Connection Signature Resolving Key to
 * userspace via the New CSRK event.  As with LTKs, the store hint is
 * forced to 0 for non-identity random addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
8910 
8911 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8912 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8913 			 u16 max_interval, u16 latency, u16 timeout)
8914 {
8915 	struct mgmt_ev_new_conn_param ev;
8916 
8917 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
8918 		return;
8919 
8920 	memset(&ev, 0, sizeof(ev));
8921 	bacpy(&ev.addr.bdaddr, bdaddr);
8922 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8923 	ev.store_hint = store_hint;
8924 	ev.min_interval = cpu_to_le16(min_interval);
8925 	ev.max_interval = cpu_to_le16(max_interval);
8926 	ev.latency = cpu_to_le16(latency);
8927 	ev.timeout = cpu_to_le16(timeout);
8928 
8929 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
8930 }
8931 
/* Send the Device Connected event for @conn.  The event carries EIR
 * data: either the cached LE advertising data, or (for connections
 * without it) the remote name and class of device.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	/* Event header plus variable-length EIR data is built in buf. */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;
	u32 flags = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	/* Outgoing connections are flagged as locally initiated */
	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device when it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
8972 
8973 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8974 {
8975 	struct sock **sk = data;
8976 
8977 	cmd->cmd_complete(cmd, 0);
8978 
8979 	*sk = cmd->sk;
8980 	sock_hold(*sk);
8981 
8982 	mgmt_pending_remove(cmd);
8983 }
8984 
/* mgmt_pending_foreach() callback for MGMT_OP_UNPAIR_DEVICE: notify
 * about the unpaired address via device_unpaired(), then complete and
 * free the pending command.  @data is the owning hci_dev.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
8995 
8996 bool mgmt_powering_down(struct hci_dev *hdev)
8997 {
8998 	struct mgmt_pending_cmd *cmd;
8999 	struct mgmt_mode *cp;
9000 
9001 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9002 	if (!cmd)
9003 		return false;
9004 
9005 	cp = cmd->param;
9006 	if (!cp->val)
9007 		return true;
9008 
9009 	return false;
9010 }
9011 
/* Send the Device Disconnected event and resolve pending DISCONNECT
 * and UNPAIR_DEVICE commands that this disconnection completes.  Also
 * kicks the queued power-off once the last connection drops while a
 * power-down is pending.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections that were announced to mgmt */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Completes any pending DISCONNECT command; sk ends up pointing
	 * (with a held reference) at the socket that issued it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9051 
9052 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9053 			    u8 link_type, u8 addr_type, u8 status)
9054 {
9055 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9056 	struct mgmt_cp_disconnect *cp;
9057 	struct mgmt_pending_cmd *cmd;
9058 
9059 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9060 			     hdev);
9061 
9062 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9063 	if (!cmd)
9064 		return;
9065 
9066 	cp = cmd->param;
9067 
9068 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9069 		return;
9070 
9071 	if (cp->addr.type != bdaddr_type)
9072 		return;
9073 
9074 	cmd->cmd_complete(cmd, mgmt_status(status));
9075 	mgmt_pending_remove(cmd);
9076 }
9077 
/* Send the Connect Failed event for @bdaddr, translating the HCI
 * status to a mgmt status.  Also kicks the queued power-off once the
 * last connection drops while a power-down is pending.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9097 
9098 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9099 {
9100 	struct mgmt_ev_pin_code_request ev;
9101 
9102 	bacpy(&ev.addr.bdaddr, bdaddr);
9103 	ev.addr.type = BDADDR_BREDR;
9104 	ev.secure = secure;
9105 
9106 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9107 }
9108 
9109 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9110 				  u8 status)
9111 {
9112 	struct mgmt_pending_cmd *cmd;
9113 
9114 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9115 	if (!cmd)
9116 		return;
9117 
9118 	cmd->cmd_complete(cmd, mgmt_status(status));
9119 	mgmt_pending_remove(cmd);
9120 }
9121 
9122 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9123 				      u8 status)
9124 {
9125 	struct mgmt_pending_cmd *cmd;
9126 
9127 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9128 	if (!cmd)
9129 		return;
9130 
9131 	cmd->cmd_complete(cmd, mgmt_status(status));
9132 	mgmt_pending_remove(cmd);
9133 }
9134 
9135 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9136 			      u8 link_type, u8 addr_type, u32 value,
9137 			      u8 confirm_hint)
9138 {
9139 	struct mgmt_ev_user_confirm_request ev;
9140 
9141 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9142 
9143 	bacpy(&ev.addr.bdaddr, bdaddr);
9144 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9145 	ev.confirm_hint = confirm_hint;
9146 	ev.value = cpu_to_le32(value);
9147 
9148 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9149 			  NULL);
9150 }
9151 
/* Ask userspace to enter a passkey via the User Passkey Request event. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9165 
9166 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9167 				      u8 link_type, u8 addr_type, u8 status,
9168 				      u8 opcode)
9169 {
9170 	struct mgmt_pending_cmd *cmd;
9171 
9172 	cmd = pending_find(opcode, hdev);
9173 	if (!cmd)
9174 		return -ENOENT;
9175 
9176 	cmd->cmd_complete(cmd, mgmt_status(status));
9177 	mgmt_pending_remove(cmd);
9178 
9179 	return 0;
9180 }
9181 
/* Complete a pending MGMT_OP_USER_CONFIRM_REPLY with @status. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9188 
/* Complete a pending MGMT_OP_USER_CONFIRM_NEG_REPLY with @status. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9196 
/* Complete a pending MGMT_OP_USER_PASSKEY_REPLY with @status. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9203 
/* Complete a pending MGMT_OP_USER_PASSKEY_NEG_REPLY with @status. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9211 
9212 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9213 			     u8 link_type, u8 addr_type, u32 passkey,
9214 			     u8 entered)
9215 {
9216 	struct mgmt_ev_passkey_notify ev;
9217 
9218 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9219 
9220 	bacpy(&ev.addr.bdaddr, bdaddr);
9221 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9222 	ev.passkey = __cpu_to_le32(passkey);
9223 	ev.entered = entered;
9224 
9225 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9226 }
9227 
/* Report an authentication failure on @conn: broadcast the Auth Failed
 * event (the pairing command's socket, if any, is passed to
 * mgmt_event()) and complete any pending pairing command with the same
 * status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	/* The initiating socket gets a command response instead */
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9248 
/* Handle completion of an HCI authentication-enable change.  On error
 * all pending MGMT_OP_SET_LINK_SECURITY commands are failed; otherwise
 * the HCI_LINK_SECURITY flag is synced with the controller state, the
 * pending commands are answered and, if the flag actually changed, the
 * new settings are broadcast.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into HCI_LINK_SECURITY;
	 * "changed" is true only when the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the socket reference stashed in match.sk by settings_rsp */
	if (match.sk)
		sock_put(match.sk);
}
9275 
9276 static void clear_eir(struct hci_request *req)
9277 {
9278 	struct hci_dev *hdev = req->hdev;
9279 	struct hci_cp_write_eir cp;
9280 
9281 	if (!lmp_ext_inq_capable(hdev))
9282 		return;
9283 
9284 	memset(hdev->eir, 0, sizeof(hdev->eir));
9285 
9286 	memset(&cp, 0, sizeof(cp));
9287 
9288 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
9289 }
9290 
/* Handle completion of an HCI Secure Simple Pairing mode change.
 * Syncs the HCI_SSP_ENABLED (and dependent HCI_HS_ENABLED) flags with
 * the result, answers pending MGMT_OP_SET_SSP commands, broadcasts new
 * settings when anything changed, and finally updates or clears the
 * EIR data to match the new SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable must roll back the optimistically set
		 * flags and announce the unchanged settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables High Speed support */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the socket reference stashed in match.sk by settings_rsp */
	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
9343 
9344 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9345 {
9346 	struct cmd_lookup *match = data;
9347 
9348 	if (match->sk == NULL) {
9349 		match->sk = cmd->sk;
9350 		sock_hold(match->sk);
9351 	}
9352 }
9353 
/* Handle completion of a class-of-device update triggered by
 * SET_DEV_CLASS, ADD_UUID or REMOVE_UUID.  On success the new class is
 * broadcast via the Class Of Device Changed event.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Collect the socket of whichever command triggered the update */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	/* Drop the socket reference taken by sk_lookup */
	if (match.sk)
		sock_put(match.sk);
}
9372 
/* Handle completion of a local-name change.  When no SET_LOCAL_NAME
 * command is pending the change originated internally: store the name
 * and stay quiet during power-on.  Otherwise the Local Name Changed
 * event is emitted (the originating socket, if any, is passed to
 * mgmt_limited_event()).
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9400 
9401 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9402 {
9403 	int i;
9404 
9405 	for (i = 0; i < uuid_count; i++) {
9406 		if (!memcmp(uuid, uuids[i], 16))
9407 			return true;
9408 	}
9409 
9410 	return false;
9411 }
9412 
/* Scan EIR/advertising data for any UUID (16, 32 or 128 bit) that is
 * present in the @uuids filter list.  16 and 32 bit UUIDs are expanded
 * to 128 bit with the Bluetooth base UUID before comparison.  Each EIR
 * field is laid out as: eir[0] = length of (type + data), eir[1] =
 * type, data from eir[2] on.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a field that would run past the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs fill bytes 12-13 of the base UUID
			 * (little-endian on the wire)
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs fill bytes 12-15 of the base UUID */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past the length byte plus the field contents */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9467 
9468 static void restart_le_scan(struct hci_dev *hdev)
9469 {
9470 	/* If controller is not scanning we are done. */
9471 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9472 		return;
9473 
9474 	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9475 		       hdev->discovery.scan_start +
9476 		       hdev->discovery.scan_duration))
9477 		return;
9478 
9479 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9480 			   DISCOV_LE_RESTART_DELAY);
9481 }
9482 
9483 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9484 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9485 {
9486 	/* If a RSSI threshold has been specified, and
9487 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9488 	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9489 	 * is set, let it through for further processing, as we might need to
9490 	 * restart the scan.
9491 	 *
9492 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9493 	 * the results are also dropped.
9494 	 */
9495 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9496 	    (rssi == HCI_RSSI_INVALID ||
9497 	    (rssi < hdev->discovery.rssi &&
9498 	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9499 		return  false;
9500 
9501 	if (hdev->discovery.uuid_count != 0) {
9502 		/* If a list of UUIDs is provided in filter, results with no
9503 		 * matching UUID should be dropped.
9504 		 */
9505 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9506 				   hdev->discovery.uuids) &&
9507 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
9508 				   hdev->discovery.uuid_count,
9509 				   hdev->discovery.uuids))
9510 			return false;
9511 	}
9512 
9513 	/* If duplicate filtering does not report RSSI changes, then restart
9514 	 * scanning to ensure updated result with updated RSSI values.
9515 	 */
9516 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9517 		restart_le_scan(hdev);
9518 
9519 		/* Validate RSSI value against the RSSI threshold once more. */
9520 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9521 		    rssi < hdev->discovery.rssi)
9522 			return false;
9523 	}
9524 
9525 	return true;
9526 }
9527 
/* Emit an MGMT_EV_DEVICE_FOUND event for an inquiry/scan result.
 *
 * @dev_class: optional 3-byte BR/EDR Class of Device (NULL for LE).
 * @eir/@eir_len: EIR or advertising data.
 * @scan_rsp/@scan_rsp_len: scan response data, appended after the EIR
 * data in the event.
 *
 * The result may be suppressed: when no kernel-initiated discovery is
 * active (unless LE passive scanning or advertisement monitoring wants
 * it), when the service-discovery filter does not match, or when
 * limited discovery is active and the result does not carry the
 * limited-discoverable flag.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* Bit 13 of the CoD is the limited discoverable
			 * mode bit.
			 */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field (1 length + 1 type + 3 data
	 * bytes appended below).
	 *
	 * NOTE(review): results whose combined EIR and scan response
	 * data would not fit in the 512-byte stack buffer are silently
	 * dropped here; confirm callers cap eir_len/scan_rsp_len
	 * (extended advertising data can exceed this).
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the Class of Device as an EIR field if the source data
	 * didn't already carry one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
9612 
9613 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9614 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9615 {
9616 	struct mgmt_ev_device_found *ev;
9617 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9618 	u16 eir_len;
9619 
9620 	ev = (struct mgmt_ev_device_found *) buf;
9621 
9622 	memset(buf, 0, sizeof(buf));
9623 
9624 	bacpy(&ev->addr.bdaddr, bdaddr);
9625 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9626 	ev->rssi = rssi;
9627 
9628 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9629 				  name_len);
9630 
9631 	ev->eir_len = cpu_to_le16(eir_len);
9632 
9633 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
9634 }
9635 
9636 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9637 {
9638 	struct mgmt_ev_discovering ev;
9639 
9640 	bt_dev_dbg(hdev, "discovering %u", discovering);
9641 
9642 	memset(&ev, 0, sizeof(ev));
9643 	ev.type = hdev->discovery.type;
9644 	ev.discovering = discovering;
9645 
9646 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9647 }
9648 
9649 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9650 {
9651 	struct mgmt_ev_controller_suspend ev;
9652 
9653 	ev.suspend_state = state;
9654 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9655 }
9656 
9657 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9658 		   u8 addr_type)
9659 {
9660 	struct mgmt_ev_controller_resume ev;
9661 
9662 	ev.wake_reason = reason;
9663 	if (bdaddr) {
9664 		bacpy(&ev.addr.bdaddr, bdaddr);
9665 		ev.addr.type = addr_type;
9666 	} else {
9667 		memset(&ev.addr, 0, sizeof(ev.addr));
9668 	}
9669 
9670 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9671 }
9672 
/* Management control channel descriptor: dispatches commands received
 * on HCI_CHANNEL_CONTROL sockets through the mgmt_handlers table, with
 * per-hdev initialization done by mgmt_init_hdev.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9679 
/* Register the management control channel with the HCI core.
 * Returns 0 on success or a negative error code.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9684 
/* Unregister the management control channel from the HCI core. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9689