xref: /openbmc/linux/net/bluetooth/mgmt.c (revision a9ca9f9c)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* All management command opcodes accepted on the control channel. This
 * full list is reported to trusted sockets by MGMT_OP_READ_COMMANDS
 * (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
137 
/* All management events that may be emitted on the control channel.
 * This full list is reported to trusted sockets by
 * MGMT_OP_READ_COMMANDS (see read_commands()).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184 
/* Subset of commands available to untrusted (non-privileged) sockets.
 * All of them are read-only information queries.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197 
/* Subset of events delivered to untrusted (non-privileged) sockets;
 * these carry no security-sensitive data (keys, addresses of remote
 * devices, etc.).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212 
213 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
214 
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
/* HCI to MGMT error code conversion table, indexed directly by the HCI
 * status code. Lookups are bounds-checked in mgmt_status(); anything
 * past the end of the table maps to MGMT_STATUS_FAILED. The comment on
 * each entry names the HCI status being translated.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285 
286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Send an index-related event on the control channel, filtered by the
 * given socket flag, with no socket excluded from delivery.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329 
/* Send an event on the control channel only to sockets that have the
 * given flag set, optionally skipping skip_sk (typically the command
 * originator, which gets a command reply instead).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336 
/* Send an event on the control channel to all trusted sockets,
 * optionally skipping skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343 
/* Send a pre-built event skb on the control channel to all trusted
 * sockets, optionally skipping skip_sk. Consumes the skb.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349 
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 		return ADDR_LE_DEV_PUBLIC;
354 	else
355 		return ADDR_LE_DEV_RANDOM;
356 }
357 
/* Fill a mgmt_rp_read_version structure with the interface version and
 * revision implemented by this kernel. Takes void * so callers can pass
 * a buffer embedded in a larger reply.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365 
/* MGMT_OP_READ_VERSION handler: reply with the management interface
 * version/revision. Not tied to a controller (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378 
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380 			 u16 data_len)
381 {
382 	struct mgmt_rp_read_commands *rp;
383 	u16 num_commands, num_events;
384 	size_t rp_size;
385 	int i, err;
386 
387 	bt_dev_dbg(hdev, "sock %p", sk);
388 
389 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 		num_commands = ARRAY_SIZE(mgmt_commands);
391 		num_events = ARRAY_SIZE(mgmt_events);
392 	} else {
393 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
395 	}
396 
397 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398 
399 	rp = kmalloc(rp_size, GFP_KERNEL);
400 	if (!rp)
401 		return -ENOMEM;
402 
403 	rp->num_commands = cpu_to_le16(num_commands);
404 	rp->num_events = cpu_to_le16(num_events);
405 
406 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 		__le16 *opcode = rp->opcodes;
408 
409 		for (i = 0; i < num_commands; i++, opcode++)
410 			put_unaligned_le16(mgmt_commands[i], opcode);
411 
412 		for (i = 0; i < num_events; i++, opcode++)
413 			put_unaligned_le16(mgmt_events[i], opcode);
414 	} else {
415 		__le16 *opcode = rp->opcodes;
416 
417 		for (i = 0; i < num_commands; i++, opcode++)
418 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419 
420 		for (i = 0; i < num_events; i++, opcode++)
421 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422 	}
423 
424 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425 				rp, rp_size);
426 	kfree(rp);
427 
428 	return err;
429 }
430 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indexes of all
 * configured primary controllers.
 *
 * Two passes over hci_dev_list are made under hci_dev_list_lock: the
 * first only sizes the allocation, the second fills it in while also
 * skipping devices in setup/config/user-channel state and raw-only
 * devices. The second pass can therefore produce FEWER entries than
 * were allocated (never more), which is why rp_len is recomputed from
 * the final count before sending the reply.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: allocating while holding the list read lock. */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
490 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: reply with the indexes of all
 * UNCONFIGURED primary controllers. Mirrors read_index_list() with the
 * HCI_UNCONFIGURED test inverted; see that function for the two-pass
 * count/fill pattern and why rp_len is recomputed.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: allocating while holding the list read lock. */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
550 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with index, bus type and
 * controller type (0x00 configured primary, 0x01 unconfigured primary,
 * 0x02 AMP) for every listed controller. Uses the same two-pass
 * count/fill pattern as read_index_list(), so the fill pass may emit
 * fewer entries than were allocated.
 *
 * Calling this command also switches the socket over to extended index
 * events exclusively (see flag updates below).
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocating while holding the list read lock. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
624 
625 static bool is_configured(struct hci_dev *hdev)
626 {
627 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
629 		return false;
630 
631 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
634 		return false;
635 
636 	return true;
637 }
638 
639 static __le32 get_missing_options(struct hci_dev *hdev)
640 {
641 	u32 options = 0;
642 
643 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
644 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
645 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
646 
647 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
648 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
649 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
650 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
651 
652 	return cpu_to_le32(options);
653 }
654 
/* Broadcast the current missing-options bitmask to sockets that
 * subscribed to option events, skipping the command originator.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
662 
/* Complete a configuration command by replying with the current
 * missing-options bitmask.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
670 
/* MGMT_OP_READ_CONFIG_INFO handler: reply with the manufacturer ID plus
 * which configuration options the controller supports and which are
 * still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address is only possible if the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
698 
/* Build the MGMT_PHY_* bitmask of PHYs the controller supports, derived
 * from its LMP/LE feature bits. The nesting mirrors the feature
 * dependencies: EDR 3-slot/5-slot bits only make sense once the
 * corresponding 2M/3M EDR rate is available.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate, single slot is mandatory for BR/EDR. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE controller. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
750 
/* Build the MGMT_PHY_* bitmask of PHYs currently selected. For BR/EDR,
 * hdev->pkt_type has inverted semantics for EDR packet types: a SET
 * HCI_2DHx/HCI_3DHx bit means that EDR rate is DISABLED, hence the
 * negated tests below, while the basic-rate DM/DH bits are set when
 * enabled. LE selection comes straight from the default TX/RX PHY
 * preferences.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate, single slot cannot be deselected. */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
813 
814 static u32 get_configurable_phys(struct hci_dev *hdev)
815 {
816 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
818 }
819 
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support, based on its LMP/LE capabilities and driver hooks. Compare
 * with get_current_settings(), which reports what is actually enabled.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* These settings are available on every controller. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable relies on page-scan parameter control
		 * introduced with Bluetooth 1.2.
		 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
872 
/* Build the MGMT_SETTING_* bitmask of settings currently in effect,
 * derived from the hdev flag bits. Counterpart of
 * get_supported_settings().
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	return settings;
}
949 
/* Look up a pending management command of the given opcode for this
 * controller on the control channel; NULL if none is in flight.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
954 
955 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
956 {
957 	struct mgmt_pending_cmd *cmd;
958 
959 	/* If there's a pending mgmt command the flags will not yet have
960 	 * their final values, so check for this first.
961 	 */
962 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
963 	if (cmd) {
964 		struct mgmt_mode *cp = cmd->param;
965 		if (cp->val == 0x01)
966 			return LE_AD_GENERAL;
967 		else if (cp->val == 0x02)
968 			return LE_AD_LIMITED;
969 	} else {
970 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
971 			return LE_AD_LIMITED;
972 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
973 			return LE_AD_GENERAL;
974 	}
975 
976 	return 0;
977 }
978 
979 bool mgmt_get_connectable(struct hci_dev *hdev)
980 {
981 	struct mgmt_pending_cmd *cmd;
982 
983 	/* If there's a pending mgmt command the flag will not yet have
984 	 * it's final value, so check for this first.
985 	 */
986 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
987 	if (cmd) {
988 		struct mgmt_mode *cp = cmd->param;
989 
990 		return cp->val;
991 	}
992 
993 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
994 }
995 
/* hci_cmd_sync work: push the (possibly stale) EIR data and class of
 * device out to the controller once the service cache expires.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
1003 
/* Delayed-work handler for service_cache: if the cache flag was still
 * set, clear it and queue the synchronous EIR/class update. The
 * test-and-clear makes this a no-op if the cache was already flushed.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1014 
1015 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1016 {
1017 	/* The generation of a new RPA and programming it into the
1018 	 * controller happens in the hci_req_enable_advertising()
1019 	 * function.
1020 	 */
1021 	if (ext_adv_capable(hdev))
1022 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1023 	else
1024 		return hci_enable_advertising_sync(hdev);
1025 }
1026 
/* Delayed work: mark the Resolvable Private Address as expired and, if
 * advertising is currently enabled, queue the sync handler that
 * restarts advertising (which regenerates the RPA).
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1041 
/* Delayed work: discoverable timeout expired - clear the discoverable
 * flags, push the change to the controller and notify mgmt users.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_update_discoverable(hdev);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1066 
1067 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1068 
/* Finish a mesh transmit: optionally emit the Mesh Packet Complete
 * event and then release the pending TX entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	/* Copy the handle out before mgmt_mesh_remove() releases mesh_tx */
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1080 
/* hci_cmd_sync callback: the mesh sending window is over - stop
 * advertising and complete the mesh TX entry being sent, if any.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1094 
1095 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1096 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback for mesh_send_done_sync(): kick off transmission
 * of the next queued mesh packet, if there is one.  Note that the err
 * parameter is reused as scratch for the queueing result.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	/* If queueing failed, complete (and drop) the TX entry right away */
	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1112 
/* Delayed work: mesh send duration elapsed - queue the sync handler
 * that stops advertising and completes the current mesh TX.
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1123 
/* One-time mgmt initialization of hdev, performed when the first mgmt
 * command arrives for it.  Guarded by the HCI_MGMT flag so repeated
 * calls are no-ops.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1145 
/* Handle the Read Controller Information mgmt command: snapshot the
 * basic device properties under hdev->lock and send them back.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1175 
/* Build the EIR payload for the extended controller information reply:
 * class of device (when BR/EDR is enabled), appearance (when LE is
 * enabled) plus the complete and short local names.  Returns the
 * number of bytes written to eir.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1199 
/* Handle the Read Extended Controller Information mgmt command */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Fixed-size reply header plus variable EIR data; 512 bytes is
	 * assumed large enough for the worst case (bounded name fields).
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1239 
/* Send the Extended Controller Information Changed event to all mgmt
 * sockets that opted into it (HCI_MGMT_EXT_INFO_EVENTS), except skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1255 
/* Reply to a settings-changing command with the current settings value */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1263 
1264 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1265 {
1266 	struct mgmt_ev_advertising_added ev;
1267 
1268 	ev.instance = instance;
1269 
1270 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1271 }
1272 
1273 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1274 			      u8 instance)
1275 {
1276 	struct mgmt_ev_advertising_removed ev;
1277 
1278 	ev.instance = instance;
1279 
1280 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1281 }
1282 
/* Cancel a pending advertising-instance expiry timeout, if one is armed */
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
1290 
/* Re-populate the pending LE connection/report lists from the stored
 * connection parameters, typically after (re-)powering on.
 *
 * This function requires the caller holds hdev->lock
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1315 
/* Broadcast the New Settings event to sockets that enabled setting
 * events, skipping the socket skip (usually the command originator).
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1323 
/* Completion callback for set_powered_sync(): respond to the pending
 * Set Powered command and, on a successful power on, restore LE
 * auto-connect actions and emit New Settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1359 
/* hci_cmd_sync callback carrying out the actual power state change */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1369 
/* Handle the Set Powered mgmt command: validate the request, reject it
 * while another Set Powered is pending, short-circuit when the state
 * already matches, otherwise queue the sync power change.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings back */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		__hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1420 
/* Broadcast New Settings to all interested mgmt sockets (no skip) */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1425 
/* Context passed to mgmt_pending_foreach() response callbacks.  sk
 * records the first responder's socket (a reference is taken via
 * sock_hold() in settings_rsp()) so later broadcasts can skip it.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1431 
/* mgmt_pending_foreach() callback: answer the pending command with the
 * current settings, remember the first socket seen in the cmd_lookup
 * context (with a held reference) and free the pending entry.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink manually; mgmt_pending_free() only frees the entry */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1447 
/* mgmt_pending_foreach() callback: fail the command with *status */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1455 
1456 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1457 {
1458 	if (cmd->cmd_complete) {
1459 		u8 *status = data;
1460 
1461 		cmd->cmd_complete(cmd, *status);
1462 		mgmt_pending_remove(cmd);
1463 
1464 		return;
1465 	}
1466 
1467 	cmd_status_rsp(cmd, data);
1468 }
1469 
/* Default cmd_complete handler: echo the full command parameters back */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1475 
/* cmd_complete handler for commands whose parameters begin with a
 * struct mgmt_addr_info: return only that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1481 
1482 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1483 {
1484 	if (!lmp_bredr_capable(hdev))
1485 		return MGMT_STATUS_NOT_SUPPORTED;
1486 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1487 		return MGMT_STATUS_REJECTED;
1488 	else
1489 		return MGMT_STATUS_SUCCESS;
1490 }
1491 
1492 static u8 mgmt_le_support(struct hci_dev *hdev)
1493 {
1494 	if (!lmp_le_capable(hdev))
1495 		return MGMT_STATUS_NOT_SUPPORTED;
1496 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1497 		return MGMT_STATUS_REJECTED;
1498 	else
1499 		return MGMT_STATUS_SUCCESS;
1500 }
1501 
/* Completion callback for set_discoverable_sync(): respond to the
 * pending Set Discoverable command, arm the discoverable timeout and
 * emit New Settings on success.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the timeout stored by set_discoverable() now that the
	 * controller state change succeeded.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1535 
/* hci_cmd_sync callback pushing the discoverable state to the controller */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1542 
/* Handle the Set Discoverable mgmt command.
 *
 * Validates the mode (0x00 off, 0x01 general, 0x02 limited) and timeout
 * combination, handles the powered-off case purely via flags, and
 * otherwise updates the flags and queues the sync controller update.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable mode to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1675 
1676 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1677 					  int err)
1678 {
1679 	struct mgmt_pending_cmd *cmd = data;
1680 
1681 	bt_dev_dbg(hdev, "err %d", err);
1682 
1683 	/* Make sure cmd still outstanding. */
1684 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1685 		return;
1686 
1687 	hci_dev_lock(hdev);
1688 
1689 	if (err) {
1690 		u8 mgmt_err = mgmt_status(err);
1691 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1692 		goto done;
1693 	}
1694 
1695 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1696 	new_settings(hdev, cmd->sk);
1697 
1698 done:
1699 	if (cmd)
1700 		mgmt_pending_remove(cmd);
1701 
1702 	hci_dev_unlock(hdev);
1703 }
1704 
/* Update the connectable setting while the device is powered off: only
 * flags are toggled, no HCI traffic is needed.  Disabling connectable
 * also clears discoverable, which requires connectable mode.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1733 
/* hci_cmd_sync callback pushing the connectable state to the controller */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1740 
/* Handle the Set Connectable mgmt command: validate the request, take
 * the flags-only path when powered off, otherwise update the flags and
 * queue the sync controller update.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning off connectable also turns off discoverable,
		 * so stop any pending discoverable timeout first.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1800 
/* Handle the Set Bondable mgmt command: toggle the HCI_BONDABLE flag
 * and notify; no controller interaction is required.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1838 
/* Handle the Set Link Security mgmt command: handle the powered-off
 * case via flags only, otherwise issue the HCI Write Authentication
 * Enable command (completion handled elsewhere via the pending cmd).
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Requested state already matches the controller's auth setting */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1907 
/* Completion callback for set_ssp_sync(): reconcile the SSP (and
 * dependent High Speed) flags with the command outcome and answer all
 * pending Set SSP commands.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the optimistic flag set done in set_ssp_sync() */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* HS depends on SSP, so it has to go down with it */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1956 
/* hci_cmd_sync callback: write the SSP mode to the controller.  The
 * HCI_SSP_ENABLED flag is set optimistically before the write and
 * rolled back if the command failed and this call newly set it.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1974 
/* Handle the Set Secure Simple Pairing mgmt command: handle the
 * powered-off case via flags only, otherwise queue the sync SSP mode
 * write to the controller.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also disables High Speed, which
			 * depends on it.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2054 
/* Handle the Set High Speed mgmt command: HS is a pure flag toggle
 * here (no controller interaction), but it requires SSP to be enabled
 * and may not be disabled while powered on.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP could change HCI_SSP_ENABLED underneath us */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2115 
/* Completion callback for set_le_sync(): answer all pending Set LE
 * commands with either a status failure or the current settings.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
2136 
/* Synchronous worker for SET_LE: update the LE host-support setting on
 * the controller and keep advertising state consistent with it.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: tear down all advertising first */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			/* Scan response data only after the extended
			 * instance was set up successfully.
			 */
			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2180 
2181 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2182 {
2183 	struct mgmt_pending_cmd *cmd = data;
2184 	u8 status = mgmt_status(err);
2185 	struct sock *sk = cmd->sk;
2186 
2187 	if (status) {
2188 		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2189 				     cmd_status_rsp, &status);
2190 		return;
2191 	}
2192 
2193 	mgmt_pending_remove(cmd);
2194 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2195 }
2196 
2197 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2198 {
2199 	struct mgmt_pending_cmd *cmd = data;
2200 	struct mgmt_cp_set_mesh *cp = cmd->param;
2201 	size_t len = cmd->param_len;
2202 
2203 	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2204 
2205 	if (cp->enable)
2206 		hci_dev_set_flag(hdev, HCI_MESH);
2207 	else
2208 		hci_dev_clear_flag(hdev, HCI_MESH);
2209 
2210 	len -= sizeof(*cp);
2211 
2212 	/* If filters don't fit, forward all adv pkts */
2213 	if (len <= sizeof(hdev->mesh_ad_types))
2214 		memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2215 
2216 	hci_update_passive_scan_sync(hdev);
2217 	return 0;
2218 }
2219 
2220 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2221 {
2222 	struct mgmt_cp_set_mesh *cp = data;
2223 	struct mgmt_pending_cmd *cmd;
2224 	int err = 0;
2225 
2226 	bt_dev_dbg(hdev, "sock %p", sk);
2227 
2228 	if (!lmp_le_capable(hdev) ||
2229 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2230 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2231 				       MGMT_STATUS_NOT_SUPPORTED);
2232 
2233 	if (cp->enable != 0x00 && cp->enable != 0x01)
2234 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2235 				       MGMT_STATUS_INVALID_PARAMS);
2236 
2237 	hci_dev_lock(hdev);
2238 
2239 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2240 	if (!cmd)
2241 		err = -ENOMEM;
2242 	else
2243 		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2244 					 set_mesh_complete);
2245 
2246 	if (err < 0) {
2247 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2248 				      MGMT_STATUS_FAILED);
2249 
2250 		if (cmd)
2251 			mgmt_pending_remove(cmd);
2252 	}
2253 
2254 	hci_dev_unlock(hdev);
2255 	return err;
2256 }
2257 
2258 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2259 {
2260 	struct mgmt_mesh_tx *mesh_tx = data;
2261 	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2262 	unsigned long mesh_send_interval;
2263 	u8 mgmt_err = mgmt_status(err);
2264 
2265 	/* Report any errors here, but don't report completion */
2266 
2267 	if (mgmt_err) {
2268 		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2269 		/* Send Complete Error Code for handle */
2270 		mesh_send_complete(hdev, mesh_tx, false);
2271 		return;
2272 	}
2273 
2274 	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2275 	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2276 			   mesh_send_interval);
2277 }
2278 
/* Synchronous worker for MGMT_OP_MESH_SEND: transmit a mesh packet by
 * programming it as a short-lived advertising instance.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Mesh uses a dedicated instance number one past the regular
	 * advertising sets (le_num_of_adv_sets + 1).
	 */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	/* NOTE(review): on failure only err is recorded and execution
	 * still reaches the scheduling logic below - confirm that a
	 * failed instance cannot end up being scheduled.
	 */
	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2332 
2333 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2334 {
2335 	struct mgmt_rp_mesh_read_features *rp = data;
2336 
2337 	if (rp->used_handles >= rp->max_handles)
2338 		return;
2339 
2340 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2341 }
2342 
2343 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2344 			 void *data, u16 len)
2345 {
2346 	struct mgmt_rp_mesh_read_features rp;
2347 
2348 	if (!lmp_le_capable(hdev) ||
2349 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2350 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2351 				       MGMT_STATUS_NOT_SUPPORTED);
2352 
2353 	memset(&rp, 0, sizeof(rp));
2354 	rp.index = cpu_to_le16(hdev->id);
2355 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2356 		rp.max_handles = MESH_HANDLES_MAX;
2357 
2358 	hci_dev_lock(hdev);
2359 
2360 	if (rp.max_handles)
2361 		mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2362 
2363 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2364 			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2365 
2366 	hci_dev_unlock(hdev);
2367 	return 0;
2368 }
2369 
2370 static int send_cancel(struct hci_dev *hdev, void *data)
2371 {
2372 	struct mgmt_pending_cmd *cmd = data;
2373 	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2374 	struct mgmt_mesh_tx *mesh_tx;
2375 
2376 	if (!cancel->handle) {
2377 		do {
2378 			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2379 
2380 			if (mesh_tx)
2381 				mesh_send_complete(hdev, mesh_tx, false);
2382 		} while (mesh_tx);
2383 	} else {
2384 		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2385 
2386 		if (mesh_tx && mesh_tx->sk == cmd->sk)
2387 			mesh_send_complete(hdev, mesh_tx, false);
2388 	}
2389 
2390 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2391 			  0, NULL, 0);
2392 	mgmt_pending_free(cmd);
2393 
2394 	return 0;
2395 }
2396 
2397 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2398 			    void *data, u16 len)
2399 {
2400 	struct mgmt_pending_cmd *cmd;
2401 	int err;
2402 
2403 	if (!lmp_le_capable(hdev) ||
2404 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2405 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2406 				       MGMT_STATUS_NOT_SUPPORTED);
2407 
2408 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2409 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2410 				       MGMT_STATUS_REJECTED);
2411 
2412 	hci_dev_lock(hdev);
2413 	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2414 	if (!cmd)
2415 		err = -ENOMEM;
2416 	else
2417 		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2418 
2419 	if (err < 0) {
2420 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2421 				      MGMT_STATUS_FAILED);
2422 
2423 		if (cmd)
2424 			mgmt_pending_free(cmd);
2425 	}
2426 
2427 	hci_dev_unlock(hdev);
2428 	return err;
2429 }
2430 
2431 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2432 {
2433 	struct mgmt_mesh_tx *mesh_tx;
2434 	struct mgmt_cp_mesh_send *send = data;
2435 	struct mgmt_rp_mesh_read_features rp;
2436 	bool sending;
2437 	int err = 0;
2438 
2439 	if (!lmp_le_capable(hdev) ||
2440 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2441 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2442 				       MGMT_STATUS_NOT_SUPPORTED);
2443 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2444 	    len <= MGMT_MESH_SEND_SIZE ||
2445 	    len > (MGMT_MESH_SEND_SIZE + 31))
2446 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2447 				       MGMT_STATUS_REJECTED);
2448 
2449 	hci_dev_lock(hdev);
2450 
2451 	memset(&rp, 0, sizeof(rp));
2452 	rp.max_handles = MESH_HANDLES_MAX;
2453 
2454 	mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2455 
2456 	if (rp.max_handles <= rp.used_handles) {
2457 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2458 				      MGMT_STATUS_BUSY);
2459 		goto done;
2460 	}
2461 
2462 	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2463 	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2464 
2465 	if (!mesh_tx)
2466 		err = -ENOMEM;
2467 	else if (!sending)
2468 		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2469 					 mesh_send_start_complete);
2470 
2471 	if (err < 0) {
2472 		bt_dev_err(hdev, "Send Mesh Failed %d", err);
2473 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2474 				      MGMT_STATUS_FAILED);
2475 
2476 		if (mesh_tx) {
2477 			if (sending)
2478 				mgmt_mesh_remove(mesh_tx);
2479 		}
2480 	} else {
2481 		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2482 
2483 		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2484 				  &mesh_tx->handle, 1);
2485 	}
2486 
2487 done:
2488 	hci_dev_unlock(hdev);
2489 	return err;
2490 }
2491 
/* Handler for the MGMT_OP_SET_LE command: enable or disable Low Energy
 * support.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* If the controller is powered off, or host LE support already
	 * matches the request, only the flags need updating - no HCI
	 * traffic is required.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also clears the advertising setting */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other commands touching the LE settings */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2580 
2581 /* This is a helper function to test for pending mgmt commands that can
2582  * cause CoD or EIR HCI commands. We can only allow one such pending
2583  * mgmt command at a time since otherwise we cannot easily track what
2584  * the current values are, will be, and based on that calculate if a new
2585  * HCI command needs to be sent and if yes with what value.
2586  */
2587 static bool pending_eir_or_class(struct hci_dev *hdev)
2588 {
2589 	struct mgmt_pending_cmd *cmd;
2590 
2591 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2592 		switch (cmd->opcode) {
2593 		case MGMT_OP_ADD_UUID:
2594 		case MGMT_OP_REMOVE_UUID:
2595 		case MGMT_OP_SET_DEV_CLASS:
2596 		case MGMT_OP_SET_POWERED:
2597 			return true;
2598 		}
2599 	}
2600 
2601 	return false;
2602 }
2603 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; 16-bit and 32-bit UUIDs only differ from it
 * in the last four bytes.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2608 
2609 static u8 get_uuid_size(const u8 *uuid)
2610 {
2611 	u32 val;
2612 
2613 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2614 		return 128;
2615 
2616 	val = get_unaligned_le32(&uuid[12]);
2617 	if (val > 0xffff)
2618 		return 32;
2619 
2620 	return 16;
2621 }
2622 
/* Shared completion handler for the UUID/class commands: reply with the
 * current Class of Device (3 bytes) and free the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2634 
/* Synchronous worker for ADD_UUID: push the refreshed Class of Device,
 * then the refreshed EIR data, to the controller.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2645 
/* Handler for the MGMT_OP_ADD_UUID command: register a service UUID and
 * refresh Class of Device and EIR data on the controller.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class touching command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	/* 16, 32 or 128 depending on the relation to the base UUID */
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* The reply is sent from mgmt_class_complete once the HCI
	 * updates have run; on success fall through to the unlock.
	 */
	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2691 
2692 static bool enable_service_cache(struct hci_dev *hdev)
2693 {
2694 	if (!hdev_is_powered(hdev))
2695 		return false;
2696 
2697 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2698 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2699 				   CACHE_TIMEOUT);
2700 		return true;
2701 	}
2702 
2703 	return false;
2704 }
2705 
/* Synchronous worker for REMOVE_UUID: push the refreshed Class of
 * Device, then the refreshed EIR data, to the controller.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2716 
/* Handler for the MGMT_OP_REMOVE_UUID command: remove one service UUID
 * (or all of them) and refresh Class of Device and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* The all-zero UUID acts as a wildcard meaning "remove all" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class touching command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was (re)armed, the controller
		 * update is deferred until the cache flush runs.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* The reply is sent from mgmt_class_complete once the HCI
	 * updates have run.
	 */
	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2784 
2785 static int set_class_sync(struct hci_dev *hdev, void *data)
2786 {
2787 	int err = 0;
2788 
2789 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2790 		cancel_delayed_work_sync(&hdev->service_cache);
2791 		err = hci_update_eir_sync(hdev);
2792 	}
2793 
2794 	if (err)
2795 		return err;
2796 
2797 	return hci_update_class_sync(hdev);
2798 }
2799 
/* Handler for the MGMT_OP_SET_DEV_CLASS command: set the major/minor
 * device class and push the result to a powered controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class touching command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two minor bits and high three major bits must be zero */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Without power there is nothing to write to the controller;
	 * reply with the cached class.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* The reply is sent from mgmt_class_complete */
	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2851 
/* Handler for the MGMT_OP_LOAD_LINK_KEYS command: replace the entire
 * set of stored BR/EDR link keys with the ones supplied by userspace.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound keeping the whole payload addressable in a u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry up front so the existing keys are not
	 * cleared for a request that is partially invalid.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the blocklist must never be (re)stored */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2940 
/* Broadcast a Device Unpaired event for the given address, skipping
 * the socket that issued the unpair request (it gets a command reply).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2952 
/* Completion handler for unpair_device_sync: on success notify other
 * sockets, then finish and free the pending command.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2964 
2965 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2966 {
2967 	struct mgmt_pending_cmd *cmd = data;
2968 	struct mgmt_cp_unpair_device *cp = cmd->param;
2969 	struct hci_conn *conn;
2970 
2971 	if (cp->addr.type == BDADDR_BREDR)
2972 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2973 					       &cp->addr.bdaddr);
2974 	else
2975 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2976 					       le_addr_type(cp->addr.type));
2977 
2978 	if (!conn)
2979 		return 0;
2980 
2981 	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2982 }
2983 
/* Handler for the MGMT_OP_UNPAIR_DEVICE command: delete all keys for a
 * device and optionally terminate its connection first.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the address the request was for */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Tear the link down asynchronously; the reply is sent from
	 * unpair_device_complete.
	 */
	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3112 
/* Handler for the MGMT_OP_DISCONNECT command: terminate a BR/EDR or LE
 * connection on the user's request.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the address the request was for */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one DISCONNECT command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The reply is sent once the disconnect actually completes */
	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3178 
3179 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3180 {
3181 	switch (link_type) {
3182 	case LE_LINK:
3183 		switch (addr_type) {
3184 		case ADDR_LE_DEV_PUBLIC:
3185 			return BDADDR_LE_PUBLIC;
3186 
3187 		default:
3188 			/* Fallback to LE Random address type */
3189 			return BDADDR_LE_RANDOM;
3190 		}
3191 
3192 	default:
3193 		/* Fallback to BR/EDR type */
3194 		return BDADDR_BREDR;
3195 	}
3196 }
3197 
3198 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3199 			   u16 data_len)
3200 {
3201 	struct mgmt_rp_get_connections *rp;
3202 	struct hci_conn *c;
3203 	int err;
3204 	u16 i;
3205 
3206 	bt_dev_dbg(hdev, "sock %p", sk);
3207 
3208 	hci_dev_lock(hdev);
3209 
3210 	if (!hdev_is_powered(hdev)) {
3211 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3212 				      MGMT_STATUS_NOT_POWERED);
3213 		goto unlock;
3214 	}
3215 
3216 	i = 0;
3217 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3218 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3219 			i++;
3220 	}
3221 
3222 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3223 	if (!rp) {
3224 		err = -ENOMEM;
3225 		goto unlock;
3226 	}
3227 
3228 	i = 0;
3229 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
3230 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3231 			continue;
3232 		bacpy(&rp->addr[i].bdaddr, &c->dst);
3233 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3234 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
3235 			continue;
3236 		i++;
3237 	}
3238 
3239 	rp->conn_count = cpu_to_le16(i);
3240 
3241 	/* Recalculate length in case of filtered SCO connections, etc */
3242 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3243 				struct_size(rp, addr, i));
3244 
3245 	kfree(rp);
3246 
3247 unlock:
3248 	hci_dev_unlock(hdev);
3249 	return err;
3250 }
3251 
3252 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3253 				   struct mgmt_cp_pin_code_neg_reply *cp)
3254 {
3255 	struct mgmt_pending_cmd *cmd;
3256 	int err;
3257 
3258 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3259 			       sizeof(*cp));
3260 	if (!cmd)
3261 		return -ENOMEM;
3262 
3263 	cmd->cmd_complete = addr_cmd_complete;
3264 
3265 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3266 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3267 	if (err < 0)
3268 		mgmt_pending_remove(cmd);
3269 
3270 	return err;
3271 }
3272 
/* Handle the PIN Code Reply mgmt command.
 *
 * Forwards a user supplied PIN code to the controller via
 * HCI_OP_PIN_CODE_REPLY. When the pending security level is
 * BT_SECURITY_HIGH a full 16 byte PIN is required; shorter PINs are
 * answered with a negative reply towards the controller and
 * MGMT_STATUS_INVALID_PARAMS towards userspace.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN code pairing only applies to BR/EDR (ACL) links */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		/* Reject towards the controller first; only report the
		 * invalid-params failure if the negative reply was sent.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	/* Pending command is resolved when the HCI command completes;
	 * drop it if sending failed.
	 */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3334 
3335 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3336 			     u16 len)
3337 {
3338 	struct mgmt_cp_set_io_capability *cp = data;
3339 
3340 	bt_dev_dbg(hdev, "sock %p", sk);
3341 
3342 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3343 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3344 				       MGMT_STATUS_INVALID_PARAMS);
3345 
3346 	hci_dev_lock(hdev);
3347 
3348 	hdev->io_capability = cp->io_capability;
3349 
3350 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3351 
3352 	hci_dev_unlock(hdev);
3353 
3354 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3355 				 NULL, 0);
3356 }
3357 
3358 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3359 {
3360 	struct hci_dev *hdev = conn->hdev;
3361 	struct mgmt_pending_cmd *cmd;
3362 
3363 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3364 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3365 			continue;
3366 
3367 		if (cmd->user_data != conn)
3368 			continue;
3369 
3370 		return cmd;
3371 	}
3372 
3373 	return NULL;
3374 }
3375 
/* Complete a pending Pair Device command with the given mgmt status.
 *
 * Detaches the pairing callbacks from the connection and releases both
 * references taken when the command was set up (the hci_conn_drop()
 * matching the connect request and the hci_conn_put() matching the
 * hci_conn_get() stored in cmd->user_data).
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
3404 
3405 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3406 {
3407 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3408 	struct mgmt_pending_cmd *cmd;
3409 
3410 	cmd = find_pairing(conn);
3411 	if (cmd) {
3412 		cmd->cmd_complete(cmd, status);
3413 		mgmt_pending_remove(cmd);
3414 	}
3415 }
3416 
3417 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3418 {
3419 	struct mgmt_pending_cmd *cmd;
3420 
3421 	BT_DBG("status %u", status);
3422 
3423 	cmd = find_pairing(conn);
3424 	if (!cmd) {
3425 		BT_DBG("Unable to find a pending command");
3426 		return;
3427 	}
3428 
3429 	cmd->cmd_complete(cmd, mgmt_status(status));
3430 	mgmt_pending_remove(cmd);
3431 }
3432 
/* Connection callback used for LE pairing. Only failures are handled
 * here; a zero status is ignored since, for LE, connecting alone does
 * not mean pairing finished (presumably mgmt_smp_complete() covers the
 * success path — NOTE(review): confirm against SMP code).
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
3451 
3452 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3453 		       u16 len)
3454 {
3455 	struct mgmt_cp_pair_device *cp = data;
3456 	struct mgmt_rp_pair_device rp;
3457 	struct mgmt_pending_cmd *cmd;
3458 	u8 sec_level, auth_type;
3459 	struct hci_conn *conn;
3460 	int err;
3461 
3462 	bt_dev_dbg(hdev, "sock %p", sk);
3463 
3464 	memset(&rp, 0, sizeof(rp));
3465 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3466 	rp.addr.type = cp->addr.type;
3467 
3468 	if (!bdaddr_type_is_valid(cp->addr.type))
3469 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3470 					 MGMT_STATUS_INVALID_PARAMS,
3471 					 &rp, sizeof(rp));
3472 
3473 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3474 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3475 					 MGMT_STATUS_INVALID_PARAMS,
3476 					 &rp, sizeof(rp));
3477 
3478 	hci_dev_lock(hdev);
3479 
3480 	if (!hdev_is_powered(hdev)) {
3481 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3482 					MGMT_STATUS_NOT_POWERED, &rp,
3483 					sizeof(rp));
3484 		goto unlock;
3485 	}
3486 
3487 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3488 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3489 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3490 					sizeof(rp));
3491 		goto unlock;
3492 	}
3493 
3494 	sec_level = BT_SECURITY_MEDIUM;
3495 	auth_type = HCI_AT_DEDICATED_BONDING;
3496 
3497 	if (cp->addr.type == BDADDR_BREDR) {
3498 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3499 				       auth_type, CONN_REASON_PAIR_DEVICE);
3500 	} else {
3501 		u8 addr_type = le_addr_type(cp->addr.type);
3502 		struct hci_conn_params *p;
3503 
3504 		/* When pairing a new device, it is expected to remember
3505 		 * this device for future connections. Adding the connection
3506 		 * parameter information ahead of time allows tracking
3507 		 * of the peripheral preferred values and will speed up any
3508 		 * further connection establishment.
3509 		 *
3510 		 * If connection parameters already exist, then they
3511 		 * will be kept and this function does nothing.
3512 		 */
3513 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3514 
3515 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3516 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3517 
3518 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3519 					   sec_level, HCI_LE_CONN_TIMEOUT,
3520 					   CONN_REASON_PAIR_DEVICE);
3521 	}
3522 
3523 	if (IS_ERR(conn)) {
3524 		int status;
3525 
3526 		if (PTR_ERR(conn) == -EBUSY)
3527 			status = MGMT_STATUS_BUSY;
3528 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3529 			status = MGMT_STATUS_NOT_SUPPORTED;
3530 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3531 			status = MGMT_STATUS_REJECTED;
3532 		else
3533 			status = MGMT_STATUS_CONNECT_FAILED;
3534 
3535 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3536 					status, &rp, sizeof(rp));
3537 		goto unlock;
3538 	}
3539 
3540 	if (conn->connect_cfm_cb) {
3541 		hci_conn_drop(conn);
3542 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3543 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3544 		goto unlock;
3545 	}
3546 
3547 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3548 	if (!cmd) {
3549 		err = -ENOMEM;
3550 		hci_conn_drop(conn);
3551 		goto unlock;
3552 	}
3553 
3554 	cmd->cmd_complete = pairing_complete;
3555 
3556 	/* For LE, just connecting isn't a proof that the pairing finished */
3557 	if (cp->addr.type == BDADDR_BREDR) {
3558 		conn->connect_cfm_cb = pairing_complete_cb;
3559 		conn->security_cfm_cb = pairing_complete_cb;
3560 		conn->disconn_cfm_cb = pairing_complete_cb;
3561 	} else {
3562 		conn->connect_cfm_cb = le_pairing_complete_cb;
3563 		conn->security_cfm_cb = le_pairing_complete_cb;
3564 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3565 	}
3566 
3567 	conn->io_capability = cp->io_cap;
3568 	cmd->user_data = hci_conn_get(conn);
3569 
3570 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3571 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3572 		cmd->cmd_complete(cmd, 0);
3573 		mgmt_pending_remove(cmd);
3574 	}
3575 
3576 	err = 0;
3577 
3578 unlock:
3579 	hci_dev_unlock(hdev);
3580 	return err;
3581 }
3582 
/* hci_cmd_sync callback that terminates a connection.
 *
 * The connection handle is smuggled in via ERR_PTR() (see
 * cancel_pair_device()), so the connection is looked up again here in
 * case it disappeared before the queued work ran.
 */
static int abort_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn;
	u16 handle = PTR_ERR(data);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		return 0;

	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}
3594 
/* Handle the Cancel Pair Device mgmt command.
 *
 * Completes the pending Pair Device command with
 * MGMT_STATUS_CANCELLED, removes any key material created so far and,
 * if the link was only created for pairing, queues its termination.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the device currently being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	/* The handle is passed packed in an ERR_PTR; abort_conn_sync()
	 * re-resolves it to a connection.
	 */
	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
				   NULL);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3652 
/* Common helper behind the user confirmation/passkey (negative) reply
 * mgmt commands.
 *
 * For LE addresses the response is handed straight to SMP and answered
 * immediately; for BR/EDR a pending command is registered and the
 * corresponding HCI command (hci_op) is sent to the controller.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go via SMP, not via HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3723 
/* Handle the PIN Code Negative Reply mgmt command via the generic
 * user_pairing_resp() helper (no passkey applies here).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3735 
/* Handle the User Confirmation Reply mgmt command. The exact-length
 * check guards against trailing garbage in the parameters.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3751 
/* Handle the User Confirmation Negative Reply mgmt command via the
 * generic user_pairing_resp() helper.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3763 
/* Handle the User Passkey Reply mgmt command, forwarding the passkey
 * through the generic user_pairing_resp() helper.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3775 
/* Handle the User Passkey Negative Reply mgmt command via the generic
 * user_pairing_resp() helper.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3787 
/* If the current advertising instance carries any of the given flags
 * (e.g. local name or appearance data), cancel its timeout and schedule
 * the next instance so the advertised data gets refreshed.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}
3810 
/* hci_cmd_sync callback: refresh advertising after a local name change */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3815 
/* Completion callback for the Set Local Name command: report the
 * outcome to the issuing socket and, on success while LE advertising,
 * queue a refresh of the advertised name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail if this command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3840 
/* hci_cmd_sync callback that pushes the new local name out to the
 * controller: name + EIR for BR/EDR, scan response data for LE.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3856 
/* Handle the Set Local Name mgmt command.
 *
 * When powered off only the stored names are updated and the change is
 * broadcast right away; when powered on the update is queued so the
 * controller state is refreshed (see set_name_sync/set_name_complete).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* dev_name is only updated once the sync request was queued */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3919 
/* hci_cmd_sync callback: refresh advertising after an appearance change */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3924 
/* Handle the Set Appearance mgmt command (LE only). If the value
 * actually changed, queue an advertising refresh and notify other mgmt
 * sockets via extended info changed.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3959 
3960 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3961 				 void *data, u16 len)
3962 {
3963 	struct mgmt_rp_get_phy_configuration rp;
3964 
3965 	bt_dev_dbg(hdev, "sock %p", sk);
3966 
3967 	hci_dev_lock(hdev);
3968 
3969 	memset(&rp, 0, sizeof(rp));
3970 
3971 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3972 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3973 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3974 
3975 	hci_dev_unlock(hdev);
3976 
3977 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3978 				 &rp, sizeof(rp));
3979 }
3980 
3981 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3982 {
3983 	struct mgmt_ev_phy_configuration_changed ev;
3984 
3985 	memset(&ev, 0, sizeof(ev));
3986 
3987 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3988 
3989 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3990 			  sizeof(ev), skip);
3991 }
3992 
/* Completion callback for Set PHY Configuration: derive the final
 * status from the HCI command response stored in cmd->skb by
 * set_default_phy_sync() and report it back to the issuing socket.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail if this command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			/* First byte of the response is the HCI status */
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
4029 
4030 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4031 {
4032 	struct mgmt_pending_cmd *cmd = data;
4033 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4034 	struct hci_cp_le_set_default_phy cp_phy;
4035 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4036 
4037 	memset(&cp_phy, 0, sizeof(cp_phy));
4038 
4039 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4040 		cp_phy.all_phys |= 0x01;
4041 
4042 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4043 		cp_phy.all_phys |= 0x02;
4044 
4045 	if (selected_phys & MGMT_PHY_LE_1M_TX)
4046 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4047 
4048 	if (selected_phys & MGMT_PHY_LE_2M_TX)
4049 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4050 
4051 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
4052 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4053 
4054 	if (selected_phys & MGMT_PHY_LE_1M_RX)
4055 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4056 
4057 	if (selected_phys & MGMT_PHY_LE_2M_RX)
4058 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4059 
4060 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4061 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4062 
4063 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4064 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4065 
4066 	return 0;
4067 }
4068 
/* Handle the Set PHY Configuration mgmt command.
 *
 * The BR/EDR PHY selection is applied locally by adjusting
 * hdev->pkt_type (per-slot/modulation packet type bits); only the LE
 * portion requires an HCI command, which is queued via
 * set_default_phy_sync().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Non-configurable PHYs must always remain selected */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* BR 1M PHYs: selecting a slot count enables its packet types */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	/* EDR packet type bits are inverted: a set bit means the packet
	 * type shall NOT be used, so they are cleared when selected.
	 */
	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE selection is unchanged, no HCI command is needed and
	 * the command can complete right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4197 
/* Handle the Set Blocked Keys mgmt command: replace the device's list
 * of blocked keys with the one supplied by userspace.
 *
 * Note that err carries a MGMT_STATUS_* code here (not a negative
 * errno) and is passed as the status of the command complete. On
 * allocation failure mid-loop the keys added so far remain in place.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count whose total payload still fits in u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces the previous one */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4246 
/* Handle the Set Wideband Speech mgmt command: toggle the
 * HCI_WIDEBAND_SPEECH_ENABLED flag. Changing the setting while the
 * controller is powered is rejected.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only allow changing the value while powered off */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings if the flag actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4295 
/* MGMT_OP_READ_CONTROLLER_CAP: report security capability flags, the
 * maximum encryption key sizes and (when available) the LE TX power
 * range, encoded as EIR-style TLV entries in rp->cap.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];	/* sized to hold all TLVs appended below */
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		/* one signed byte each for min and max */
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4362 
/* Experimental feature UUIDs. The bytes are stored in little-endian
 * order, i.e. reversed relative to the string form in each comment.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4406 
/* MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental features that
 * apply to the given index (or to the non-controller index when hdev
 * is NULL), each with a BIT(0) "enabled" flag. As a side effect the
 * requesting socket is subscribed to future Exp Feature Changed events.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_exp_features_info *rp;
	size_t len;
	u16 idx = 0;
	u32 flags;
	int status;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enough space for 7 features: one slot per conditional block
	 * below (debug, LE simultaneous roles, LL privacy, quality
	 * report, offload codecs, ISO socket, mesh).
	 */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature is global, reported on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) signals that enabling changes supported settings */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));

	kfree(rp);
	return status;
}
4511 
/* Emit an Exp Feature Changed event for the LL privacy (RPA
 * resolution) feature and keep hdev->conn_flags in sync: the
 * DEVICE_PRIVACY connection flag is only offered while LL privacy is
 * enabled on a privacy-mode capable controller.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	/* BIT(1) advertises that the supported settings changed */
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* NOTE(review): conn_flags is modified without an explicit lock
	 * here - confirm callers provide the required synchronization.
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);

}
4532 
4533 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4534 			       bool enabled, struct sock *skip)
4535 {
4536 	struct mgmt_ev_exp_feature_changed ev;
4537 
4538 	memset(&ev, 0, sizeof(ev));
4539 	memcpy(ev.uuid, uuid, 16);
4540 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4541 
4542 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4543 				  &ev, sizeof(ev),
4544 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4545 }
4546 
/* Initializer for one entry of the exp_features[] dispatch table */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4552 
/* The zero key uuid is special. Multiple exp features are set through it.
 * Writing it disables the global debug feature (non-controller index)
 * and, while the controller is powered down, LL privacy.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be disabled while powered down */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4589 
4590 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggle the global Bluetooth debug feature. Only valid on the
 * non-controller index; the parameter is a single boolean octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	/* Changed only when the new value differs from the current one */
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
4635 #endif
4636 
/* Toggle the experimental mesh feature (HCI_MESH_EXPERIMENTAL) for a
 * controller. Disabling it also clears the active HCI_MESH flag.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Disabling the experiment also turns off active mesh */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4687 
/* Toggle the experimental LL privacy (RPA resolution) feature. The
 * controller must be powered down, since enabling/disabling changes
 * the set of supported settings (signalled via BIT(1) in rp.flags).
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Also updates hdev->conn_flags to match the new state */
	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4752 
/* Toggle the quality report experimental feature. Uses the driver's
 * set_quality_report hook when present, otherwise falls back to the
 * AOSP vendor extension. Runs under hci_req_sync_lock() since the
 * enable/disable path issues HCI traffic.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Driver hook takes precedence over the AOSP extension */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only record the new state after the hook succeeded */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4826 
/* Toggle the codec offload experimental feature
 * (HCI_OFFLOAD_CODECS_ENABLED). Requires a driver that provides the
 * get_data_path_id hook.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* Offload needs driver support for querying the data path id */
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4884 
/* Toggle the LE simultaneous central/peripheral roles experimental
 * feature (HCI_LE_SIMULTANEOUS_ROLES). Requires controller support
 * for simultaneous LE states.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4942 
4943 #ifdef CONFIG_BT_LE
/* Toggle the experimental ISO socket support by registering
 * (iso_init) or unregistering (iso_exit) the ISO socket family.
 * Global feature: only valid on the non-controller index.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	/* Only report (and broadcast) a change when init/exit succeeded */
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
4992 #endif
4993 
/* Dispatch table mapping experimental feature UUIDs to their set
 * handlers; scanned linearly by set_exp_feature(). NULL-terminated.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
5015 
5016 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5017 			   void *data, u16 data_len)
5018 {
5019 	struct mgmt_cp_set_exp_feature *cp = data;
5020 	size_t i = 0;
5021 
5022 	bt_dev_dbg(hdev, "sock %p", sk);
5023 
5024 	for (i = 0; exp_features[i].uuid; i++) {
5025 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5026 			return exp_features[i].set_func(sk, hdev, cp, data_len);
5027 	}
5028 
5029 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5030 			       MGMT_OP_SET_EXP_FEATURE,
5031 			       MGMT_STATUS_NOT_SUPPORTED);
5032 }
5033 
5034 static u32 get_params_flags(struct hci_dev *hdev,
5035 			    struct hci_conn_params *params)
5036 {
5037 	u32 flags = hdev->conn_flags;
5038 
5039 	/* Devices using RPAs can only be programmed in the acceptlist if
5040 	 * LL Privacy has been enable otherwise they cannot mark
5041 	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5042 	 */
5043 	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5044 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5045 		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5046 
5047 	return flags;
5048 }
5049 
/* MGMT_OP_GET_DEVICE_FLAGS: report the supported and current
 * connection flags for a device on the accept list (BR/EDR) or in the
 * LE connection parameters.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		/* Unknown device: reply with INVALID_PARAMS */
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* LE params may restrict the supported mask further */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
5101 
5102 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5103 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5104 				 u32 supported_flags, u32 current_flags)
5105 {
5106 	struct mgmt_ev_device_flags_changed ev;
5107 
5108 	bacpy(&ev.addr.bdaddr, bdaddr);
5109 	ev.addr.type = bdaddr_type;
5110 	ev.supported_flags = cpu_to_le32(supported_flags);
5111 	ev.current_flags = cpu_to_le32(current_flags);
5112 
5113 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5114 }
5115 
5116 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5117 			    u16 len)
5118 {
5119 	struct mgmt_cp_set_device_flags *cp = data;
5120 	struct bdaddr_list_with_flags *br_params;
5121 	struct hci_conn_params *params;
5122 	u8 status = MGMT_STATUS_INVALID_PARAMS;
5123 	u32 supported_flags;
5124 	u32 current_flags = __le32_to_cpu(cp->current_flags);
5125 
5126 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5127 		   &cp->addr.bdaddr, cp->addr.type, current_flags);
5128 
5129 	// We should take hci_dev_lock() early, I think.. conn_flags can change
5130 	supported_flags = hdev->conn_flags;
5131 
5132 	if ((supported_flags | current_flags) != supported_flags) {
5133 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5134 			    current_flags, supported_flags);
5135 		goto done;
5136 	}
5137 
5138 	hci_dev_lock(hdev);
5139 
5140 	if (cp->addr.type == BDADDR_BREDR) {
5141 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5142 							      &cp->addr.bdaddr,
5143 							      cp->addr.type);
5144 
5145 		if (br_params) {
5146 			br_params->flags = current_flags;
5147 			status = MGMT_STATUS_SUCCESS;
5148 		} else {
5149 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5150 				    &cp->addr.bdaddr, cp->addr.type);
5151 		}
5152 
5153 		goto unlock;
5154 	}
5155 
5156 	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5157 					le_addr_type(cp->addr.type));
5158 	if (!params) {
5159 		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5160 			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5161 		goto unlock;
5162 	}
5163 
5164 	supported_flags = get_params_flags(hdev, params);
5165 
5166 	if ((supported_flags | current_flags) != supported_flags) {
5167 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5168 			    current_flags, supported_flags);
5169 		goto unlock;
5170 	}
5171 
5172 	WRITE_ONCE(params->flags, current_flags);
5173 	status = MGMT_STATUS_SUCCESS;
5174 
5175 	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5176 	 * has been set.
5177 	 */
5178 	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5179 		hci_update_passive_scan(hdev);
5180 
5181 unlock:
5182 	hci_dev_unlock(hdev);
5183 
5184 done:
5185 	if (status == MGMT_STATUS_SUCCESS)
5186 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5187 				     supported_flags, current_flags);
5188 
5189 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5190 				 &cp->addr, sizeof(cp->addr));
5191 }
5192 
5193 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5194 				   u16 handle)
5195 {
5196 	struct mgmt_ev_adv_monitor_added ev;
5197 
5198 	ev.monitor_handle = cpu_to_le16(handle);
5199 
5200 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5201 }
5202 
/* Broadcast an Advertisement Monitor Removed event for @handle. When
 * a Remove Advertisement Monitor command is pending with a non-zero
 * handle, its socket is skipped so the remover does not also receive
 * the event.
 *
 * NOTE(review): the skip decision only checks that the pending
 * command's handle is non-zero, not that it matches @handle - confirm
 * this is intentional for the remove-all (handle 0) case.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5222 
5223 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5224 				 void *data, u16 len)
5225 {
5226 	struct adv_monitor *monitor = NULL;
5227 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5228 	int handle, err;
5229 	size_t rp_size = 0;
5230 	__u32 supported = 0;
5231 	__u32 enabled = 0;
5232 	__u16 num_handles = 0;
5233 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5234 
5235 	BT_DBG("request for %s", hdev->name);
5236 
5237 	hci_dev_lock(hdev);
5238 
5239 	if (msft_monitor_supported(hdev))
5240 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5241 
5242 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5243 		handles[num_handles++] = monitor->handle;
5244 
5245 	hci_dev_unlock(hdev);
5246 
5247 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5248 	rp = kmalloc(rp_size, GFP_KERNEL);
5249 	if (!rp)
5250 		return -ENOMEM;
5251 
5252 	/* All supported features are currently enabled */
5253 	enabled = supported;
5254 
5255 	rp->supported_features = cpu_to_le32(supported);
5256 	rp->enabled_features = cpu_to_le32(enabled);
5257 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5258 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5259 	rp->num_handles = cpu_to_le16(num_handles);
5260 	if (num_handles)
5261 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5262 
5263 	err = mgmt_cmd_complete(sk, hdev->id,
5264 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
5265 				MGMT_STATUS_SUCCESS, rp, rp_size);
5266 
5267 	kfree(rp);
5268 
5269 	return err;
5270 }
5271 
/* Completion callback for the queued add-monitor work: on success,
 * announce the monitor, bump the count, mark it registered and refresh
 * passive scanning; always reply to the pending command and drop it.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5299 
5300 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5301 {
5302 	struct mgmt_pending_cmd *cmd = data;
5303 	struct adv_monitor *monitor = cmd->user_data;
5304 
5305 	return hci_add_adv_monitor(hdev, monitor);
5306 }
5307 
5308 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5309 				      struct adv_monitor *m, u8 status,
5310 				      void *data, u16 len, u16 op)
5311 {
5312 	struct mgmt_pending_cmd *cmd;
5313 	int err;
5314 
5315 	hci_dev_lock(hdev);
5316 
5317 	if (status)
5318 		goto unlock;
5319 
5320 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
5321 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5322 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5323 	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5324 		status = MGMT_STATUS_BUSY;
5325 		goto unlock;
5326 	}
5327 
5328 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
5329 	if (!cmd) {
5330 		status = MGMT_STATUS_NO_RESOURCES;
5331 		goto unlock;
5332 	}
5333 
5334 	cmd->user_data = m;
5335 	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5336 				 mgmt_add_adv_patterns_monitor_complete);
5337 	if (err) {
5338 		if (err == -ENOMEM)
5339 			status = MGMT_STATUS_NO_RESOURCES;
5340 		else
5341 			status = MGMT_STATUS_FAILED;
5342 
5343 		goto unlock;
5344 	}
5345 
5346 	hci_dev_unlock(hdev);
5347 
5348 	return 0;
5349 
5350 unlock:
5351 	hci_free_adv_monitor(hdev, m);
5352 	hci_dev_unlock(hdev);
5353 	return mgmt_cmd_status(sk, hdev->id, op, status);
5354 }
5355 
5356 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5357 				   struct mgmt_adv_rssi_thresholds *rssi)
5358 {
5359 	if (rssi) {
5360 		m->rssi.low_threshold = rssi->low_threshold;
5361 		m->rssi.low_threshold_timeout =
5362 		    __le16_to_cpu(rssi->low_threshold_timeout);
5363 		m->rssi.high_threshold = rssi->high_threshold;
5364 		m->rssi.high_threshold_timeout =
5365 		    __le16_to_cpu(rssi->high_threshold_timeout);
5366 		m->rssi.sampling_period = rssi->sampling_period;
5367 	} else {
5368 		/* Default values. These numbers are the least constricting
5369 		 * parameters for MSFT API to work, so it behaves as if there
5370 		 * are no rssi parameter to consider. May need to be changed
5371 		 * if other API are to be supported.
5372 		 */
5373 		m->rssi.low_threshold = -127;
5374 		m->rssi.low_threshold_timeout = 60;
5375 		m->rssi.high_threshold = -127;
5376 		m->rssi.high_threshold_timeout = 0;
5377 		m->rssi.sampling_period = 0;
5378 	}
5379 }
5380 
5381 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5382 				    struct mgmt_adv_pattern *patterns)
5383 {
5384 	u8 offset = 0, length = 0;
5385 	struct adv_pattern *p = NULL;
5386 	int i;
5387 
5388 	for (i = 0; i < pattern_count; i++) {
5389 		offset = patterns[i].offset;
5390 		length = patterns[i].length;
5391 		if (offset >= HCI_MAX_AD_LENGTH ||
5392 		    length > HCI_MAX_AD_LENGTH ||
5393 		    (offset + length) > HCI_MAX_AD_LENGTH)
5394 			return MGMT_STATUS_INVALID_PARAMS;
5395 
5396 		p = kmalloc(sizeof(*p), GFP_KERNEL);
5397 		if (!p)
5398 			return MGMT_STATUS_NO_RESOURCES;
5399 
5400 		p->ad_type = patterns[i].ad_type;
5401 		p->offset = patterns[i].offset;
5402 		p->length = patterns[i].length;
5403 		memcpy(p->value, patterns[i].value, p->length);
5404 
5405 		INIT_LIST_HEAD(&p->list);
5406 		list_add(&p->list, &m->patterns);
5407 	}
5408 
5409 	return MGMT_STATUS_SUCCESS;
5410 }
5411 
/* Handle MGMT_OP_ADD_ADV_PATTERNS_MONITOR: validate the variable-length
 * pattern list, build an adv_monitor with default RSSI thresholds, and
 * hand it to __add_adv_patterns_monitor(), which consumes @m (even on
 * error) and sends the reply.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed-size header. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must exactly match the advertised pattern count. */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* This command variant carries no RSSI block; use defaults. */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5448 
/* Handle MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: like
 * add_adv_patterns_monitor() but the request also carries RSSI
 * thresholds, which are copied into the monitor before registration.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* At least one pattern must follow the fixed-size header. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must exactly match the advertised pattern count. */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	/* __add_adv_patterns_monitor() consumes m and sends the reply. */
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5485 
/* Completion callback for Remove Adv Monitor: echo the requested handle
 * back to userspace and refresh passive scanning on success.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Handle is echoed exactly as received (still little-endian). */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5508 
5509 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5510 {
5511 	struct mgmt_pending_cmd *cmd = data;
5512 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5513 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5514 
5515 	if (!handle)
5516 		return hci_remove_all_adv_monitor(hdev);
5517 
5518 	return hci_remove_single_adv_monitor(hdev, handle);
5519 }
5520 
/* Handle MGMT_OP_REMOVE_ADV_MONITOR: queue removal of one (or all)
 * advertisement monitors on the cmd_sync context.  The reply is sent
 * from mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Only one monitor (or LE state) operation may be in flight. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
				 mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Completion will never run; drop the pending command here. */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5566 
/* Completion callback for Read Local OOB Data: translate the HCI reply
 * skb (legacy or extended format, depending on Secure Connections
 * support) into a mgmt response.  Frees the skb and the pending command
 * on all paths.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even with err == 0 the skb itself may be missing, an ERR_PTR,
	 * or carry a non-zero HCI status in its first byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: only the P-192 hash/randomizer pair. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the response. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 pairs. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5633 
5634 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5635 {
5636 	struct mgmt_pending_cmd *cmd = data;
5637 
5638 	if (bredr_sc_enabled(hdev))
5639 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5640 	else
5641 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5642 
5643 	if (IS_ERR(cmd->skb))
5644 		return PTR_ERR(cmd->skb);
5645 	else
5646 		return 0;
5647 }
5648 
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: queue the HCI read on the
 * cmd_sync context; the reply is sent from read_local_oob_data_complete().
 * Requires a powered, SSP-capable controller.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	/* On any queuing failure report FAILED and free the command, since
	 * the completion callback will never run.
	 */
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5690 
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store remote OOB pairing data.
 * Two request sizes are accepted: the legacy form (P-192 only, BR/EDR
 * addresses only) and the extended form (P-192 + P-256); any other
 * length is rejected.  Zero-valued key pairs disable the corresponding
 * OOB data rather than storing zeros.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy request: P-192 data only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended request: P-192 and P-256 data. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5798 
5799 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5800 				  void *data, u16 len)
5801 {
5802 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5803 	u8 status;
5804 	int err;
5805 
5806 	bt_dev_dbg(hdev, "sock %p", sk);
5807 
5808 	if (cp->addr.type != BDADDR_BREDR)
5809 		return mgmt_cmd_complete(sk, hdev->id,
5810 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5811 					 MGMT_STATUS_INVALID_PARAMS,
5812 					 &cp->addr, sizeof(cp->addr));
5813 
5814 	hci_dev_lock(hdev);
5815 
5816 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5817 		hci_remote_oob_data_clear(hdev);
5818 		status = MGMT_STATUS_SUCCESS;
5819 		goto done;
5820 	}
5821 
5822 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5823 	if (err < 0)
5824 		status = MGMT_STATUS_INVALID_PARAMS;
5825 	else
5826 		status = MGMT_STATUS_SUCCESS;
5827 
5828 done:
5829 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5830 				status, &cp->addr, sizeof(cp->addr));
5831 
5832 	hci_dev_unlock(hdev);
5833 	return err;
5834 }
5835 
5836 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5837 {
5838 	struct mgmt_pending_cmd *cmd;
5839 
5840 	bt_dev_dbg(hdev, "status %u", status);
5841 
5842 	hci_dev_lock(hdev);
5843 
5844 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5845 	if (!cmd)
5846 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5847 
5848 	if (!cmd)
5849 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5850 
5851 	if (cmd) {
5852 		cmd->cmd_complete(cmd, mgmt_status(status));
5853 		mgmt_pending_remove(cmd);
5854 	}
5855 
5856 	hci_dev_unlock(hdev);
5857 }
5858 
5859 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5860 				    uint8_t *mgmt_status)
5861 {
5862 	switch (type) {
5863 	case DISCOV_TYPE_LE:
5864 		*mgmt_status = mgmt_le_support(hdev);
5865 		if (*mgmt_status)
5866 			return false;
5867 		break;
5868 	case DISCOV_TYPE_INTERLEAVED:
5869 		*mgmt_status = mgmt_le_support(hdev);
5870 		if (*mgmt_status)
5871 			return false;
5872 		fallthrough;
5873 	case DISCOV_TYPE_BREDR:
5874 		*mgmt_status = mgmt_bredr_support(hdev);
5875 		if (*mgmt_status)
5876 			return false;
5877 		break;
5878 	default:
5879 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5880 		return false;
5881 	}
5882 
5883 	return true;
5884 }
5885 
/* cmd_sync completion for discovery start: reply to the originating
 * socket and move the discovery state machine forward.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command was already taken off the pending list
	 * (e.g. completed or cancelled elsewhere) — cmd would be stale.
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply payload is the single discovery-type byte from cmd->param. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5904 
/* hci_cmd_sync work callback: kick off the actual discovery procedure. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5909 
/* Shared implementation of MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which): validate state,
 * record the discovery parameters, and queue the start on the cmd_sync
 * context.  The reply is sent from start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is on. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	/* Reached on success too: err is 0 from hci_cmd_sync_queue(). */
	hci_dev_unlock(hdev);
	return err;
}
5980 
/* Handle MGMT_OP_START_DISCOVERY (thin wrapper over the shared path). */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5987 
/* Handle MGMT_OP_START_LIMITED_DISCOVERY (thin wrapper; the shared path
 * sets hdev->discovery.limited based on the opcode).
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5995 
/* Handle MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery but with
 * result filtering by RSSI and an optional list of 128-bit service
 * UUIDs appended to the request.  The reply is sent from
 * start_discovery_complete().
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Cap so that sizeof(*cp) + uuid_count * 16 cannot overflow u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already running or periodic inquiry is on. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Total length must exactly match the advertised UUID count. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6107 
6108 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6109 {
6110 	struct mgmt_pending_cmd *cmd;
6111 
6112 	bt_dev_dbg(hdev, "status %u", status);
6113 
6114 	hci_dev_lock(hdev);
6115 
6116 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6117 	if (cmd) {
6118 		cmd->cmd_complete(cmd, mgmt_status(status));
6119 		mgmt_pending_remove(cmd);
6120 	}
6121 
6122 	hci_dev_unlock(hdev);
6123 }
6124 
/* cmd_sync completion for discovery stop: reply to the originating
 * socket and mark discovery stopped on success.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command was already taken off the pending list. */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply payload is the single discovery-type byte from cmd->param. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6141 
/* hci_cmd_sync work callback: perform the actual discovery stop. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6146 
/* Handle MGMT_OP_STOP_DISCOVERY: queue stopping the currently running
 * discovery, which must be active and of the type named in the request.
 * The reply is sent from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the discovery that is running. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6191 
/* Handle MGMT_OP_CONFIRM_NAME: userspace tells us whether the name of a
 * device found during discovery is already known; unknown names are
 * queued for remote name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* The device must be in the inquiry cache with an unknown name. */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Known name: no resolution needed, drop from resolve list. */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Unknown name: schedule it for name resolution. */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
6233 
6234 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6235 			u16 len)
6236 {
6237 	struct mgmt_cp_block_device *cp = data;
6238 	u8 status;
6239 	int err;
6240 
6241 	bt_dev_dbg(hdev, "sock %p", sk);
6242 
6243 	if (!bdaddr_type_is_valid(cp->addr.type))
6244 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6245 					 MGMT_STATUS_INVALID_PARAMS,
6246 					 &cp->addr, sizeof(cp->addr));
6247 
6248 	hci_dev_lock(hdev);
6249 
6250 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6251 				  cp->addr.type);
6252 	if (err < 0) {
6253 		status = MGMT_STATUS_FAILED;
6254 		goto done;
6255 	}
6256 
6257 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6258 		   sk);
6259 	status = MGMT_STATUS_SUCCESS;
6260 
6261 done:
6262 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6263 				&cp->addr, sizeof(cp->addr));
6264 
6265 	hci_dev_unlock(hdev);
6266 
6267 	return err;
6268 }
6269 
6270 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6271 			  u16 len)
6272 {
6273 	struct mgmt_cp_unblock_device *cp = data;
6274 	u8 status;
6275 	int err;
6276 
6277 	bt_dev_dbg(hdev, "sock %p", sk);
6278 
6279 	if (!bdaddr_type_is_valid(cp->addr.type))
6280 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6281 					 MGMT_STATUS_INVALID_PARAMS,
6282 					 &cp->addr, sizeof(cp->addr));
6283 
6284 	hci_dev_lock(hdev);
6285 
6286 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6287 				  cp->addr.type);
6288 	if (err < 0) {
6289 		status = MGMT_STATUS_INVALID_PARAMS;
6290 		goto done;
6291 	}
6292 
6293 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6294 		   sk);
6295 	status = MGMT_STATUS_SUCCESS;
6296 
6297 done:
6298 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6299 				&cp->addr, sizeof(cp->addr));
6300 
6301 	hci_dev_unlock(hdev);
6302 
6303 	return err;
6304 }
6305 
/* hci_cmd_sync work callback: refresh the EIR data after a Device ID
 * change.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6310 
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID record and
 * (best-effort) schedule an EIR update so the new values are
 * advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are accepted. */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* EIR refresh is best-effort; a queueing failure is not reported. */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
6342 
/* Log the outcome of re-enabling an advertising instance. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6350 
/* cmd_sync completion for Set Advertising: sync the HCI_ADVERTISING
 * flag with the controller state, answer all pending Set Advertising
 * commands, and re-enable instance advertising if it was set up before
 * the setting was turned off.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last matching socket. */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance; fall back to the first configured one. */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6398 
/* hci_cmd_sync work for MGMT_OP_SET_ADVERTISING: apply the requested mode
 * to the controller. cp->val is 0x00 (off), 0x01 (on) or 0x02 (on and
 * connectable); validation happened in set_advertising().
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Only mode 0x02 requests connectable advertising */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6432 
/* Handler for MGMT_OP_SET_ADVERTISING: validate the request, and either
 * toggle the flags directly (when no HCI traffic is needed) or queue
 * set_adv_sync() with set_advertising_complete() as completion.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* 0x00 = off, 0x01 = on, 0x02 = on and connectable */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only emit New Settings if a flag actually flipped */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject while another advertising/LE state change is in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6517 
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: store the LE static random
 * address. Only allowed while the controller is powered off; the address
 * must either be BDADDR_ANY (disable) or a valid static random address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		/* All-ones (BDADDR_NONE) is never a valid address */
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6561 
/* Handler for MGMT_OP_SET_SCAN_PARAMS: store the LE scan interval and
 * window. Both values must lie in 0x0004-0x4000 and the window must not
 * exceed the interval. A running background scan is restarted so the new
 * parameters take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6610 
/* Completion callback for the Set Fast Connectable command: on success,
 * update the HCI_FAST_CONNECTABLE flag to match the requested value and
 * notify user space; on failure, return the mapped error status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* cmd was allocated with mgmt_pending_new(), so free (not remove) */
	mgmt_pending_free(cmd);
}
6634 
6635 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6636 {
6637 	struct mgmt_pending_cmd *cmd = data;
6638 	struct mgmt_mode *cp = cmd->param;
6639 
6640 	return hci_write_fast_connectable_sync(hdev, cp->val);
6641 }
6642 
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: requires BR/EDR enabled and a
 * controller of at least Bluetooth 1.2. When powered off the flag is just
 * toggled; otherwise the page-scan change is queued to the controller.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op request: just reply with the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: flip the flag without touching the controller */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6698 
/* Completion callback for the Set BR/EDR command: on failure roll back the
 * HCI_BREDR_ENABLED flag (set optimistically in set_bredr()) and report the
 * error; on success reply with the new settings and notify other sockets.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6721 
6722 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6723 {
6724 	int status;
6725 
6726 	status = hci_write_fast_connectable_sync(hdev, false);
6727 
6728 	if (!status)
6729 		status = hci_update_scan_sync(hdev);
6730 
6731 	/* Since only the advertising data flags will change, there
6732 	 * is no need to update the scan response data.
6733 	 */
6734 	if (!status)
6735 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6736 
6737 	return status;
6738 }
6739 
/* Handler for MGMT_OP_SET_BREDR: enable or disable BR/EDR support on a
 * dual-mode controller. Disabling while powered on is rejected, as is
 * re-enabling when the device operates LE-only with a static address or
 * with secure connections enabled.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op request: just reply with the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears every BR/EDR-only setting */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6840 
/* Completion callback for the Set Secure Connections command: on success
 * apply the requested mode (0x00 off, 0x01 on, 0x02 SC-only) to the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags and notify user space.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6878 
6879 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6880 {
6881 	struct mgmt_pending_cmd *cmd = data;
6882 	struct mgmt_mode *cp = cmd->param;
6883 	u8 val = !!cp->val;
6884 
6885 	/* Force write of val */
6886 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6887 
6888 	return hci_write_sc_support_sync(hdev, val);
6889 }
6890 
/* Handler for MGMT_OP_SET_SECURE_CONN: enable/disable Secure Connections
 * (0x00 off, 0x01 on, 0x02 SC-only). When no controller write is needed
 * (powered off, not SC capable, or BR/EDR disabled) the flags are toggled
 * directly; otherwise the write is queued to the controller.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On BR/EDR, SC is only usable on top of SSP */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No-op request: current flags already match the requested mode */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6971 
/* Handler for MGMT_OP_SET_DEBUG_KEYS: 0x00 discard debug keys, 0x01 keep
 * them, 0x02 additionally generate debug keys (SSP debug mode). The SSP
 * debug mode write is only sent when the device is powered and SSP is on.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value means debug keys are retained */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 enables active use/generation of debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7018 
/* Handler for MGMT_OP_SET_PRIVACY: configure LE privacy (0x00 off, 0x01
 * on, 0x02 limited privacy) and store the supplied IRK. Only allowed
 * while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the RPA as expired so a fresh one gets generated */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7075 
7076 static bool irk_is_valid(struct mgmt_irk_info *irk)
7077 {
7078 	switch (irk->addr.type) {
7079 	case BDADDR_LE_PUBLIC:
7080 		return true;
7081 
7082 	case BDADDR_LE_RANDOM:
7083 		/* Two most significant bits shall be set */
7084 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7085 			return false;
7086 		return true;
7087 	}
7088 
7089 	return false;
7090 }
7091 
/* Handler for MGMT_OP_LOAD_IRKS: replace the stored set of Identity
 * Resolving Keys with the list supplied by user space. The whole list is
 * validated before any existing keys are cleared; blocked keys are
 * skipped with a warning.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate everything up front so the operation is all-or-nothing */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space that loads IRKs is expected to resolve RPAs itself */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7162 
7163 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7164 {
7165 	if (key->initiator != 0x00 && key->initiator != 0x01)
7166 		return false;
7167 
7168 	switch (key->addr.type) {
7169 	case BDADDR_LE_PUBLIC:
7170 		return true;
7171 
7172 	case BDADDR_LE_RANDOM:
7173 		/* Two most significant bits shall be set */
7174 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7175 			return false;
7176 		return true;
7177 	}
7178 
7179 	return false;
7180 }
7181 
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replace the stored set of LE
 * Long Term Keys with the list supplied by user space. The whole list is
 * validated first; blocked keys, debug keys and keys of unknown type are
 * skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate everything up front so the operation is all-or-nothing */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* The fallthrough into default means debug keys are
			 * deliberately never stored; the assignments above
			 * are unused.
			 */
			fallthrough;
		default:
			/* Unknown key type: skip the entry */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7277 
/* Completion callback for Get Connection Information: reply with the
 * connection's RSSI and TX power values on success, or invalid sentinel
 * values alongside the error status on failure.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo the requested address back in the response */
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
7306 
/* hci_cmd_sync work for Get Connection Information: re-validate the
 * connection, then refresh RSSI and (when not yet known) TX power values
 * from the controller.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_conn_info_complete() */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7344 
/* Handler for MGMT_OP_GET_CONN_INFO: return RSSI and TX power for an
 * active connection. Values are served from the per-connection cache when
 * fresh enough; otherwise a controller query is queued.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7435 
/* Completion callback for Get Clock Information: reply with the local
 * clock and, when the connection is still known, the piconet clock and
 * its accuracy. On error only the address is echoed back.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7466 
/* hci_cmd_sync worker for MGMT_OP_GET_CLOCK_INFO.
 *
 * Issues up to two HCI Read Clock commands: first with a zeroed command
 * (which = 0x00, i.e. the local clock; handle unused), then - if the BR/EDR
 * connection named in the request still exists - a second one for that
 * connection's piconet clock (which = 0x01).  The connection pointer is
 * stashed in cmd->user_data for get_clock_info_complete().
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* Zeroed command reads the local clock; its result is cached in
	 * hdev->clock by the event handler.
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7488 
/* Handle MGMT_OP_GET_CLOCK_INFO.
 *
 * Validates the request (BR/EDR address type only, adapter powered, and -
 * when a non-zero address is given - an established ACL connection), then
 * queues get_clock_info_sync(); the reply is sent asynchronously by
 * get_clock_info_complete().  With BDADDR_ANY only the local clock is
 * reported (conn stays NULL).
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the address echo used by every error reply below */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Piconet clocks only exist for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	/* Queueing failed (or allocation failed): reply synchronously and
	 * release the pending command, since the completion callback will
	 * never run.
	 */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7552 
7553 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7554 {
7555 	struct hci_conn *conn;
7556 
7557 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7558 	if (!conn)
7559 		return false;
7560 
7561 	if (conn->dst_type != type)
7562 		return false;
7563 
7564 	if (conn->state != BT_CONNECTED)
7565 		return false;
7566 
7567 	return true;
7568 }
7569 
/* This function requires the caller holds hdev->lock */
/* Set (or change) the auto-connect policy for an LE device, creating the
 * connection parameters entry if it does not exist yet, and re-file the
 * entry on the matching pending list (pend_le_conns / pend_le_reports).
 * Returns 0 on success (including no-op when the policy is unchanged) or
 * -EIO if the params entry could not be created.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Drop from whichever pending list the entry is currently on
	 * before re-filing it according to the new policy.
	 */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if none is established */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7614 
7615 static void device_added(struct sock *sk, struct hci_dev *hdev,
7616 			 bdaddr_t *bdaddr, u8 type, u8 action)
7617 {
7618 	struct mgmt_ev_device_added ev;
7619 
7620 	bacpy(&ev.addr.bdaddr, bdaddr);
7621 	ev.addr.type = type;
7622 	ev.action = action;
7623 
7624 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7625 }
7626 
/* hci_cmd_sync worker for MGMT_OP_ADD_DEVICE: refresh passive scanning so
 * the newly added LE device takes effect (data is unused).
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7631 
/* Handle MGMT_OP_ADD_DEVICE.
 *
 * BR/EDR addresses (action 0x01 only) are appended to the accept list and
 * page scanning is refreshed.  LE identity addresses get a connection
 * parameters entry whose auto-connect policy is derived from the action:
 * 0x00 -> report, 0x01 -> direct connect, 0x02 -> always connect.  On
 * success a Device Added event is broadcast and the device's current flags
 * are announced.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Address type must be known and the address non-zero */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Re-evaluate page scan now that the accept list changed */
		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Kick passive scanning so the new policy is applied */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7733 
7734 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7735 			   bdaddr_t *bdaddr, u8 type)
7736 {
7737 	struct mgmt_ev_device_removed ev;
7738 
7739 	bacpy(&ev.addr.bdaddr, bdaddr);
7740 	ev.addr.type = type;
7741 
7742 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7743 }
7744 
/* hci_cmd_sync worker for MGMT_OP_REMOVE_DEVICE: refresh passive scanning
 * after the device (or all devices) was removed (data is unused).
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7749 
/* Handle MGMT_OP_REMOVE_DEVICE.
 *
 * With a specific address: drop a BR/EDR entry from the accept list, or
 * free the LE connection parameters for an identity address (only entries
 * that were added via Add Device, i.e. not DISABLED/EXPLICIT, may be
 * removed).  With BDADDR_ANY (type must be 0): flush the whole accept list
 * and all removable LE connection parameters.  Device Removed events are
 * broadcast for everything that was dropped.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Re-evaluate page scan now that the accept list
			 * changed.
			 */
			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed
		 * through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY must come with address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a pending explicit connect, but
			 * mark them so they get cleaned up afterwards.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7873 
/* Handle MGMT_OP_LOAD_CONN_PARAM.
 *
 * Validates the wire length against the declared parameter count, clears
 * all disabled connection parameter entries, then loads each supplied LE
 * parameter set.  Individual entries with an invalid address type or
 * out-of-range values are logged and skipped rather than failing the whole
 * command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound so struct_size() below cannot exceed the u16 length */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Loading replaces the disabled set wholesale */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Creates the entry if needed, otherwise reuses it */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7958 
/* Handle MGMT_OP_SET_EXTERNAL_CONFIG.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on a powered-off controller that has
 * the external-configuration quirk.  If the change flips the controller
 * between configured and unconfigured, the management index is re-announced
 * on the other interface (and power-on is scheduled when it just became
 * configured).
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only when the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The flag change may move the controller between the configured
	 * and unconfigured index lists; if so, remove it from the old one
	 * and register it on the new one.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8014 
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS.
 *
 * Records the public address for a powered-off controller whose driver
 * provides a set_bdaddr callback.  If the address changed and this makes
 * the controller fully configured, it migrates from the unconfigured to
 * the configured index list and power-on is scheduled.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Having a public address may complete the configuration; if so,
	 * move the controller to the configured index list and bring it up.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8066 
/* Completion callback for the HCI Read Local OOB (Extended) Data request
 * queued by read_local_ssp_oob_req().
 *
 * Extracts the P-192 and/or P-256 hash/randomizer pairs from the HCI reply
 * skb (legacy or extended layout depending on bredr_sc_enabled()), packs
 * them as EIR fields into the management reply and, on success, also emits
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED to interested sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the command was already taken over / cancelled */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive a status from the skb when the sync request itself
	 * succeeded: missing skb, error pointer, or the HCI status byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 hash/randomizer only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class (5) + C192 field (18) + R192 field (18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply: P-256 always, P-192 unless SC-only */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8189 
8190 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8191 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8192 {
8193 	struct mgmt_pending_cmd *cmd;
8194 	int err;
8195 
8196 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8197 			       cp, sizeof(*cp));
8198 	if (!cmd)
8199 		return -ENOMEM;
8200 
8201 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8202 				 read_local_oob_ext_data_complete);
8203 
8204 	if (err < 0) {
8205 		mgmt_pending_remove(cmd);
8206 		return err;
8207 	}
8208 
8209 	return 0;
8210 }
8211 
/* Handle MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * For BR/EDR with SSP enabled the actual OOB data must come from the
 * controller, so the work is deferred to read_local_ssp_oob_req() and the
 * reply is sent from its completion callback.  All other cases (BR/EDR
 * without SSP, or LE) are answered synchronously with EIR data assembled
 * here.  On success the same payload is also broadcast as
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Work out the worst-case EIR length up front so the reply buffer
	 * can be sized before taking the lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* From here on eir_len tracks the actual bytes written */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Controller must supply the OOB data; reply comes
			 * from the async completion on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will actually see: the static
		 * random address (marked with type 0x01) when forced, when
		 * no public address exists, or when BR/EDR is disabled and
		 * a static address is configured; otherwise the public one.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 = peripheral preferred, 0x01 = central only */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8372 
8373 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8374 {
8375 	u32 flags = 0;
8376 
8377 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8378 	flags |= MGMT_ADV_FLAG_DISCOV;
8379 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8380 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8381 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8382 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8383 	flags |= MGMT_ADV_PARAM_DURATION;
8384 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8385 	flags |= MGMT_ADV_PARAM_INTERVALS;
8386 	flags |= MGMT_ADV_PARAM_TX_POWER;
8387 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8388 
8389 	/* In extended adv TX_POWER returned from Set Adv Param
8390 	 * will be always valid.
8391 	 */
8392 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8393 		flags |= MGMT_ADV_FLAG_TX_POWER;
8394 
8395 	if (ext_adv_capable(hdev)) {
8396 		flags |= MGMT_ADV_FLAG_SEC_1M;
8397 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8398 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8399 
8400 		if (le_2m_capable(hdev))
8401 			flags |= MGMT_ADV_FLAG_SEC_2M;
8402 
8403 		if (le_coded_capable(hdev))
8404 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8405 	}
8406 
8407 	return flags;
8408 }
8409 
/* Handle MGMT_OP_READ_ADV_FEATURES: report the supported advertising
 * flags, data size limits, instance limits and the list of currently
 * registered advertising instance numbers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One byte per registered instance follows the fixed part */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the comparison uses adv_instance_cnt while the
		 * comment above says le_num_of_adv_sets - confirm which bound
		 * is intended before relying on either.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Instance filtered out: shrink the reply to match */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8464 
/* Return the number of EIR bytes the local name field would occupy
 * (computed by rendering it into a scratch buffer).
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return eir_append_local_name(hdev, buf, 0);
}
8471 
8472 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8473 			   bool is_adv_data)
8474 {
8475 	u8 max_len = HCI_MAX_AD_LENGTH;
8476 
8477 	if (is_adv_data) {
8478 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8479 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8480 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8481 			max_len -= 3;
8482 
8483 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8484 			max_len -= 3;
8485 	} else {
8486 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8487 			max_len -= calculate_name_len(hdev);
8488 
8489 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8490 			max_len -= 4;
8491 	}
8492 
8493 	return max_len;
8494 }
8495 
8496 static bool flags_managed(u32 adv_flags)
8497 {
8498 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8499 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8500 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8501 }
8502 
8503 static bool tx_power_managed(u32 adv_flags)
8504 {
8505 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8506 }
8507 
8508 static bool name_managed(u32 adv_flags)
8509 {
8510 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8511 }
8512 
8513 static bool appearance_managed(u32 adv_flags)
8514 {
8515 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8516 }
8517 
8518 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8519 			      u8 len, bool is_adv_data)
8520 {
8521 	int i, cur_len;
8522 	u8 max_len;
8523 
8524 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8525 
8526 	if (len > max_len)
8527 		return false;
8528 
8529 	/* Make sure that the data is correctly formatted. */
8530 	for (i = 0; i < len; i += (cur_len + 1)) {
8531 		cur_len = data[i];
8532 
8533 		if (!cur_len)
8534 			continue;
8535 
8536 		if (data[i + 1] == EIR_FLAGS &&
8537 		    (!is_adv_data || flags_managed(adv_flags)))
8538 			return false;
8539 
8540 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8541 			return false;
8542 
8543 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8544 			return false;
8545 
8546 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8547 			return false;
8548 
8549 		if (data[i + 1] == EIR_APPEARANCE &&
8550 		    appearance_managed(adv_flags))
8551 			return false;
8552 
8553 		/* If the current field length would exceed the total data
8554 		 * length, then it's invalid.
8555 		 */
8556 		if (i + cur_len >= len)
8557 			return false;
8558 	}
8559 
8560 	return true;
8561 }
8562 
8563 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8564 {
8565 	u32 supported_flags, phy_flags;
8566 
8567 	/* The current implementation only supports a subset of the specified
8568 	 * flags. Also need to check mutual exclusiveness of sec flags.
8569 	 */
8570 	supported_flags = get_supported_adv_flags(hdev);
8571 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8572 	if (adv_flags & ~supported_flags ||
8573 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8574 		return false;
8575 
8576 	return true;
8577 }
8578 
8579 static bool adv_busy(struct hci_dev *hdev)
8580 {
8581 	return pending_find(MGMT_OP_SET_LE, hdev);
8582 }
8583 
8584 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8585 			     int err)
8586 {
8587 	struct adv_info *adv, *n;
8588 
8589 	bt_dev_dbg(hdev, "err %d", err);
8590 
8591 	hci_dev_lock(hdev);
8592 
8593 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8594 		u8 instance;
8595 
8596 		if (!adv->pending)
8597 			continue;
8598 
8599 		if (!err) {
8600 			adv->pending = false;
8601 			continue;
8602 		}
8603 
8604 		instance = adv->instance;
8605 
8606 		if (hdev->cur_adv_instance == instance)
8607 			cancel_adv_timeout(hdev);
8608 
8609 		hci_remove_adv_instance(hdev, instance);
8610 		mgmt_advertising_removed(sk, hdev, instance);
8611 	}
8612 
8613 	hci_dev_unlock(hdev);
8614 }
8615 
8616 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8617 {
8618 	struct mgmt_pending_cmd *cmd = data;
8619 	struct mgmt_cp_add_advertising *cp = cmd->param;
8620 	struct mgmt_rp_add_advertising rp;
8621 
8622 	memset(&rp, 0, sizeof(rp));
8623 
8624 	rp.instance = cp->instance;
8625 
8626 	if (err)
8627 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8628 				mgmt_status(err));
8629 	else
8630 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8631 				  mgmt_status(err), &rp, sizeof(rp));
8632 
8633 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8634 
8635 	mgmt_pending_free(cmd);
8636 }
8637 
8638 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8639 {
8640 	struct mgmt_pending_cmd *cmd = data;
8641 	struct mgmt_cp_add_advertising *cp = cmd->param;
8642 
8643 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8644 }
8645 
/* MGMT_OP_ADD_ADVERTISING handler: register advertising instance
 * cp->instance with the supplied flags, timeout, duration and TLV data,
 * then schedule it for transmission when the controller state allows.
 *
 * Returns a negative errno on internal failure, otherwise the result of
 * sending the mgmt status/complete response to @sk.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Only instances 1..le_num_of_adv_sets are valid */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must be exactly adv data followed by
	 * scan response data.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be enforced while the controller is powered */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Both TLV payloads must parse cleanly and must not carry fields
	 * the kernel manages itself for these flags.
	 */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Queue the instance actually being scheduled, which may differ from
	 * the one just added.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8780 
/* Completion callback for MGMT_OP_ADD_EXT_ADV_PARAMS: on success report
 * the instance's TX power and remaining data space; on failure tear the
 * instance down and report the error status.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance may have been removed in the meantime */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8831 
8832 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8833 {
8834 	struct mgmt_pending_cmd *cmd = data;
8835 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8836 
8837 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8838 }
8839 
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: first half of the two-call extended
 * advertising interface. Creates an advertising instance with the given
 * parameters but no data; MGMT_OP_ADD_EXT_ADV_DATA supplies the data.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Only instances 1..le_num_of_adv_sets are valid */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Drop the instance created above */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: nothing to program yet, answer with
		 * defaults right away.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8955 
8956 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8957 {
8958 	struct mgmt_pending_cmd *cmd = data;
8959 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8960 	struct mgmt_rp_add_advertising rp;
8961 
8962 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8963 
8964 	memset(&rp, 0, sizeof(rp));
8965 
8966 	rp.instance = cp->instance;
8967 
8968 	if (err)
8969 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8970 				mgmt_status(err));
8971 	else
8972 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8973 				  mgmt_status(err), &rp, sizeof(rp));
8974 
8975 	mgmt_pending_free(cmd);
8976 }
8977 
8978 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8979 {
8980 	struct mgmt_pending_cmd *cmd = data;
8981 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8982 	int err;
8983 
8984 	if (ext_adv_capable(hdev)) {
8985 		err = hci_update_adv_data_sync(hdev, cp->instance);
8986 		if (err)
8987 			return err;
8988 
8989 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8990 		if (err)
8991 			return err;
8992 
8993 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8994 	}
8995 
8996 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8997 }
8998 
/* MGMT_OP_ADD_EXT_ADV_DATA handler: second half of the extended
 * advertising interface. Attaches advertising and scan response data to
 * the instance previously created by MGMT_OP_ADD_EXT_ADV_PARAMS and
 * schedules it. On validation or queueing failure the instance is removed
 * again (clear_new_instance path).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created via Add Ext Adv Params first */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		/* First data for this instance: announce it to userspace */
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9117 
9118 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9119 					int err)
9120 {
9121 	struct mgmt_pending_cmd *cmd = data;
9122 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9123 	struct mgmt_rp_remove_advertising rp;
9124 
9125 	bt_dev_dbg(hdev, "err %d", err);
9126 
9127 	memset(&rp, 0, sizeof(rp));
9128 	rp.instance = cp->instance;
9129 
9130 	if (err)
9131 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9132 				mgmt_status(err));
9133 	else
9134 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9135 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9136 
9137 	mgmt_pending_free(cmd);
9138 }
9139 
9140 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9141 {
9142 	struct mgmt_pending_cmd *cmd = data;
9143 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9144 	int err;
9145 
9146 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9147 	if (err)
9148 		return err;
9149 
9150 	if (list_empty(&hdev->adv_instances))
9151 		err = hci_disable_advertising_sync(hdev);
9152 
9153 	return err;
9154 }
9155 
/* MGMT_OP_REMOVE_ADVERTISING handler: remove a single advertising
 * instance (cp->instance != 0) or all instances (cp->instance == 0).
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must actually exist */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Advertising state is in flux while Set LE is pending */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered, nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9203 
9204 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9205 			     void *data, u16 data_len)
9206 {
9207 	struct mgmt_cp_get_adv_size_info *cp = data;
9208 	struct mgmt_rp_get_adv_size_info rp;
9209 	u32 flags, supported_flags;
9210 
9211 	bt_dev_dbg(hdev, "sock %p", sk);
9212 
9213 	if (!lmp_le_capable(hdev))
9214 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9215 				       MGMT_STATUS_REJECTED);
9216 
9217 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9218 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9219 				       MGMT_STATUS_INVALID_PARAMS);
9220 
9221 	flags = __le32_to_cpu(cp->flags);
9222 
9223 	/* The current implementation only supports a subset of the specified
9224 	 * flags.
9225 	 */
9226 	supported_flags = get_supported_adv_flags(hdev);
9227 	if (flags & ~supported_flags)
9228 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9229 				       MGMT_STATUS_INVALID_PARAMS);
9230 
9231 	rp.instance = cp->instance;
9232 	rp.flags = cp->flags;
9233 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9234 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9235 
9236 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9237 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9238 }
9239 
/* Command dispatch table: entry N handles MGMT opcode 0x000N (the 0x0000
 * slot is unused). Each entry gives the handler, the expected (or, with
 * HCI_MGMT_VAR_LEN, minimum) parameter size, and flags such as
 * HCI_MGMT_NO_HDEV (no controller index), HCI_MGMT_UNTRUSTED (allowed for
 * unprivileged sockets) and HCI_MGMT_UNCONFIGURED (allowed on
 * unconfigured controllers).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9373 
9374 void mgmt_index_added(struct hci_dev *hdev)
9375 {
9376 	struct mgmt_ev_ext_index ev;
9377 
9378 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9379 		return;
9380 
9381 	switch (hdev->dev_type) {
9382 	case HCI_PRIMARY:
9383 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9384 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9385 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9386 			ev.type = 0x01;
9387 		} else {
9388 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9389 					 HCI_MGMT_INDEX_EVENTS);
9390 			ev.type = 0x00;
9391 		}
9392 		break;
9393 	case HCI_AMP:
9394 		ev.type = 0x02;
9395 		break;
9396 	default:
9397 		return;
9398 	}
9399 
9400 	ev.bus = hdev->bus;
9401 
9402 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9403 			 HCI_MGMT_EXT_INDEX_EVENTS);
9404 }
9405 
/* Announce removal of a controller index to mgmt listeners.
 *
 * For primary controllers all still-pending mgmt commands are first
 * completed with INVALID_INDEX, then the legacy (UNCONF_)INDEX_REMOVED
 * event is sent; every handled device type also gets EXT_INDEX_REMOVED.
 * Raw devices (HCI_QUIRK_RAW_DEVICE) were never exposed and are skipped.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 matches every pending command */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
9447 
/* Completion handler for an asynchronous power-on attempt.
 *
 * On success (err == 0) re-arms the stored LE auto-connect actions and
 * the passive scan, then answers every pending SET_POWERED command with
 * the current settings and emits New Settings. All of it runs under
 * hci_dev_lock.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* settings_rsp also records one requester socket in match.sk */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9470 
/* Power-off bookkeeping for mgmt: answer pending commands, clear the
 * advertised class of device and emit New Settings.
 *
 * NOTE(review): the double-underscore prefix suggests the caller is
 * responsible for locking — confirm against the call sites.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 fails every remaining pending command */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only report a class change if it was not already all-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9504 
9505 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9506 {
9507 	struct mgmt_pending_cmd *cmd;
9508 	u8 status;
9509 
9510 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9511 	if (!cmd)
9512 		return;
9513 
9514 	if (err == -ERFKILL)
9515 		status = MGMT_STATUS_RFKILLED;
9516 	else
9517 		status = MGMT_STATUS_FAILED;
9518 
9519 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9520 
9521 	mgmt_pending_remove(cmd);
9522 }
9523 
9524 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9525 		       bool persistent)
9526 {
9527 	struct mgmt_ev_new_link_key ev;
9528 
9529 	memset(&ev, 0, sizeof(ev));
9530 
9531 	ev.store_hint = persistent;
9532 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9533 	ev.key.addr.type = BDADDR_BREDR;
9534 	ev.key.type = key->type;
9535 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9536 	ev.key.pin_len = key->pin_len;
9537 
9538 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9539 }
9540 
9541 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9542 {
9543 	switch (ltk->type) {
9544 	case SMP_LTK:
9545 	case SMP_LTK_RESPONDER:
9546 		if (ltk->authenticated)
9547 			return MGMT_LTK_AUTHENTICATED;
9548 		return MGMT_LTK_UNAUTHENTICATED;
9549 	case SMP_LTK_P256:
9550 		if (ltk->authenticated)
9551 			return MGMT_LTK_P256_AUTH;
9552 		return MGMT_LTK_P256_UNAUTH;
9553 	case SMP_LTK_P256_DEBUG:
9554 		return MGMT_LTK_P256_DEBUG;
9555 	}
9556 
9557 	return MGMT_LTK_UNAUTHENTICATED;
9558 }
9559 
/* Emit NEW_LONG_TERM_KEY for a freshly distributed LE LTK.
 * The store hint is forced off for non-identity random addresses since
 * such a key cannot be matched to the device after the address rotates.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK is the key distributed by the initiator */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9602 
9603 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9604 {
9605 	struct mgmt_ev_new_irk ev;
9606 
9607 	memset(&ev, 0, sizeof(ev));
9608 
9609 	ev.store_hint = persistent;
9610 
9611 	bacpy(&ev.rpa, &irk->rpa);
9612 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9613 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9614 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9615 
9616 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9617 }
9618 
9619 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9620 		   bool persistent)
9621 {
9622 	struct mgmt_ev_new_csrk ev;
9623 
9624 	memset(&ev, 0, sizeof(ev));
9625 
9626 	/* Devices using resolvable or non-resolvable random addresses
9627 	 * without providing an identity resolving key don't require
9628 	 * to store signature resolving keys. Their addresses will change
9629 	 * the next time around.
9630 	 *
9631 	 * Only when a remote device provides an identity address
9632 	 * make sure the signature resolving key is stored. So allow
9633 	 * static random and public addresses here.
9634 	 */
9635 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9636 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9637 		ev.store_hint = 0x00;
9638 	else
9639 		ev.store_hint = persistent;
9640 
9641 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9642 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9643 	ev.key.type = csrk->type;
9644 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9645 
9646 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9647 }
9648 
9649 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9650 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9651 			 u16 max_interval, u16 latency, u16 timeout)
9652 {
9653 	struct mgmt_ev_new_conn_param ev;
9654 
9655 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9656 		return;
9657 
9658 	memset(&ev, 0, sizeof(ev));
9659 	bacpy(&ev.addr.bdaddr, bdaddr);
9660 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9661 	ev.store_hint = store_hint;
9662 	ev.min_interval = cpu_to_le16(min_interval);
9663 	ev.max_interval = cpu_to_le16(max_interval);
9664 	ev.latency = cpu_to_le16(latency);
9665 	ev.timeout = cpu_to_le16(timeout);
9666 
9667 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9668 }
9669 
9670 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9671 			   u8 *name, u8 name_len)
9672 {
9673 	struct sk_buff *skb;
9674 	struct mgmt_ev_device_connected *ev;
9675 	u16 eir_len = 0;
9676 	u32 flags = 0;
9677 
9678 	/* allocate buff for LE or BR/EDR adv */
9679 	if (conn->le_adv_data_len > 0)
9680 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9681 				     sizeof(*ev) + conn->le_adv_data_len);
9682 	else
9683 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9684 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9685 				     eir_precalc_len(sizeof(conn->dev_class)));
9686 
9687 	ev = skb_put(skb, sizeof(*ev));
9688 	bacpy(&ev->addr.bdaddr, &conn->dst);
9689 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9690 
9691 	if (conn->out)
9692 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9693 
9694 	ev->flags = __cpu_to_le32(flags);
9695 
9696 	/* We must ensure that the EIR Data fields are ordered and
9697 	 * unique. Keep it simple for now and avoid the problem by not
9698 	 * adding any BR/EDR data to the LE adv.
9699 	 */
9700 	if (conn->le_adv_data_len > 0) {
9701 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9702 		eir_len = conn->le_adv_data_len;
9703 	} else {
9704 		if (name)
9705 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9706 
9707 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9708 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9709 						    conn->dev_class, sizeof(conn->dev_class));
9710 	}
9711 
9712 	ev->eir_len = cpu_to_le16(eir_len);
9713 
9714 	mgmt_event_skb(skb, NULL);
9715 }
9716 
9717 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9718 {
9719 	struct sock **sk = data;
9720 
9721 	cmd->cmd_complete(cmd, 0);
9722 
9723 	*sk = cmd->sk;
9724 	sock_hold(*sk);
9725 
9726 	mgmt_pending_remove(cmd);
9727 }
9728 
9729 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9730 {
9731 	struct hci_dev *hdev = data;
9732 	struct mgmt_cp_unpair_device *cp = cmd->param;
9733 
9734 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9735 
9736 	cmd->cmd_complete(cmd, 0);
9737 	mgmt_pending_remove(cmd);
9738 }
9739 
9740 bool mgmt_powering_down(struct hci_dev *hdev)
9741 {
9742 	struct mgmt_pending_cmd *cmd;
9743 	struct mgmt_mode *cp;
9744 
9745 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9746 	if (!cmd)
9747 		return false;
9748 
9749 	cp = cmd->param;
9750 	if (!cp->val)
9751 		return true;
9752 
9753 	return false;
9754 }
9755 
/* Emit DEVICE_DISCONNECTED (and complete related pending commands)
 * when a connection goes away. If this was the last connection during a
 * power-down, the queued power-off work is kicked immediately.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections that were announced via mgmt */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp hands back (and holds) the requester socket so
	 * the event below can be skipped for it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9795 
9796 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9797 			    u8 link_type, u8 addr_type, u8 status)
9798 {
9799 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9800 	struct mgmt_cp_disconnect *cp;
9801 	struct mgmt_pending_cmd *cmd;
9802 
9803 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9804 			     hdev);
9805 
9806 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9807 	if (!cmd)
9808 		return;
9809 
9810 	cp = cmd->param;
9811 
9812 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9813 		return;
9814 
9815 	if (cp->addr.type != bdaddr_type)
9816 		return;
9817 
9818 	cmd->cmd_complete(cmd, mgmt_status(status));
9819 	mgmt_pending_remove(cmd);
9820 }
9821 
9822 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9823 			 u8 addr_type, u8 status)
9824 {
9825 	struct mgmt_ev_connect_failed ev;
9826 
9827 	/* The connection is still in hci_conn_hash so test for 1
9828 	 * instead of 0 to know if this is the last one.
9829 	 */
9830 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9831 		cancel_delayed_work(&hdev->power_off);
9832 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
9833 	}
9834 
9835 	bacpy(&ev.addr.bdaddr, bdaddr);
9836 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9837 	ev.status = mgmt_status(status);
9838 
9839 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9840 }
9841 
9842 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9843 {
9844 	struct mgmt_ev_pin_code_request ev;
9845 
9846 	bacpy(&ev.addr.bdaddr, bdaddr);
9847 	ev.addr.type = BDADDR_BREDR;
9848 	ev.secure = secure;
9849 
9850 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9851 }
9852 
9853 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9854 				  u8 status)
9855 {
9856 	struct mgmt_pending_cmd *cmd;
9857 
9858 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9859 	if (!cmd)
9860 		return;
9861 
9862 	cmd->cmd_complete(cmd, mgmt_status(status));
9863 	mgmt_pending_remove(cmd);
9864 }
9865 
9866 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9867 				      u8 status)
9868 {
9869 	struct mgmt_pending_cmd *cmd;
9870 
9871 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9872 	if (!cmd)
9873 		return;
9874 
9875 	cmd->cmd_complete(cmd, mgmt_status(status));
9876 	mgmt_pending_remove(cmd);
9877 }
9878 
9879 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9880 			      u8 link_type, u8 addr_type, u32 value,
9881 			      u8 confirm_hint)
9882 {
9883 	struct mgmt_ev_user_confirm_request ev;
9884 
9885 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9886 
9887 	bacpy(&ev.addr.bdaddr, bdaddr);
9888 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9889 	ev.confirm_hint = confirm_hint;
9890 	ev.value = cpu_to_le32(value);
9891 
9892 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9893 			  NULL);
9894 }
9895 
9896 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9897 			      u8 link_type, u8 addr_type)
9898 {
9899 	struct mgmt_ev_user_passkey_request ev;
9900 
9901 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9902 
9903 	bacpy(&ev.addr.bdaddr, bdaddr);
9904 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9905 
9906 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9907 			  NULL);
9908 }
9909 
9910 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9911 				      u8 link_type, u8 addr_type, u8 status,
9912 				      u8 opcode)
9913 {
9914 	struct mgmt_pending_cmd *cmd;
9915 
9916 	cmd = pending_find(opcode, hdev);
9917 	if (!cmd)
9918 		return -ENOENT;
9919 
9920 	cmd->cmd_complete(cmd, mgmt_status(status));
9921 	mgmt_pending_remove(cmd);
9922 
9923 	return 0;
9924 }
9925 
/* Resolve a pending USER_CONFIRM_REPLY command with @status */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9932 
/* Resolve a pending USER_CONFIRM_NEG_REPLY command with @status */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9940 
/* Resolve a pending USER_PASSKEY_REPLY command with @status */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9947 
/* Resolve a pending USER_PASSKEY_NEG_REPLY command with @status */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9955 
9956 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9957 			     u8 link_type, u8 addr_type, u32 passkey,
9958 			     u8 entered)
9959 {
9960 	struct mgmt_ev_passkey_notify ev;
9961 
9962 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9963 
9964 	bacpy(&ev.addr.bdaddr, bdaddr);
9965 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9966 	ev.passkey = __cpu_to_le32(passkey);
9967 	ev.entered = entered;
9968 
9969 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9970 }
9971 
/* Emit AUTH_FAILED for a failed authentication and, if a PAIR_DEVICE
 * command was pending for this connection, complete it too. The event
 * skips the requester's own socket since it gets the command response.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	/* Complete the pending pairing only after the event went out */
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9992 
/* Completion handler for enabling/disabling link level security.
 * Syncs the HCI_LINK_SECURITY flag with the controller's HCI_AUTH state
 * and answers any pending SET_LINK_SECURITY commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* test_and_{set,clear} report whether the flag actually flipped,
	 * so New Settings is only sent on a real change.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
10019 
10020 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10021 {
10022 	struct cmd_lookup *match = data;
10023 
10024 	if (match->sk == NULL) {
10025 		match->sk = cmd->sk;
10026 		sock_hold(match->sk);
10027 	}
10028 }
10029 
/* Completion handler for class-of-device updates. Any of the three
 * commands below can have triggered the update; on success the new
 * class is broadcast (skipping the requester's socket found by
 * sk_lookup).
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* Class of device is always 3 bytes */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
10048 
/* Completion handler for a local name change. If no SET_LOCAL_NAME
 * command is pending the change originated in the kernel (e.g. during
 * power-on), in which case hdev->dev_name is synced and events are
 * suppressed while powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the requester's socket; it gets the command response */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10076 
10077 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10078 {
10079 	int i;
10080 
10081 	for (i = 0; i < uuid_count; i++) {
10082 		if (!memcmp(uuid, uuids[i], 16))
10083 			return true;
10084 	}
10085 
10086 	return false;
10087 }
10088 
/* Walk the EIR/advertising data in @eir and return true if any listed
 * service UUID (16/32/128-bit, expanded to 128-bit via the Bluetooth
 * base UUID) matches an entry in the @uuids filter list.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field rather than reading past it */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs live at offsets 12-13 (LE order) of
			 * the base UUID
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs occupy offsets 12-15 of the base UUID */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* field_len excludes the length octet itself */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10143 
/* Schedule a delayed LE scan restart to refresh results filtered away
 * by strict duplicate filtering.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* NOTE(review): this appears to skip the restart when it would
	 * land after the scheduled end of the scan window — confirm the
	 * intended direction of this jiffies comparison.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
10158 
/* Apply the service-discovery filters (RSSI threshold and UUID list)
 * to a found device and return whether it should be reported.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10203 
10204 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10205 				  bdaddr_t *bdaddr, u8 addr_type)
10206 {
10207 	struct mgmt_ev_adv_monitor_device_lost ev;
10208 
10209 	ev.monitor_handle = cpu_to_le16(handle);
10210 	bacpy(&ev.addr.bdaddr, bdaddr);
10211 	ev.addr.type = addr_type;
10212 
10213 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10214 		   NULL);
10215 }
10216 
/* Re-emit a DEVICE_FOUND payload as ADV_MONITOR_DEVICE_FOUND, prefixed
 * with the handle of the monitor that matched. @skb is only read, not
 * consumed; failures are silent (the event is simply not sent).
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Same payload plus the extra monitor_handle field */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10246 
/* Route a found-device report to DEVICE_FOUND and/or the per-monitor
 * ADV_MONITOR_DEVICE_FOUND events. Always consumes @skb: it is either
 * handed to mgmt_event_skb() or freed at the end.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Each monitored device is notified at most once */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		/* Any still-unnotified device keeps the pending flag set */
		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Either forward the skb as DEVICE_FOUND or drop our reference */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10310 
/* Emit MESH_DEVICE_FOUND for an LE advertisement, but only if the
 * advertisement (or scan response) contains at least one of the AD
 * types configured in hdev->mesh_ad_types. An empty first entry in
 * that table disables filtering entirely.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* i walks AD structures: eir[i] is the length, eir[i+1]
		 * the AD type
		 */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* A zero entry terminates the type list */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop the report */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10376 
/* Central entry point for reporting a discovered device over mgmt.
 * Applies mesh forwarding, discovery/passive-scan gating, service
 * discovery filters and limited-discovery filtering before building a
 * DEVICE_FOUND skb, which is then routed (and consumed) by
 * mgmt_adv_monitor_device_found().
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh mode gets its own copy of every LE report */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Append the class of device as an EIR field unless the EIR data
	 * already carries one.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10468 
10469 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10470 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10471 {
10472 	struct sk_buff *skb;
10473 	struct mgmt_ev_device_found *ev;
10474 	u16 eir_len = 0;
10475 	u32 flags = 0;
10476 
10477 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10478 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10479 
10480 	ev = skb_put(skb, sizeof(*ev));
10481 	bacpy(&ev->addr.bdaddr, bdaddr);
10482 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10483 	ev->rssi = rssi;
10484 
10485 	if (name)
10486 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10487 	else
10488 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10489 
10490 	ev->eir_len = cpu_to_le16(eir_len);
10491 	ev->flags = cpu_to_le32(flags);
10492 
10493 	mgmt_event_skb(skb, NULL);
10494 }
10495 
10496 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10497 {
10498 	struct mgmt_ev_discovering ev;
10499 
10500 	bt_dev_dbg(hdev, "discovering %u", discovering);
10501 
10502 	memset(&ev, 0, sizeof(ev));
10503 	ev.type = hdev->discovery.type;
10504 	ev.discovering = discovering;
10505 
10506 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10507 }
10508 
10509 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10510 {
10511 	struct mgmt_ev_controller_suspend ev;
10512 
10513 	ev.suspend_state = state;
10514 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10515 }
10516 
10517 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10518 		   u8 addr_type)
10519 {
10520 	struct mgmt_ev_controller_resume ev;
10521 
10522 	ev.wake_reason = reason;
10523 	if (bdaddr) {
10524 		bacpy(&ev.addr.bdaddr, bdaddr);
10525 		ev.addr.type = addr_type;
10526 	} else {
10527 		memset(&ev.addr, 0, sizeof(ev.addr));
10528 	}
10529 
10530 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10531 }
10532 
/* Registration descriptor for the HCI control channel: routes incoming
 * management commands to mgmt_handlers and sets up per-hdev state via
 * mgmt_init_hdev.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10539 
/* Register the management channel with the HCI socket layer.
 * Returns 0 on success or a negative errno from registration.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10544 
/* Unregister the management channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10549 
10550 void mgmt_cleanup(struct sock *sk)
10551 {
10552 	struct mgmt_mesh_tx *mesh_tx;
10553 	struct hci_dev *hdev;
10554 
10555 	read_lock(&hci_dev_list_lock);
10556 
10557 	list_for_each_entry(hdev, &hci_dev_list, list) {
10558 		do {
10559 			mesh_tx = mgmt_mesh_next(hdev, sk);
10560 
10561 			if (mesh_tx)
10562 				mesh_send_complete(hdev, mesh_tx, true);
10563 		} while (mesh_tx);
10564 	}
10565 
10566 	read_unlock(&hci_dev_list_lock);
10567 }
10568