xref: /openbmc/linux/net/bluetooth/mgmt.c (revision da1d9caf)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Management opcodes available to trusted (privileged) sockets.
 * This table is reported verbatim by MGMT_OP_READ_COMMANDS (see
 * read_commands() below) when the socket has HCI_SOCK_TRUSTED set.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
133 
/* Management events that may be delivered to trusted sockets; reported
 * alongside mgmt_commands[] in the MGMT_OP_READ_COMMANDS reply.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
180 
/* The subset of opcodes an untrusted (unprivileged) socket may invoke:
 * read-only queries, no state-changing commands.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
193 
/* Events that may be delivered to untrusted sockets (no security
 * sensitive material such as keys or peer addresses).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
208 
209 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
210 
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
213 
/* HCI to MGMT error code conversion table.
 *
 * Indexed by the (non-negative) HCI status code; consulted by
 * mgmt_status() below. HCI codes beyond the end of the table fall
 * back to MGMT_STATUS_FAILED.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
281 
282 static u8 mgmt_errno_status(int err)
283 {
284 	switch (err) {
285 	case 0:
286 		return MGMT_STATUS_SUCCESS;
287 	case -EPERM:
288 		return MGMT_STATUS_REJECTED;
289 	case -EINVAL:
290 		return MGMT_STATUS_INVALID_PARAMS;
291 	case -EOPNOTSUPP:
292 		return MGMT_STATUS_NOT_SUPPORTED;
293 	case -EBUSY:
294 		return MGMT_STATUS_BUSY;
295 	case -ETIMEDOUT:
296 		return MGMT_STATUS_AUTH_FAILED;
297 	case -ENOMEM:
298 		return MGMT_STATUS_NO_RESOURCES;
299 	case -EISCONN:
300 		return MGMT_STATUS_ALREADY_CONNECTED;
301 	case -ENOTCONN:
302 		return MGMT_STATUS_DISCONNECTED;
303 	}
304 
305 	return MGMT_STATUS_FAILED;
306 }
307 
308 static u8 mgmt_status(int err)
309 {
310 	if (err < 0)
311 		return mgmt_errno_status(err);
312 
313 	if (err < ARRAY_SIZE(mgmt_status_table))
314 		return mgmt_status_table[err];
315 
316 	return MGMT_STATUS_FAILED;
317 }
318 
/* Broadcast an index-related event on the control channel to sockets
 * matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
325 
/* Broadcast an event on the control channel only to sockets matching
 * @flag, optionally excluding @skip_sk (typically the originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
332 
/* Broadcast an event on the control channel to all trusted sockets,
 * optionally excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
339 
/* Like mgmt_event() but takes a pre-built skb (event header already in
 * place) instead of a raw buffer; delivered to trusted sockets only.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
345 
346 static u8 le_addr_type(u8 mgmt_addr_type)
347 {
348 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
349 		return ADDR_LE_DEV_PUBLIC;
350 	else
351 		return ADDR_LE_DEV_RANDOM;
352 }
353 
/* Fill a mgmt_rp_read_version reply with the implemented MGMT
 * interface version and revision (wire format, revision little-endian).
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
361 
/* MGMT_OP_READ_VERSION handler: reply with the interface version.
 * Index-less command, so MGMT_INDEX_NONE is used in the reply.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
374 
375 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
376 			 u16 data_len)
377 {
378 	struct mgmt_rp_read_commands *rp;
379 	u16 num_commands, num_events;
380 	size_t rp_size;
381 	int i, err;
382 
383 	bt_dev_dbg(hdev, "sock %p", sk);
384 
385 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
386 		num_commands = ARRAY_SIZE(mgmt_commands);
387 		num_events = ARRAY_SIZE(mgmt_events);
388 	} else {
389 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
390 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
391 	}
392 
393 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
394 
395 	rp = kmalloc(rp_size, GFP_KERNEL);
396 	if (!rp)
397 		return -ENOMEM;
398 
399 	rp->num_commands = cpu_to_le16(num_commands);
400 	rp->num_events = cpu_to_le16(num_events);
401 
402 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
403 		__le16 *opcode = rp->opcodes;
404 
405 		for (i = 0; i < num_commands; i++, opcode++)
406 			put_unaligned_le16(mgmt_commands[i], opcode);
407 
408 		for (i = 0; i < num_events; i++, opcode++)
409 			put_unaligned_le16(mgmt_events[i], opcode);
410 	} else {
411 		__le16 *opcode = rp->opcodes;
412 
413 		for (i = 0; i < num_commands; i++, opcode++)
414 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
415 
416 		for (i = 0; i < num_events; i++, opcode++)
417 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
418 	}
419 
420 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
421 				rp, rp_size);
422 	kfree(rp);
423 
424 	return err;
425 }
426 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indexes of all
 * configured primary controllers.
 *
 * Two passes are made under hci_dev_list_lock: the first sizes the
 * reply, the second fills it. The second pass applies additional
 * filters (SETUP/CONFIG/USER_CHANNEL/raw-only), so the first pass may
 * over-count slightly; the buffer is then just a little larger than
 * needed and rp_len is recomputed from the final count.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the dev list read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still being brought up or claimed by a
		 * user channel socket.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
486 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reports only UNCONFIGURED primary controllers. Same two-pass scheme
 * under hci_dev_list_lock; the count pass may over-allocate slightly
 * because the fill pass applies extra filters.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the dev list read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with all primary and AMP
 * controllers, each entry carrying a type (0x00 configured primary,
 * 0x01 unconfigured primary, 0x02 AMP), the bus and the index.
 *
 * Calling this command switches the socket to extended index events
 * only (legacy and unconfigured index events are disabled).
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocating while holding the dev list read lock. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
620 
621 static bool is_configured(struct hci_dev *hdev)
622 {
623 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
624 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
625 		return false;
626 
627 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
628 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
629 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
630 		return false;
631 
632 	return true;
633 }
634 
/* Return the MGMT_OPTION_* bits still required before the controller
 * is configured (the same conditions is_configured() checks), in wire
 * (little-endian) byte order.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
650 
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the currently missing
 * options to sockets that subscribed to option events, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
658 
/* Complete @opcode successfully with the currently missing options as
 * the reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
666 
/* MGMT_OP_READ_CONFIG_INFO handler: reply with the manufacturer id,
 * the configuration options this controller supports (external config
 * quirk, settable public address via set_bdaddr) and which of them are
 * still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A driver-provided set_bdaddr hook means the public address
	 * can be changed from user space.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
694 
/* Build the MGMT_PHY_* bitmask of PHYs this controller supports,
 * derived from the BR/EDR LMP feature bits (basic rate, EDR 2M/3M,
 * 3/5-slot packets) and the LE feature bits (1M always, 2M and Coded
 * when advertised).
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is mandatory for any BR/EDR controller. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for any LE controller. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
746 
/* Build the MGMT_PHY_* bitmask of currently selected PHYs.
 *
 * For BR/EDR, hdev->pkt_type stores which packet types are in use;
 * note the EDR bits (HCI_2DH*/HCI_3DH*) have inverted polarity - a set
 * bit means the packet type is DISABLED, hence the !(pkt_type & ...)
 * tests. For LE, the defaults come straight from le_tx_def_phys and
 * le_rx_def_phys.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
809 
810 static u32 get_configurable_phys(struct hci_dev *hdev)
811 {
812 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
813 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
814 }
815 
/* Build the MGMT_SETTING_* bitmask of settings this controller could
 * support, based on its BR/EDR and LE capabilities and quirks. This is
 * about capability, not current state (see get_current_settings()).
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of transport. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (interlaced page scan) needs >= 1.2. */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
862 
/* Build the MGMT_SETTING_* bitmask reflecting the controller's current
 * state: each bit is set from the corresponding HCI dev flag (or from
 * hdev_is_powered() for MGMT_SETTING_POWERED).
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
933 
/* Look up a pending mgmt command for @opcode on @hdev on the control
 * channel; returns NULL if none is outstanding.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
938 
/* Return the LE advertising flags (LE_AD_GENERAL/LE_AD_LIMITED, or 0
 * for non-discoverable) matching the current - or, if a Set
 * Discoverable command is pending, the upcoming - discoverable mode.
 */
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* 0x01 = general discoverable, 0x02 = limited. */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}
962 
963 bool mgmt_get_connectable(struct hci_dev *hdev)
964 {
965 	struct mgmt_pending_cmd *cmd;
966 
967 	/* If there's a pending mgmt command the flag will not yet have
968 	 * it's final value, so check for this first.
969 	 */
970 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
971 	if (cmd) {
972 		struct mgmt_mode *cp = cmd->param;
973 
974 		return cp->val;
975 	}
976 
977 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
978 }
979 
/* hci_cmd_sync work: flush the cached service changes by refreshing
 * the EIR data and the class of device.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
987 
/* Delayed-work handler for hdev->service_cache: when the cache window
 * ends, queue the EIR/class update - but only if the cache flag was
 * still set (test-and-clear makes this race-free against other paths
 * clearing it first).
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
998 
999 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1000 {
1001 	/* The generation of a new RPA and programming it into the
1002 	 * controller happens in the hci_req_enable_advertising()
1003 	 * function.
1004 	 */
1005 	if (ext_adv_capable(hdev))
1006 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1007 	else
1008 		return hci_enable_advertising_sync(hdev);
1009 }
1010 
/* Delayed-work handler for hdev->rpa_expired: mark the RPA as expired
 * and, if advertising is active, queue the work that rotates it. When
 * not advertising, the flag alone is enough - the next enable path
 * will pick up a fresh RPA.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1025 
/* One-time per-controller mgmt initialisation, run the first time a
 * mgmt socket touches this hdev. Idempotent via the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* test-and-set guarantees the work items below are only ever
	 * initialised once per controller.
	 */
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1041 
/* Handler for MGMT_OP_READ_INFO: snapshot the controller's address,
 * version, settings, class and names under hdev->lock and return them
 * in a single mgmt_rp_read_info reply.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	/* Reply is sent after dropping the lock; rp is a stack copy */
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1071 
1072 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1073 {
1074 	u16 eir_len = 0;
1075 	size_t name_len;
1076 
1077 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1078 		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1079 					  hdev->dev_class, 3);
1080 
1081 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1082 		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1083 					  hdev->appearance);
1084 
1085 	name_len = strlen(hdev->dev_name);
1086 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1087 				  hdev->dev_name, name_len);
1088 
1089 	name_len = strlen(hdev->short_name);
1090 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1091 				  hdev->short_name, name_len);
1092 
1093 	return eir_len;
1094 }
1095 
/* Handler for MGMT_OP_READ_EXT_INFO: like read_controller_info() but
 * with class/appearance/names encoded as a variable-length EIR blob.
 * Also switches this socket over to the extended info event model.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* 512 bytes covers the fixed reply header plus the maximum EIR
	 * payload produced by append_eir_data_to_buf().
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	/* Reply length is the fixed header plus the actual EIR bytes */
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1135 
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (fresh EIR blob) to all sockets
 * that opted into extended info events, excluding @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1151 
1152 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1153 {
1154 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1155 
1156 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1157 				 sizeof(settings));
1158 }
1159 
1160 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1161 {
1162 	struct mgmt_ev_advertising_added ev;
1163 
1164 	ev.instance = instance;
1165 
1166 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1167 }
1168 
1169 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1170 			      u8 instance)
1171 {
1172 	struct mgmt_ev_advertising_removed ev;
1173 
1174 	ev.instance = instance;
1175 
1176 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1177 }
1178 
1179 static void cancel_adv_timeout(struct hci_dev *hdev)
1180 {
1181 	if (hdev->adv_instance_timeout) {
1182 		hdev->adv_instance_timeout = 0;
1183 		cancel_delayed_work(&hdev->adv_instance_expire);
1184 	}
1185 }
1186 
1187 /* This function requires the caller holds hdev->lock */
1188 static void restart_le_actions(struct hci_dev *hdev)
1189 {
1190 	struct hci_conn_params *p;
1191 
1192 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1193 		/* Needed for AUTO_OFF case where might not "really"
1194 		 * have been powered off.
1195 		 */
1196 		list_del_init(&p->action);
1197 
1198 		switch (p->auto_connect) {
1199 		case HCI_AUTO_CONN_DIRECT:
1200 		case HCI_AUTO_CONN_ALWAYS:
1201 			list_add(&p->action, &hdev->pend_le_conns);
1202 			break;
1203 		case HCI_AUTO_CONN_REPORT:
1204 			list_add(&p->action, &hdev->pend_le_reports);
1205 			break;
1206 		default:
1207 			break;
1208 		}
1209 	}
1210 }
1211 
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets subscribed to setting events, excluding @skip (typically
 * the originator of the change, who gets a command reply instead).
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1219 
/* Completion callback for the queued Set Powered operation: answer the
 * originating socket and, on power-on, restore LE auto-connect actions
 * and broadcast the new settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power-on: re-arm LE auto-connections under lock */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1255 
1256 static int set_powered_sync(struct hci_dev *hdev, void *data)
1257 {
1258 	struct mgmt_pending_cmd *cmd = data;
1259 	struct mgmt_mode *cp = cmd->param;
1260 
1261 	BT_DBG("%s", hdev->name);
1262 
1263 	return hci_set_powered_sync(hdev, cp->val);
1264 }
1265 
/* Handler for MGMT_OP_SET_POWERED: validate, reject concurrent power
 * changes, short-circuit no-op requests, and otherwise queue the state
 * change for sync-context execution.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: reply with current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

	/* On queueing failure the completion callback never runs, so
	 * drop the pending entry here.
	 */
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1308 
/* Public wrapper: broadcast the current settings to every subscribed
 * mgmt socket (no socket excluded).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1313 
/* Accumulator passed through mgmt_pending_foreach() callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket (ref held) */
	struct hci_dev *hdev;	/* controller the commands belong to */
	u8 mgmt_status;		/* status to report, where applicable */
};
1319 
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings, remember the first responder's socket in the
 * cmd_lookup (taking a reference), and free the pending entry.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* hold the socket past mgmt_pending_free(); the caller
		 * drops it with sock_put() once iteration is done
		 */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1335 
1336 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1337 {
1338 	u8 *status = data;
1339 
1340 	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1341 	mgmt_pending_remove(cmd);
1342 }
1343 
1344 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1345 {
1346 	if (cmd->cmd_complete) {
1347 		u8 *status = data;
1348 
1349 		cmd->cmd_complete(cmd, *status);
1350 		mgmt_pending_remove(cmd);
1351 
1352 		return;
1353 	}
1354 
1355 	cmd_status_rsp(cmd, data);
1356 }
1357 
/* Generic cmd_complete handler: echo the command's own parameters back
 * as the reply payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1363 
/* cmd_complete handler for commands whose parameters start with a
 * mgmt_addr_info: reply with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1369 
1370 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1371 {
1372 	if (!lmp_bredr_capable(hdev))
1373 		return MGMT_STATUS_NOT_SUPPORTED;
1374 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1375 		return MGMT_STATUS_REJECTED;
1376 	else
1377 		return MGMT_STATUS_SUCCESS;
1378 }
1379 
1380 static u8 mgmt_le_support(struct hci_dev *hdev)
1381 {
1382 	if (!lmp_le_capable(hdev))
1383 		return MGMT_STATUS_NOT_SUPPORTED;
1384 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1385 		return MGMT_STATUS_REJECTED;
1386 	else
1387 		return MGMT_STATUS_SUCCESS;
1388 }
1389 
/* Completion callback for the queued Set Discoverable operation:
 * answer the originator, arm the discoverable timeout if one was
 * requested, and broadcast the new settings.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the flag that set_discoverable() set
		 * optimistically before queueing.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the discoverable timeout now that the mode took effect;
	 * the timeout itself was stored by set_discoverable().
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1423 
/* Sync-context worker: push the discoverable state (already stored in
 * the hdev flags by set_discoverable()) to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1430 
/* Handler for MGMT_OP_SET_DISCOVERABLE.
 *
 * val: 0x00 = off, 0x01 = general discoverable, 0x02 = limited
 * discoverable (which additionally requires a non-zero timeout).
 * Fast paths handle the powered-off case and timeout-only updates
 * without touching the controller; otherwise the flags are updated
 * optimistically and the HCI work is queued for sync context.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running controller to ever fire usefully */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes share scan state, so
	 * neither may be pending while the other runs.
	 */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable only makes sense on a connectable controller */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: just flip the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Flags are set optimistically; the completion handler rolls
	 * back HCI_LIMITED_DISCOVERABLE on failure.
	 */
	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1563 
1564 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1565 					  int err)
1566 {
1567 	struct mgmt_pending_cmd *cmd = data;
1568 
1569 	bt_dev_dbg(hdev, "err %d", err);
1570 
1571 	/* Make sure cmd still outstanding. */
1572 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1573 		return;
1574 
1575 	hci_dev_lock(hdev);
1576 
1577 	if (err) {
1578 		u8 mgmt_err = mgmt_status(err);
1579 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1580 		goto done;
1581 	}
1582 
1583 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1584 	new_settings(hdev, cmd->sk);
1585 
1586 done:
1587 	if (cmd)
1588 		mgmt_pending_remove(cmd);
1589 
1590 	hci_dev_unlock(hdev);
1591 }
1592 
/* Apply a connectable change purely via hdev flags (used when the
 * controller is powered off, so no HCI commands are needed). Clearing
 * connectable also clears discoverable, since discoverable requires
 * connectable. Returns 0 or a negative error.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		/* Scan state depends on connectable, so refresh both
		 * the page scan and the LE passive scan.
		 */
		hci_req_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1621 
/* Sync-context worker: push the connectable state (already stored in
 * the hdev flags by set_connectable()) to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1628 
/* Handler for MGMT_OP_SET_CONNECTABLE: update the flags (optimistically
 * when powered) and queue the HCI update for sync context. Disabling
 * connectable also tears down discoverable state.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: a flags-only update suffices */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Discoverable and connectable changes share scan state, so
	 * neither may be pending while the other runs.
	 */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable implies dropping discoverable,
		 * so also disarm any pending discoverable timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1688 
/* Handler for MGMT_OP_SET_BONDABLE: pure flag change, no pending
 * command needed; refreshes discoverable state since bondable can
 * affect the advertising address in limited privacy mode.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear so we only broadcast on real transitions */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1726 
/* Handler for MGMT_OP_SET_LINK_SECURITY (BR/EDR authentication).
 * When powered off only the flag is toggled; when powered the
 * HCI_OP_WRITE_AUTH_ENABLE command is sent and completion is handled
 * via the pending command machinery.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just reply */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* NOTE(review): this goto falls through to the label it
		 * jumps to and is therefore redundant, but harmless.
		 */
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1795 
/* Completion callback for the queued Set SSP operation: reconcile the
 * SSP (and dependent High Speed) flags with the outcome, answer every
 * pending SET_SSP command, and broadcast settings if anything changed.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back an optimistic enable; HS depends on SSP so
		 * it must be cleared too.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* Disabling SSP always takes HS down with it; "changed"
		 * must reflect either transition.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1844 
/* Sync-context worker: write the SSP mode to the controller. The flag
 * is set optimistically before the write and rolled back if the HCI
 * command fails.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	/* Optimistically set the flag; "changed" remembers whether we
	 * were the ones who set it so a failure can undo exactly that.
	 */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1862 
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing). Powered-off
 * requests are flag-only; otherwise the mode change is queued for sync
 * context. Disabling SSP also clears High Speed, which depends on it.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			/* HS depends on SSP: disabling SSP always clears
			 * HS, and either transition counts as a change.
			 */
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just reply */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1942 
/* Handler for MGMT_OP_SET_HS (High Speed / AMP). Pure flag change —
 * no HCI traffic — but gated on CONFIG_BT_HS, BR/EDR support, SSP
 * capability and SSP being enabled.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS requires SSP to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An in-flight SSP change could invalidate the checks above */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2003 
2004 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2005 {
2006 	struct cmd_lookup match = { NULL, hdev };
2007 	u8 status = mgmt_status(err);
2008 
2009 	bt_dev_dbg(hdev, "err %d", err);
2010 
2011 	if (status) {
2012 		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2013 							&status);
2014 		return;
2015 	}
2016 
2017 	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2018 
2019 	new_settings(hdev, match.sk);
2020 
2021 	if (match.sk)
2022 		sock_put(match.sk);
2023 }
2024 
/* Sync-context worker: enable or disable LE host support on the
 * controller, tearing down advertising on disable and refreshing
 * advertising data and passive scanning on enable.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: stop any running advertising first */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2066 
/* Handler for MGMT_OP_SET_LE. LE-only controllers cannot have LE
 * switched off (enable is acked gracefully, disable rejected).
 * Powered-off or no-op requests are handled as flag changes; otherwise
 * the change is queued for sync context.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off or already in the requested state: flags only */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* SET_ADVERTISING also manipulates LE state, so neither may
	 * run while the other is pending.
	 */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2158 
2159 /* This is a helper function to test for pending mgmt commands that can
2160  * cause CoD or EIR HCI commands. We can only allow one such pending
2161  * mgmt command at a time since otherwise we cannot easily track what
2162  * the current values are, will be, and based on that calculate if a new
2163  * HCI command needs to be sent and if yes with what value.
2164  */
2165 static bool pending_eir_or_class(struct hci_dev *hdev)
2166 {
2167 	struct mgmt_pending_cmd *cmd;
2168 
2169 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2170 		switch (cmd->opcode) {
2171 		case MGMT_OP_ADD_UUID:
2172 		case MGMT_OP_REMOVE_UUID:
2173 		case MGMT_OP_SET_DEV_CLASS:
2174 		case MGMT_OP_SET_POWERED:
2175 			return true;
2176 		}
2177 	}
2178 
2179 	return false;
2180 }
2181 
2182 static const u8 bluetooth_base_uuid[] = {
2183 			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2184 			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2185 };
2186 
2187 static u8 get_uuid_size(const u8 *uuid)
2188 {
2189 	u32 val;
2190 
2191 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2192 		return 128;
2193 
2194 	val = get_unaligned_le32(&uuid[12]);
2195 	if (val > 0xffff)
2196 		return 32;
2197 
2198 	return 16;
2199 }
2200 
2201 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2202 {
2203 	struct mgmt_pending_cmd *cmd = data;
2204 
2205 	bt_dev_dbg(hdev, "err %d", err);
2206 
2207 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2208 			  mgmt_status(err), hdev->dev_class, 3);
2209 
2210 	mgmt_pending_free(cmd);
2211 }
2212 
/* hci_cmd_sync work for Add UUID: refresh Class of Device first, then
 * the EIR data; the first failure is returned.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2223 
/* Handle the MGMT_OP_ADD_UUID command: record a service UUID and queue
 * the resulting Class of Device / EIR updates. Returns 0 or a negative
 * error; a reply is sent asynchronously via mgmt_class_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	/* Cache the effective UUID width (16/32/128) for EIR generation */
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* mgmt_class_complete() replies and frees cmd once the queued
	 * class/EIR update has run.
	 */
	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2269 
2270 static bool enable_service_cache(struct hci_dev *hdev)
2271 {
2272 	if (!hdev_is_powered(hdev))
2273 		return false;
2274 
2275 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2276 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2277 				   CACHE_TIMEOUT);
2278 		return true;
2279 	}
2280 
2281 	return false;
2282 }
2283 
/* hci_cmd_sync work for Remove UUID: refresh Class of Device first,
 * then the EIR data; the first failure is returned.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2294 
/* Handle the MGMT_OP_REMOVE_UUID command: drop one UUID (or all of
 * them when the all-zero wildcard UUID is given) and queue the
 * resulting Class of Device / EIR updates.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache could be (re)armed, the controller
		 * update is deferred until the cache timer fires, so reply
		 * immediately with the current class.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* mgmt_class_complete() replies and frees cmd when the queued
	 * class/EIR update has run.
	 */
	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2362 
2363 static int set_class_sync(struct hci_dev *hdev, void *data)
2364 {
2365 	int err = 0;
2366 
2367 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2368 		cancel_delayed_work_sync(&hdev->service_cache);
2369 		err = hci_update_eir_sync(hdev);
2370 	}
2371 
2372 	if (err)
2373 		return err;
2374 
2375 	return hci_update_class_sync(hdev);
2376 }
2377 
/* Handle the MGMT_OP_SET_DEV_CLASS command: store a new major/minor
 * Class of Device and, when powered, queue the controller update.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device only exists for BR/EDR capable controllers */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Bits outside the settable class fields (low two bits of minor,
	 * high three bits of major) must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off just remember the values; the controller gets
	 * them during power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* mgmt_class_complete() replies and frees cmd when the queued
	 * update has run.
	 */
	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2429 
/* Handle the MGMT_OP_LOAD_LINK_KEYS command: replace the complete set
 * of stored BR/EDR link keys with the list supplied by userspace and
 * update the "keep debug keys" policy.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count such that the full payload still fits
	 * in a 16-bit length.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before touching the stored key list */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* This is a full replacement: drop all previously stored keys */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the administrative block list are not loaded */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2518 
2519 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2520 			   u8 addr_type, struct sock *skip_sk)
2521 {
2522 	struct mgmt_ev_device_unpaired ev;
2523 
2524 	bacpy(&ev.addr.bdaddr, bdaddr);
2525 	ev.addr.type = addr_type;
2526 
2527 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2528 			  skip_sk);
2529 }
2530 
/* Handle the MGMT_OP_UNPAIR_DEVICE command: remove all pairing data
 * for a device and, when requested, also terminate an existing link.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* Replies always echo the target address back to the caller */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		/* Failure here means there was no stored link key, i.e.
		 * the device was never paired.
		 */
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* No active link, so the stored parameters can go now */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Reply is deferred until the disconnection completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2658 
/* Handle the MGMT_OP_DISCONNECT command: tear down an existing BR/EDR
 * or LE link. The reply is deferred until the disconnection completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Replies always echo the target address back to the caller */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2724 
2725 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2726 {
2727 	switch (link_type) {
2728 	case LE_LINK:
2729 		switch (addr_type) {
2730 		case ADDR_LE_DEV_PUBLIC:
2731 			return BDADDR_LE_PUBLIC;
2732 
2733 		default:
2734 			/* Fallback to LE Random address type */
2735 			return BDADDR_LE_RANDOM;
2736 		}
2737 
2738 	default:
2739 		/* Fallback to BR/EDR type */
2740 		return BDADDR_BREDR;
2741 	}
2742 }
2743 
2744 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2745 			   u16 data_len)
2746 {
2747 	struct mgmt_rp_get_connections *rp;
2748 	struct hci_conn *c;
2749 	int err;
2750 	u16 i;
2751 
2752 	bt_dev_dbg(hdev, "sock %p", sk);
2753 
2754 	hci_dev_lock(hdev);
2755 
2756 	if (!hdev_is_powered(hdev)) {
2757 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2758 				      MGMT_STATUS_NOT_POWERED);
2759 		goto unlock;
2760 	}
2761 
2762 	i = 0;
2763 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2764 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2765 			i++;
2766 	}
2767 
2768 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2769 	if (!rp) {
2770 		err = -ENOMEM;
2771 		goto unlock;
2772 	}
2773 
2774 	i = 0;
2775 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2776 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2777 			continue;
2778 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2779 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2780 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2781 			continue;
2782 		i++;
2783 	}
2784 
2785 	rp->conn_count = cpu_to_le16(i);
2786 
2787 	/* Recalculate length in case of filtered SCO connections, etc */
2788 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2789 				struct_size(rp, addr, i));
2790 
2791 	kfree(rp);
2792 
2793 unlock:
2794 	hci_dev_unlock(hdev);
2795 	return err;
2796 }
2797 
/* Queue an HCI PIN Code Negative Reply for @cp->addr and register a
 * pending mgmt command whose reply is sent when the HCI command
 * completes. Returns 0 or a negative error.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command only carries the remote address */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2818 
/* Handle the MGMT_OP_PIN_CODE_REPLY command: forward a user-supplied
 * PIN code to the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16 digit PIN; anything shorter
	 * is rejected towards the remote with a negative reply.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2880 
/* Handle the MGMT_OP_SET_IO_CAPABILITY command: store the IO
 * capability used for future pairing attempts. Purely a host-side
 * setting; no HCI traffic is generated.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2903 
2904 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2905 {
2906 	struct hci_dev *hdev = conn->hdev;
2907 	struct mgmt_pending_cmd *cmd;
2908 
2909 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2910 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2911 			continue;
2912 
2913 		if (cmd->user_data != conn)
2914 			continue;
2915 
2916 		return cmd;
2917 	}
2918 
2919 	return NULL;
2920 }
2921 
/* Finish a Pair Device command: send the reply carrying @status, tear
 * down the pairing callbacks and drop the connection references taken
 * when the command was created.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drops the hci_conn_drop-balanced hold from pair_device() */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Balances the hci_conn_get() stored in cmd->user_data */
	hci_conn_put(conn);

	return err;
}
2950 
2951 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2952 {
2953 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2954 	struct mgmt_pending_cmd *cmd;
2955 
2956 	cmd = find_pairing(conn);
2957 	if (cmd) {
2958 		cmd->cmd_complete(cmd, status);
2959 		mgmt_pending_remove(cmd);
2960 	}
2961 }
2962 
2963 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2964 {
2965 	struct mgmt_pending_cmd *cmd;
2966 
2967 	BT_DBG("status %u", status);
2968 
2969 	cmd = find_pairing(conn);
2970 	if (!cmd) {
2971 		BT_DBG("Unable to find a pending command");
2972 		return;
2973 	}
2974 
2975 	cmd->cmd_complete(cmd, mgmt_status(status));
2976 	mgmt_pending_remove(cmd);
2977 }
2978 
2979 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2980 {
2981 	struct mgmt_pending_cmd *cmd;
2982 
2983 	BT_DBG("status %u", status);
2984 
2985 	if (!status)
2986 		return;
2987 
2988 	cmd = find_pairing(conn);
2989 	if (!cmd) {
2990 		BT_DBG("Unable to find a pending command");
2991 		return;
2992 	}
2993 
2994 	cmd->cmd_complete(cmd, mgmt_status(status));
2995 	mgmt_pending_remove(cmd);
2996 }
2997 
2998 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2999 		       u16 len)
3000 {
3001 	struct mgmt_cp_pair_device *cp = data;
3002 	struct mgmt_rp_pair_device rp;
3003 	struct mgmt_pending_cmd *cmd;
3004 	u8 sec_level, auth_type;
3005 	struct hci_conn *conn;
3006 	int err;
3007 
3008 	bt_dev_dbg(hdev, "sock %p", sk);
3009 
3010 	memset(&rp, 0, sizeof(rp));
3011 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3012 	rp.addr.type = cp->addr.type;
3013 
3014 	if (!bdaddr_type_is_valid(cp->addr.type))
3015 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3016 					 MGMT_STATUS_INVALID_PARAMS,
3017 					 &rp, sizeof(rp));
3018 
3019 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3020 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3021 					 MGMT_STATUS_INVALID_PARAMS,
3022 					 &rp, sizeof(rp));
3023 
3024 	hci_dev_lock(hdev);
3025 
3026 	if (!hdev_is_powered(hdev)) {
3027 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3028 					MGMT_STATUS_NOT_POWERED, &rp,
3029 					sizeof(rp));
3030 		goto unlock;
3031 	}
3032 
3033 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3034 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3035 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3036 					sizeof(rp));
3037 		goto unlock;
3038 	}
3039 
3040 	sec_level = BT_SECURITY_MEDIUM;
3041 	auth_type = HCI_AT_DEDICATED_BONDING;
3042 
3043 	if (cp->addr.type == BDADDR_BREDR) {
3044 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3045 				       auth_type, CONN_REASON_PAIR_DEVICE);
3046 	} else {
3047 		u8 addr_type = le_addr_type(cp->addr.type);
3048 		struct hci_conn_params *p;
3049 
3050 		/* When pairing a new device, it is expected to remember
3051 		 * this device for future connections. Adding the connection
3052 		 * parameter information ahead of time allows tracking
3053 		 * of the peripheral preferred values and will speed up any
3054 		 * further connection establishment.
3055 		 *
3056 		 * If connection parameters already exist, then they
3057 		 * will be kept and this function does nothing.
3058 		 */
3059 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3060 
3061 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3062 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3063 
3064 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3065 					   sec_level, HCI_LE_CONN_TIMEOUT,
3066 					   CONN_REASON_PAIR_DEVICE);
3067 	}
3068 
3069 	if (IS_ERR(conn)) {
3070 		int status;
3071 
3072 		if (PTR_ERR(conn) == -EBUSY)
3073 			status = MGMT_STATUS_BUSY;
3074 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3075 			status = MGMT_STATUS_NOT_SUPPORTED;
3076 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3077 			status = MGMT_STATUS_REJECTED;
3078 		else
3079 			status = MGMT_STATUS_CONNECT_FAILED;
3080 
3081 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3082 					status, &rp, sizeof(rp));
3083 		goto unlock;
3084 	}
3085 
3086 	if (conn->connect_cfm_cb) {
3087 		hci_conn_drop(conn);
3088 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3089 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3090 		goto unlock;
3091 	}
3092 
3093 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3094 	if (!cmd) {
3095 		err = -ENOMEM;
3096 		hci_conn_drop(conn);
3097 		goto unlock;
3098 	}
3099 
3100 	cmd->cmd_complete = pairing_complete;
3101 
3102 	/* For LE, just connecting isn't a proof that the pairing finished */
3103 	if (cp->addr.type == BDADDR_BREDR) {
3104 		conn->connect_cfm_cb = pairing_complete_cb;
3105 		conn->security_cfm_cb = pairing_complete_cb;
3106 		conn->disconn_cfm_cb = pairing_complete_cb;
3107 	} else {
3108 		conn->connect_cfm_cb = le_pairing_complete_cb;
3109 		conn->security_cfm_cb = le_pairing_complete_cb;
3110 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3111 	}
3112 
3113 	conn->io_capability = cp->io_cap;
3114 	cmd->user_data = hci_conn_get(conn);
3115 
3116 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3117 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3118 		cmd->cmd_complete(cmd, 0);
3119 		mgmt_pending_remove(cmd);
3120 	}
3121 
3122 	err = 0;
3123 
3124 unlock:
3125 	hci_dev_unlock(hdev);
3126 	return err;
3127 }
3128 
/* Handle the MGMT_OP_CANCEL_PAIR_DEVICE command: abort the pending
 * Pair Device command for the given address, remove any keys created
 * so far and tear down a link that only exists because of the pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* user_data holds the connection the pairing is running on */
	conn = cmd->user_data;

	/* The address must match the pairing actually in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3185 
/* Shared worker for all user pairing responses (PIN negative reply,
 * user confirm pos/neg, passkey pos/neg). LE pairings are answered
 * through SMP; BR/EDR ones by sending @hci_op to the controller.
 *
 * @mgmt_op: mgmt opcode used in the reply towards @sk
 * @hci_op:  HCI opcode sent for BR/EDR connections
 * @passkey: only used when @hci_op is HCI_OP_USER_PASSKEY_REPLY
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP and are answered here
	 * immediately instead of via a pending command.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3256 
3257 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3258 			      void *data, u16 len)
3259 {
3260 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3261 
3262 	bt_dev_dbg(hdev, "sock %p", sk);
3263 
3264 	return user_pairing_resp(sk, hdev, &cp->addr,
3265 				MGMT_OP_PIN_CODE_NEG_REPLY,
3266 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3267 }
3268 
/* Handle MGMT_OP_USER_CONFIRM_REPLY. Unlike the other pairing response
 * handlers in this file, this one validates the exact parameter length
 * before dispatching to user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3284 
3285 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3286 				  void *data, u16 len)
3287 {
3288 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3289 
3290 	bt_dev_dbg(hdev, "sock %p", sk);
3291 
3292 	return user_pairing_resp(sk, hdev, &cp->addr,
3293 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3294 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3295 }
3296 
3297 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3298 			      u16 len)
3299 {
3300 	struct mgmt_cp_user_passkey_reply *cp = data;
3301 
3302 	bt_dev_dbg(hdev, "sock %p", sk);
3303 
3304 	return user_pairing_resp(sk, hdev, &cp->addr,
3305 				 MGMT_OP_USER_PASSKEY_REPLY,
3306 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3307 }
3308 
3309 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3310 				  void *data, u16 len)
3311 {
3312 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3313 
3314 	bt_dev_dbg(hdev, "sock %p", sk);
3315 
3316 	return user_pairing_resp(sk, hdev, &cp->addr,
3317 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3318 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3319 }
3320 
/* If the currently active advertising instance carries any of the given
 * adv flags, cancel its timeout and schedule the next instance so that
 * the (now stale) instance data gets refreshed.
 *
 * Always returns 0; a missing instance is not an error.
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}
3343 
/* hci_cmd_sync callback: refresh advertising instances that include the
 * local name after a name change.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3348 
/* Completion callback for set_name_sync. Responds to the pending
 * MGMT_OP_SET_LOCAL_NAME command and, on success while advertising,
 * queues a refresh of name-carrying advertising instances.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the command was already canceled/replaced */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3373 
/* hci_cmd_sync worker for MGMT_OP_SET_LOCAL_NAME: push the new name to
 * the controller (BR/EDR name + EIR) and refresh the LE scan response
 * data when advertising is active.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3389 
/* Handle MGMT_OP_SET_LOCAL_NAME. If unchanged or the controller is
 * powered off the command completes immediately; otherwise the name
 * update is queued to run via set_name_sync.
 *
 * Note: the "failed" label is also the exit path for the early-success
 * cases above; it only performs the unlock.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name never needs a controller update, store it now */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Store the new name so set_name_sync pushes the updated value */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3452 
/* hci_cmd_sync callback: refresh advertising instances that include the
 * appearance value after it has been changed.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3457 
/* Handle MGMT_OP_SET_APPEARANCE (LE only). Stores the new appearance
 * value and, when it actually changed while LE advertising is active,
 * queues a refresh of appearance-carrying advertising instances.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3492 
3493 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3494 				 void *data, u16 len)
3495 {
3496 	struct mgmt_rp_get_phy_configuration rp;
3497 
3498 	bt_dev_dbg(hdev, "sock %p", sk);
3499 
3500 	hci_dev_lock(hdev);
3501 
3502 	memset(&rp, 0, sizeof(rp));
3503 
3504 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3505 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3506 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3507 
3508 	hci_dev_unlock(hdev);
3509 
3510 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3511 				 &rp, sizeof(rp));
3512 }
3513 
/* Broadcast an MGMT_EV_PHY_CONFIGURATION_CHANGED event carrying the
 * currently selected PHYs to all mgmt sockets except @skip.
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3525 
/* Completion callback for set_default_phy_sync. Derives the final
 * status from the queue error or the HCI command response stored in
 * cmd->skb, responds to the pending command, and on success notifies
 * other mgmt sockets of the new PHY configuration.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if the command was already canceled/replaced */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			/* First byte of the response is the HCI status */
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3562 
3563 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3564 {
3565 	struct mgmt_pending_cmd *cmd = data;
3566 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3567 	struct hci_cp_le_set_default_phy cp_phy;
3568 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3569 
3570 	memset(&cp_phy, 0, sizeof(cp_phy));
3571 
3572 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3573 		cp_phy.all_phys |= 0x01;
3574 
3575 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3576 		cp_phy.all_phys |= 0x02;
3577 
3578 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3579 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3580 
3581 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3582 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3583 
3584 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3585 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3586 
3587 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3588 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3589 
3590 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3591 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3592 
3593 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3594 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3595 
3596 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3597 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3598 
3599 	return 0;
3600 }
3601 
/* Handle MGMT_OP_SET_PHY_CONFIGURATION. BR/EDR PHY selections are
 * applied immediately by adjusting hdev->pkt_type; LE PHY changes are
 * queued to set_default_phy_sync. Rejects selections outside the
 * supported set or ones that try to disable non-configurable PHYs.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Mandatory PHYs cannot be deselected */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* BR 1M multi-slot packet types are enabled by setting their bits */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	/* EDR packet type bits have inverted polarity: a set bit in
	 * pkt_type means "do not use this packet type".
	 */
	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only BR/EDR PHYs changed, no HCI command is needed */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3730 
3731 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3732 			    u16 len)
3733 {
3734 	int err = MGMT_STATUS_SUCCESS;
3735 	struct mgmt_cp_set_blocked_keys *keys = data;
3736 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3737 				   sizeof(struct mgmt_blocked_key_info));
3738 	u16 key_count, expected_len;
3739 	int i;
3740 
3741 	bt_dev_dbg(hdev, "sock %p", sk);
3742 
3743 	key_count = __le16_to_cpu(keys->key_count);
3744 	if (key_count > max_key_count) {
3745 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3746 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3747 				       MGMT_STATUS_INVALID_PARAMS);
3748 	}
3749 
3750 	expected_len = struct_size(keys, keys, key_count);
3751 	if (expected_len != len) {
3752 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3753 			   expected_len, len);
3754 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3755 				       MGMT_STATUS_INVALID_PARAMS);
3756 	}
3757 
3758 	hci_dev_lock(hdev);
3759 
3760 	hci_blocked_keys_clear(hdev);
3761 
3762 	for (i = 0; i < keys->key_count; ++i) {
3763 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3764 
3765 		if (!b) {
3766 			err = MGMT_STATUS_NO_RESOURCES;
3767 			break;
3768 		}
3769 
3770 		b->type = keys->keys[i].type;
3771 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3772 		list_add_rcu(&b->list, &hdev->blocked_keys);
3773 	}
3774 	hci_dev_unlock(hdev);
3775 
3776 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3777 				err, NULL, 0);
3778 }
3779 
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH. The setting can only be toggled
 * while the controller is powered off (or when the requested value is
 * already in effect); it is then reflected in the device flags and the
 * settings response.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Reject changes while powered on */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3828 
/* Handle MGMT_OP_READ_CONTROLLER_CAP: build an EIR-encoded list of
 * security capability flags, key size limits and (when available) the
 * LE TX power range, and return it to userspace.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* 20 bytes is enough for all the EIR entries appended below */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3895 
/* Experimental feature UUIDs. Each array stores the UUID in reversed
 * (little-endian) byte order relative to the string form quoted above
 * its definition.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3927 
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO. Builds the list of
 * experimental features available on this index (or on the global
 * non-controller index when hdev is NULL) with their current state in
 * bit 0 of the per-feature flags.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[102];   /* Enough space for 5 features: 2 + 20 * 5 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* The debug feature only exists on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) signals that the feature can only be changed
		 * while the controller is powered off.
		 */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
4006 
/* Emit an experimental-feature-changed event for the LL privacy (RPA
 * resolution) feature and keep the device-privacy default connection
 * flag in sync with the new state.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	/* BIT(1): feature is only changeable while powered off */
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* FIXME: Do we need to be atomic with the conn_flags? */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);

}
4027 
4028 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4029 			       bool enabled, struct sock *skip)
4030 {
4031 	struct mgmt_ev_exp_feature_changed ev;
4032 
4033 	memset(&ev, 0, sizeof(ev));
4034 	memcpy(ev.uuid, uuid, 16);
4035 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4036 
4037 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4038 				  &ev, sizeof(ev),
4039 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4040 }
4041 
/* Table-entry initializer pairing an experimental feature UUID with
 * its MGMT_OP_SET_EXP_FEATURE handler.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4047 
/* The zero key uuid is special. Multiple exp features are set through it.
 * Setting it disables all experimental features: the debug feature on
 * the non-controller index and LL privacy on a powered-off controller.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4084 
4085 #ifdef CONFIG_BT_FEATURE_DEBUG
/* MGMT_OP_SET_EXP_FEATURE handler for the debug feature. Only valid on
 * the non-controller index with a single boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
4130 #endif
4131 
/* MGMT_OP_SET_EXP_FEATURE handler for LL privacy (RPA resolution).
 * Requires a controller index, a powered-off controller and a single
 * boolean parameter octet.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4196 
/* MGMT_OP_SET_EXP_FEATURE handler for the quality report feature.
 * Toggles the report via the driver's set_quality_report hook when
 * present, otherwise via the AOSP vendor extension.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* The driver hook takes precedence over the AOSP path */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4270 
/* MGMT_OP_SET_EXP_FEATURE handler for the offload codecs feature.
 * Requires driver support via the get_data_path_id hook; only updates
 * the HCI_OFFLOAD_CODECS_ENABLED device flag.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4328 
4329 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4330 					  struct mgmt_cp_set_exp_feature *cp,
4331 					  u16 data_len)
4332 {
4333 	bool val, changed;
4334 	int err;
4335 	struct mgmt_rp_set_exp_feature rp;
4336 
4337 	/* Command requires to use a valid controller index */
4338 	if (!hdev)
4339 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4340 				       MGMT_OP_SET_EXP_FEATURE,
4341 				       MGMT_STATUS_INVALID_INDEX);
4342 
4343 	/* Parameters are limited to a single octet */
4344 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4345 		return mgmt_cmd_status(sk, hdev->id,
4346 				       MGMT_OP_SET_EXP_FEATURE,
4347 				       MGMT_STATUS_INVALID_PARAMS);
4348 
4349 	/* Only boolean on/off is supported */
4350 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4351 		return mgmt_cmd_status(sk, hdev->id,
4352 				       MGMT_OP_SET_EXP_FEATURE,
4353 				       MGMT_STATUS_INVALID_PARAMS);
4354 
4355 	val = !!cp->param[0];
4356 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4357 
4358 	if (!hci_dev_le_state_simultaneous(hdev)) {
4359 		return mgmt_cmd_status(sk, hdev->id,
4360 				       MGMT_OP_SET_EXP_FEATURE,
4361 				       MGMT_STATUS_NOT_SUPPORTED);
4362 	}
4363 
4364 	if (changed) {
4365 		if (val)
4366 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4367 		else
4368 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4369 	}
4370 
4371 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4372 		    val, changed);
4373 
4374 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4375 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4376 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4377 	err = mgmt_cmd_complete(sk, hdev->id,
4378 				MGMT_OP_SET_EXP_FEATURE, 0,
4379 				&rp, sizeof(rp));
4380 
4381 	if (changed)
4382 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4383 
4384 	return err;
4385 }
4386 
/* Dispatch table for MGMT_OP_SET_EXP_FEATURE: maps an experimental
 * feature UUID to its handler.  set_exp_feature() walks this table and
 * invokes the first entry whose UUID matches the command payload.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	/* The all-zero UUID disables all experimental features at once */
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4404 
4405 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4406 			   void *data, u16 data_len)
4407 {
4408 	struct mgmt_cp_set_exp_feature *cp = data;
4409 	size_t i = 0;
4410 
4411 	bt_dev_dbg(hdev, "sock %p", sk);
4412 
4413 	for (i = 0; exp_features[i].uuid; i++) {
4414 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4415 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4416 	}
4417 
4418 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4419 			       MGMT_OP_SET_EXP_FEATURE,
4420 			       MGMT_STATUS_NOT_SUPPORTED);
4421 }
4422 
4423 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4424 			    u16 data_len)
4425 {
4426 	struct mgmt_cp_get_device_flags *cp = data;
4427 	struct mgmt_rp_get_device_flags rp;
4428 	struct bdaddr_list_with_flags *br_params;
4429 	struct hci_conn_params *params;
4430 	u32 supported_flags;
4431 	u32 current_flags = 0;
4432 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4433 
4434 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4435 		   &cp->addr.bdaddr, cp->addr.type);
4436 
4437 	hci_dev_lock(hdev);
4438 
4439 	supported_flags = hdev->conn_flags;
4440 
4441 	memset(&rp, 0, sizeof(rp));
4442 
4443 	if (cp->addr.type == BDADDR_BREDR) {
4444 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4445 							      &cp->addr.bdaddr,
4446 							      cp->addr.type);
4447 		if (!br_params)
4448 			goto done;
4449 
4450 		current_flags = br_params->flags;
4451 	} else {
4452 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4453 						le_addr_type(cp->addr.type));
4454 
4455 		if (!params)
4456 			goto done;
4457 
4458 		current_flags = params->flags;
4459 	}
4460 
4461 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4462 	rp.addr.type = cp->addr.type;
4463 	rp.supported_flags = cpu_to_le32(supported_flags);
4464 	rp.current_flags = cpu_to_le32(current_flags);
4465 
4466 	status = MGMT_STATUS_SUCCESS;
4467 
4468 done:
4469 	hci_dev_unlock(hdev);
4470 
4471 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4472 				&rp, sizeof(rp));
4473 }
4474 
4475 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4476 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4477 				 u32 supported_flags, u32 current_flags)
4478 {
4479 	struct mgmt_ev_device_flags_changed ev;
4480 
4481 	bacpy(&ev.addr.bdaddr, bdaddr);
4482 	ev.addr.type = bdaddr_type;
4483 	ev.supported_flags = cpu_to_le32(supported_flags);
4484 	ev.current_flags = cpu_to_le32(current_flags);
4485 
4486 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4487 }
4488 
4489 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4490 			    u16 len)
4491 {
4492 	struct mgmt_cp_set_device_flags *cp = data;
4493 	struct bdaddr_list_with_flags *br_params;
4494 	struct hci_conn_params *params;
4495 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4496 	u32 supported_flags;
4497 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4498 
4499 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4500 		   &cp->addr.bdaddr, cp->addr.type,
4501 		   __le32_to_cpu(current_flags));
4502 
4503 	// We should take hci_dev_lock() early, I think.. conn_flags can change
4504 	supported_flags = hdev->conn_flags;
4505 
4506 	if ((supported_flags | current_flags) != supported_flags) {
4507 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4508 			    current_flags, supported_flags);
4509 		goto done;
4510 	}
4511 
4512 	hci_dev_lock(hdev);
4513 
4514 	if (cp->addr.type == BDADDR_BREDR) {
4515 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4516 							      &cp->addr.bdaddr,
4517 							      cp->addr.type);
4518 
4519 		if (br_params) {
4520 			br_params->flags = current_flags;
4521 			status = MGMT_STATUS_SUCCESS;
4522 		} else {
4523 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4524 				    &cp->addr.bdaddr, cp->addr.type);
4525 		}
4526 	} else {
4527 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4528 						le_addr_type(cp->addr.type));
4529 		if (params) {
4530 			/* Devices using RPAs can only be programmed in the
4531 			 * acceptlist LL Privacy has been enable otherwise they
4532 			 * cannot mark HCI_CONN_FLAG_REMOTE_WAKEUP.
4533 			 */
4534 			if ((current_flags & HCI_CONN_FLAG_REMOTE_WAKEUP) &&
4535 			    !use_ll_privacy(hdev) &&
4536 			    hci_find_irk_by_addr(hdev, &params->addr,
4537 						 params->addr_type)) {
4538 				bt_dev_warn(hdev,
4539 					    "Cannot set wakeable for RPA");
4540 				goto unlock;
4541 			}
4542 
4543 			params->flags = current_flags;
4544 			status = MGMT_STATUS_SUCCESS;
4545 
4546 			/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4547 			 * has been set.
4548 			 */
4549 			if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
4550 				hci_update_passive_scan(hdev);
4551 		} else {
4552 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4553 				    &cp->addr.bdaddr,
4554 				    le_addr_type(cp->addr.type));
4555 		}
4556 	}
4557 
4558 unlock:
4559 	hci_dev_unlock(hdev);
4560 
4561 done:
4562 	if (status == MGMT_STATUS_SUCCESS)
4563 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4564 				     supported_flags, current_flags);
4565 
4566 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4567 				 &cp->addr, sizeof(cp->addr));
4568 }
4569 
4570 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4571 				   u16 handle)
4572 {
4573 	struct mgmt_ev_adv_monitor_added ev;
4574 
4575 	ev.monitor_handle = cpu_to_le16(handle);
4576 
4577 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4578 }
4579 
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for @handle.
 *
 * If a Remove Advertisement Monitor command is currently pending for a
 * specific (non-zero) handle, the requesting socket is excluded from the
 * broadcast, since it will get the result through its command reply
 * instead.  A pending remove-all (handle 0) does not suppress the event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* cp->monitor_handle is __le16; any non-zero value means
		 * a single-monitor removal is in flight.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
4599 
/* Handle MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled
 * monitor features, capacity limits, and the handles of all currently
 * registered advertisement monitors.
 *
 * The handle list is snapshotted under hci_dev_lock(), then the reply is
 * built and sent without the lock held.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* NOTE(review): assumes the idr never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries, otherwise handles[]
	 * would overflow — confirm against the add path's capacity check.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	/* Reply has a variable-length trailing array of handles */
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4648 
4649 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4650 {
4651 	struct mgmt_rp_add_adv_patterns_monitor rp;
4652 	struct mgmt_pending_cmd *cmd;
4653 	struct adv_monitor *monitor;
4654 	int err = 0;
4655 
4656 	hci_dev_lock(hdev);
4657 
4658 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4659 	if (!cmd) {
4660 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4661 		if (!cmd)
4662 			goto done;
4663 	}
4664 
4665 	monitor = cmd->user_data;
4666 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4667 
4668 	if (!status) {
4669 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4670 		hdev->adv_monitors_cnt++;
4671 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4672 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4673 		hci_update_passive_scan(hdev);
4674 	}
4675 
4676 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4677 				mgmt_status(status), &rp, sizeof(rp));
4678 	mgmt_pending_remove(cmd);
4679 
4680 done:
4681 	hci_dev_unlock(hdev);
4682 	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4683 		   rp.monitor_handle, status);
4684 
4685 	return err;
4686 }
4687 
/* Common back end for both Add Advertisement Patterns Monitor command
 * variants.
 *
 * Ownership of @m: on every error path (bad @status, busy, no
 * resources, hci_add_adv_monitor() failure) @m is freed here via
 * hci_free_adv_monitor().  On success, ownership passes to the monitor
 * framework — either immediately (no controller round trip needed) or
 * once the pending command completes.
 *
 * @status carries any parse error from the caller; non-zero means the
 * command payload was already rejected and @m only needs freeing.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Only one monitor/LE-state-changing operation at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	/* pending == true means the controller must be programmed and the
	 * reply is deferred to mgmt_add_adv_patterns_monitor_complete().
	 */
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	if (!pending) {
		/* Registered purely in software: reply right away */
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4751 
4752 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4753 				   struct mgmt_adv_rssi_thresholds *rssi)
4754 {
4755 	if (rssi) {
4756 		m->rssi.low_threshold = rssi->low_threshold;
4757 		m->rssi.low_threshold_timeout =
4758 		    __le16_to_cpu(rssi->low_threshold_timeout);
4759 		m->rssi.high_threshold = rssi->high_threshold;
4760 		m->rssi.high_threshold_timeout =
4761 		    __le16_to_cpu(rssi->high_threshold_timeout);
4762 		m->rssi.sampling_period = rssi->sampling_period;
4763 	} else {
4764 		/* Default values. These numbers are the least constricting
4765 		 * parameters for MSFT API to work, so it behaves as if there
4766 		 * are no rssi parameter to consider. May need to be changed
4767 		 * if other API are to be supported.
4768 		 */
4769 		m->rssi.low_threshold = -127;
4770 		m->rssi.low_threshold_timeout = 60;
4771 		m->rssi.high_threshold = -127;
4772 		m->rssi.high_threshold_timeout = 0;
4773 		m->rssi.sampling_period = 0;
4774 	}
4775 }
4776 
4777 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4778 				    struct mgmt_adv_pattern *patterns)
4779 {
4780 	u8 offset = 0, length = 0;
4781 	struct adv_pattern *p = NULL;
4782 	int i;
4783 
4784 	for (i = 0; i < pattern_count; i++) {
4785 		offset = patterns[i].offset;
4786 		length = patterns[i].length;
4787 		if (offset >= HCI_MAX_AD_LENGTH ||
4788 		    length > HCI_MAX_AD_LENGTH ||
4789 		    (offset + length) > HCI_MAX_AD_LENGTH)
4790 			return MGMT_STATUS_INVALID_PARAMS;
4791 
4792 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4793 		if (!p)
4794 			return MGMT_STATUS_NO_RESOURCES;
4795 
4796 		p->ad_type = patterns[i].ad_type;
4797 		p->offset = patterns[i].offset;
4798 		p->length = patterns[i].length;
4799 		memcpy(p->value, patterns[i].value, p->length);
4800 
4801 		INIT_LIST_HEAD(&p->list);
4802 		list_add(&p->list, &m->patterns);
4803 	}
4804 
4805 	return MGMT_STATUS_SUCCESS;
4806 }
4807 
4808 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4809 				    void *data, u16 len)
4810 {
4811 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4812 	struct adv_monitor *m = NULL;
4813 	u8 status = MGMT_STATUS_SUCCESS;
4814 	size_t expected_size = sizeof(*cp);
4815 
4816 	BT_DBG("request for %s", hdev->name);
4817 
4818 	if (len <= sizeof(*cp)) {
4819 		status = MGMT_STATUS_INVALID_PARAMS;
4820 		goto done;
4821 	}
4822 
4823 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4824 	if (len != expected_size) {
4825 		status = MGMT_STATUS_INVALID_PARAMS;
4826 		goto done;
4827 	}
4828 
4829 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4830 	if (!m) {
4831 		status = MGMT_STATUS_NO_RESOURCES;
4832 		goto done;
4833 	}
4834 
4835 	INIT_LIST_HEAD(&m->patterns);
4836 
4837 	parse_adv_monitor_rssi(m, NULL);
4838 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4839 
4840 done:
4841 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4842 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4843 }
4844 
4845 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4846 					 void *data, u16 len)
4847 {
4848 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4849 	struct adv_monitor *m = NULL;
4850 	u8 status = MGMT_STATUS_SUCCESS;
4851 	size_t expected_size = sizeof(*cp);
4852 
4853 	BT_DBG("request for %s", hdev->name);
4854 
4855 	if (len <= sizeof(*cp)) {
4856 		status = MGMT_STATUS_INVALID_PARAMS;
4857 		goto done;
4858 	}
4859 
4860 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4861 	if (len != expected_size) {
4862 		status = MGMT_STATUS_INVALID_PARAMS;
4863 		goto done;
4864 	}
4865 
4866 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4867 	if (!m) {
4868 		status = MGMT_STATUS_NO_RESOURCES;
4869 		goto done;
4870 	}
4871 
4872 	INIT_LIST_HEAD(&m->patterns);
4873 
4874 	parse_adv_monitor_rssi(m, &cp->rssi);
4875 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4876 
4877 done:
4878 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4879 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4880 }
4881 
4882 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4883 {
4884 	struct mgmt_rp_remove_adv_monitor rp;
4885 	struct mgmt_cp_remove_adv_monitor *cp;
4886 	struct mgmt_pending_cmd *cmd;
4887 	int err = 0;
4888 
4889 	hci_dev_lock(hdev);
4890 
4891 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4892 	if (!cmd)
4893 		goto done;
4894 
4895 	cp = cmd->param;
4896 	rp.monitor_handle = cp->monitor_handle;
4897 
4898 	if (!status)
4899 		hci_update_passive_scan(hdev);
4900 
4901 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4902 				mgmt_status(status), &rp, sizeof(rp));
4903 	mgmt_pending_remove(cmd);
4904 
4905 done:
4906 	hci_dev_unlock(hdev);
4907 	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4908 		   rp.monitor_handle, status);
4909 
4910 	return err;
4911 }
4912 
/* Handle MGMT_OP_REMOVE_ADV_MONITOR: remove one monitor (non-zero
 * handle) or all monitors (handle 0).
 *
 * If the removal needs a controller round trip, the reply is deferred
 * to mgmt_remove_adv_monitor_complete(); otherwise it is sent here.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Only one monitor/LE-state-changing operation at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Handle 0 is a wildcard meaning "remove every monitor" */
	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		/* NOTE(review): -ENOENT (unknown monitor handle) maps to
		 * MGMT_STATUS_INVALID_INDEX here; INVALID_PARAMS would
		 * arguably fit better — confirm against mgmt-api users.
		 */
		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4977 
/* Completion handler for the Read Local OOB Data request queued by
 * read_local_oob_data().
 *
 * cmd->skb holds the raw HCI response (or an ERR_PTR/NULL on failure).
 * Depending on whether BR/EDR Secure Connections is enabled, the
 * response is the legacy P-192-only format or the extended format with
 * both P-192 and P-256 values; the management reply is shrunk to the
 * P-192-only size in the legacy case.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* If the request itself succeeded, derive the status from the
	 * skb: missing, ERR_PTR, or the HCI status in its first byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy response: P-192 hash/randomizer only */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended response: both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5044 
5045 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5046 {
5047 	struct mgmt_pending_cmd *cmd = data;
5048 
5049 	if (bredr_sc_enabled(hdev))
5050 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5051 	else
5052 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5053 
5054 	if (IS_ERR(cmd->skb))
5055 		return PTR_ERR(cmd->skb);
5056 	else
5057 		return 0;
5058 }
5059 
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: queue a synchronous HCI request
 * for the local OOB pairing data.
 *
 * Requires the controller to be powered and SSP-capable.  The actual
 * reply is sent from read_local_oob_data_complete() once the queued
 * request finishes.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* mgmt_pending_new (not _add): this command is not tracked via
	 * pending_find(), only through the sync-queue callback.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5101 
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA in both its short (P-192 only,
 * BR/EDR only) and extended (P-192 + P-256) forms, distinguished purely
 * by payload length.
 *
 * All-zero hash/randomizer pairs are treated as "no data" for the
 * corresponding curve and stored as NULL.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Short form: legacy P-192 data, valid for BR/EDR only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 and P-256 data */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known payload size: reject */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5209 
5210 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5211 				  void *data, u16 len)
5212 {
5213 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5214 	u8 status;
5215 	int err;
5216 
5217 	bt_dev_dbg(hdev, "sock %p", sk);
5218 
5219 	if (cp->addr.type != BDADDR_BREDR)
5220 		return mgmt_cmd_complete(sk, hdev->id,
5221 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5222 					 MGMT_STATUS_INVALID_PARAMS,
5223 					 &cp->addr, sizeof(cp->addr));
5224 
5225 	hci_dev_lock(hdev);
5226 
5227 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5228 		hci_remote_oob_data_clear(hdev);
5229 		status = MGMT_STATUS_SUCCESS;
5230 		goto done;
5231 	}
5232 
5233 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5234 	if (err < 0)
5235 		status = MGMT_STATUS_INVALID_PARAMS;
5236 	else
5237 		status = MGMT_STATUS_SUCCESS;
5238 
5239 done:
5240 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5241 				status, &cp->addr, sizeof(cp->addr));
5242 
5243 	hci_dev_unlock(hdev);
5244 	return err;
5245 }
5246 
5247 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5248 {
5249 	struct mgmt_pending_cmd *cmd;
5250 
5251 	bt_dev_dbg(hdev, "status %u", status);
5252 
5253 	hci_dev_lock(hdev);
5254 
5255 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5256 	if (!cmd)
5257 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5258 
5259 	if (!cmd)
5260 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5261 
5262 	if (cmd) {
5263 		cmd->cmd_complete(cmd, mgmt_status(status));
5264 		mgmt_pending_remove(cmd);
5265 	}
5266 
5267 	hci_dev_unlock(hdev);
5268 }
5269 
5270 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5271 				    uint8_t *mgmt_status)
5272 {
5273 	switch (type) {
5274 	case DISCOV_TYPE_LE:
5275 		*mgmt_status = mgmt_le_support(hdev);
5276 		if (*mgmt_status)
5277 			return false;
5278 		break;
5279 	case DISCOV_TYPE_INTERLEAVED:
5280 		*mgmt_status = mgmt_le_support(hdev);
5281 		if (*mgmt_status)
5282 			return false;
5283 		fallthrough;
5284 	case DISCOV_TYPE_BREDR:
5285 		*mgmt_status = mgmt_bredr_support(hdev);
5286 		if (*mgmt_status)
5287 			return false;
5288 		break;
5289 	default:
5290 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5291 		return false;
5292 	}
5293 
5294 	return true;
5295 }
5296 
/* hci_cmd_sync completion callback for start_discovery_sync().
 *
 * Completes the pending start-discovery command (any of the three
 * variants) and moves the discovery state machine to FINDING, or back
 * to STOPPED if the request failed.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command was already cancelled/replaced; cmd
	 * may no longer be the tracked pending command.
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply payload is the single discovery-type octet stored in
	 * cmd->param when the command was queued.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5315 
/* hci_cmd_sync work function: thin adapter discarding the unused @data
 * argument before delegating to hci_start_discovery_sync().
 */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5320 
/* Common back end for the three start-discovery commands (@op selects
 * regular, service, or limited discovery).
 *
 * Validates power state, discovery state, discovery type and pause
 * status, then queues the discovery via hci_cmd_sync; the reply is sent
 * from start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if a discovery is already running or periodic inquiry
	 * is active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	/* Success falls through: "failed" only unlocks and returns the
	 * (zero) err from hci_cmd_sync_queue().
	 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
5391 
5392 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5393 			   void *data, u16 len)
5394 {
5395 	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5396 					data, len);
5397 }
5398 
5399 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5400 				   void *data, u16 len)
5401 {
5402 	return start_discovery_internal(sk, hdev,
5403 					MGMT_OP_START_LIMITED_DISCOVERY,
5404 					data, len);
5405 }
5406 
/* MGMT_OP_START_SERVICE_DISCOVERY handler. Like start_discovery() but
 * additionally installs a result filter (RSSI threshold plus an optional
 * list of 128-bit UUIDs, 16 bytes each) in hdev->discovery. The reply for
 * the success path is sent from start_discovery_complete().
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound keeps sizeof(*cp) + uuid_count * 16 within u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must match the advertised UUID count exactly */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Copy the UUID filter list; freed again via
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5518 
/* Notify a pending MGMT_OP_STOP_DISCOVERY command (if any) that the stop
 * procedure finished with @status (an HCI status code).
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5535 
/* Completion callback for the queued stop_discovery_sync() request.
 * Replies to the originating Stop Discovery command and, on success,
 * marks discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command was already cancelled/responded to */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo back the discovery type (first parameter byte) */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
5552 
/* hci_cmd_sync_queue() worker: run the actual discovery stop procedure.
 * The pending command passed in @data is consumed by
 * stop_discovery_complete().
 */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_stop_discovery_sync(hdev);

	return err;
}
5557 
/* MGMT_OP_STOP_DISCOVERY handler. The requested type must match the type
 * of the currently active discovery; the reply for the success path is
 * sent from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5602 
/* MGMT_OP_CONFIRM_NAME handler. User space tells us whether the name of a
 * discovered device is already known, which controls whether a remote name
 * request is still needed for the matching inquiry cache entry.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* Only entries whose name state is still unknown can be confirmed */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolve needed, drop the entry from
		 * the resolve list.
		 */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Name still needed: queue the entry for name resolving */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
5644 
5645 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5646 			u16 len)
5647 {
5648 	struct mgmt_cp_block_device *cp = data;
5649 	u8 status;
5650 	int err;
5651 
5652 	bt_dev_dbg(hdev, "sock %p", sk);
5653 
5654 	if (!bdaddr_type_is_valid(cp->addr.type))
5655 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5656 					 MGMT_STATUS_INVALID_PARAMS,
5657 					 &cp->addr, sizeof(cp->addr));
5658 
5659 	hci_dev_lock(hdev);
5660 
5661 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5662 				  cp->addr.type);
5663 	if (err < 0) {
5664 		status = MGMT_STATUS_FAILED;
5665 		goto done;
5666 	}
5667 
5668 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5669 		   sk);
5670 	status = MGMT_STATUS_SUCCESS;
5671 
5672 done:
5673 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5674 				&cp->addr, sizeof(cp->addr));
5675 
5676 	hci_dev_unlock(hdev);
5677 
5678 	return err;
5679 }
5680 
5681 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5682 			  u16 len)
5683 {
5684 	struct mgmt_cp_unblock_device *cp = data;
5685 	u8 status;
5686 	int err;
5687 
5688 	bt_dev_dbg(hdev, "sock %p", sk);
5689 
5690 	if (!bdaddr_type_is_valid(cp->addr.type))
5691 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5692 					 MGMT_STATUS_INVALID_PARAMS,
5693 					 &cp->addr, sizeof(cp->addr));
5694 
5695 	hci_dev_lock(hdev);
5696 
5697 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5698 				  cp->addr.type);
5699 	if (err < 0) {
5700 		status = MGMT_STATUS_INVALID_PARAMS;
5701 		goto done;
5702 	}
5703 
5704 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5705 		   sk);
5706 	status = MGMT_STATUS_SUCCESS;
5707 
5708 done:
5709 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5710 				&cp->addr, sizeof(cp->addr));
5711 
5712 	hci_dev_unlock(hdev);
5713 
5714 	return err;
5715 }
5716 
/* hci_cmd_sync_queue() worker: regenerate the EIR data so that it picks
 * up the new Device ID record. @data is unused.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_eir_sync(hdev);

	return err;
}
5721 
5722 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5723 			 u16 len)
5724 {
5725 	struct mgmt_cp_set_device_id *cp = data;
5726 	int err;
5727 	__u16 source;
5728 
5729 	bt_dev_dbg(hdev, "sock %p", sk);
5730 
5731 	source = __le16_to_cpu(cp->source);
5732 
5733 	if (source > 0x0002)
5734 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5735 				       MGMT_STATUS_INVALID_PARAMS);
5736 
5737 	hci_dev_lock(hdev);
5738 
5739 	hdev->devid_source = source;
5740 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5741 	hdev->devid_product = __le16_to_cpu(cp->product);
5742 	hdev->devid_version = __le16_to_cpu(cp->version);
5743 
5744 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5745 				NULL, 0);
5746 
5747 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
5748 
5749 	hci_dev_unlock(hdev);
5750 
5751 	return err;
5752 }
5753 
/* Log the outcome of re-enabling instance advertising after Set
 * Advertising was turned off.
 */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
5761 
/* Completion callback for the queued set_adv_sync() request. Updates the
 * HCI_ADVERTISING setting flag, replies to all pending Set Advertising
 * commands and, when advertising was just disabled, re-enables any
 * configured multi-instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail all pending Set Advertising commands with @status */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's actual advertising state into the
	 * MGMT-visible setting flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	/* Fall back to the first configured instance if none is current */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
5809 
/* hci_cmd_sync_queue() worker for Set Advertising. cmd->param is a
 * mgmt_mode where val 0x00 disables advertising, 0x01 enables it and
 * 0x02 enables it as connectable.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Stop any pending instance-advertising rotation first */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
5843 
/* MGMT_OP_SET_ADVERTISING handler. val 0x00 = off, 0x01 = on,
 * 0x02 = connectable. When no HCI traffic is needed the setting flags are
 * toggled directly and the reply sent immediately; otherwise the work is
 * queued and set_advertising_complete() responds.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while another Set Advertising or Set LE is in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5927 
5928 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5929 			      void *data, u16 len)
5930 {
5931 	struct mgmt_cp_set_static_address *cp = data;
5932 	int err;
5933 
5934 	bt_dev_dbg(hdev, "sock %p", sk);
5935 
5936 	if (!lmp_le_capable(hdev))
5937 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5938 				       MGMT_STATUS_NOT_SUPPORTED);
5939 
5940 	if (hdev_is_powered(hdev))
5941 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5942 				       MGMT_STATUS_REJECTED);
5943 
5944 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5945 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5946 			return mgmt_cmd_status(sk, hdev->id,
5947 					       MGMT_OP_SET_STATIC_ADDRESS,
5948 					       MGMT_STATUS_INVALID_PARAMS);
5949 
5950 		/* Two most significant bits shall be set */
5951 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5952 			return mgmt_cmd_status(sk, hdev->id,
5953 					       MGMT_OP_SET_STATIC_ADDRESS,
5954 					       MGMT_STATUS_INVALID_PARAMS);
5955 	}
5956 
5957 	hci_dev_lock(hdev);
5958 
5959 	bacpy(&hdev->static_addr, &cp->bdaddr);
5960 
5961 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5962 	if (err < 0)
5963 		goto unlock;
5964 
5965 	err = new_settings(hdev, sk);
5966 
5967 unlock:
5968 	hci_dev_unlock(hdev);
5969 	return err;
5970 }
5971 
5972 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5973 			   void *data, u16 len)
5974 {
5975 	struct mgmt_cp_set_scan_params *cp = data;
5976 	__u16 interval, window;
5977 	int err;
5978 
5979 	bt_dev_dbg(hdev, "sock %p", sk);
5980 
5981 	if (!lmp_le_capable(hdev))
5982 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5983 				       MGMT_STATUS_NOT_SUPPORTED);
5984 
5985 	interval = __le16_to_cpu(cp->interval);
5986 
5987 	if (interval < 0x0004 || interval > 0x4000)
5988 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5989 				       MGMT_STATUS_INVALID_PARAMS);
5990 
5991 	window = __le16_to_cpu(cp->window);
5992 
5993 	if (window < 0x0004 || window > 0x4000)
5994 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5995 				       MGMT_STATUS_INVALID_PARAMS);
5996 
5997 	if (window > interval)
5998 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5999 				       MGMT_STATUS_INVALID_PARAMS);
6000 
6001 	hci_dev_lock(hdev);
6002 
6003 	hdev->le_scan_interval = interval;
6004 	hdev->le_scan_window = window;
6005 
6006 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6007 				NULL, 0);
6008 
6009 	/* If background scan is running, restart it so new parameters are
6010 	 * loaded.
6011 	 */
6012 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6013 	    hdev->discovery.state == DISCOVERY_STOPPED)
6014 		hci_update_passive_scan(hdev);
6015 
6016 	hci_dev_unlock(hdev);
6017 
6018 	return err;
6019 }
6020 
/* Completion callback for the queued write_fast_connectable_sync()
 * request. Updates the HCI_FAST_CONNECTABLE flag and replies to the
 * originating command.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		/* Only flip the flag once the controller accepted the change */
		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* cmd was created with mgmt_pending_new(), so free (not remove) it */
	mgmt_pending_free(cmd);
}
6044 
6045 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6046 {
6047 	struct mgmt_pending_cmd *cmd = data;
6048 	struct mgmt_mode *cp = cmd->param;
6049 
6050 	return hci_write_fast_connectable_sync(hdev, cp->val);
6051 }
6052 
/* MGMT_OP_SET_FAST_CONNECTABLE handler. Requires BR/EDR and at least
 * Bluetooth 1.2. When powered off only the flag is toggled; otherwise the
 * page-scan change is queued and fast_connectable_complete() responds.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just confirm the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: toggle the flag only, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6108 
/* Completion callback for the queued set_bredr_sync() request. On failure
 * the HCI_BREDR_ENABLED flag (set optimistically in set_bredr()) is
 * rolled back before responding.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* cmd was created with mgmt_pending_new(), so free (not remove) it */
	mgmt_pending_free(cmd);
}
6131 
6132 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6133 {
6134 	int status;
6135 
6136 	status = hci_write_fast_connectable_sync(hdev, false);
6137 
6138 	if (!status)
6139 		status = hci_update_scan_sync(hdev);
6140 
6141 	/* Since only the advertising data flags will change, there
6142 	 * is no need to update the scan response data.
6143 	 */
6144 	if (!status)
6145 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6146 
6147 	return status;
6148 }
6149 
/* MGMT_OP_SET_BREDR handler (dual-mode controllers only). Disabling is
 * allowed only while powered off; re-enabling is rejected when a static
 * address or Secure Connections is in use. The success-path reply is sent
 * from set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just confirm the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* Powered off: toggle flags only, no HCI traffic needed. Turning
	 * BR/EDR off also clears all BR/EDR-dependent settings.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6250 
/* Completion callback for the queued set_secure_conn_sync() request.
 * Translates the requested mode (0x00 off, 0x01 on, 0x02 SC-only) into
 * the HCI_SC_ENABLED/HCI_SC_ONLY flags and replies.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	/* cmd was created with mgmt_pending_new(), so free (not remove) it */
	mgmt_pending_free(cmd);
}
6288 
6289 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6290 {
6291 	struct mgmt_pending_cmd *cmd = data;
6292 	struct mgmt_mode *cp = cmd->param;
6293 	u8 val = !!cp->val;
6294 
6295 	/* Force write of val */
6296 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6297 
6298 	return hci_write_sc_support_sync(hdev, val);
6299 }
6300 
/* MGMT_OP_SET_SECURE_CONN handler. val 0x00 disables Secure Connections,
 * 0x01 enables it and 0x02 enables SC-only mode. When no HCI traffic is
 * needed the flags are toggled directly; otherwise the write is queued
 * and set_secure_conn_complete() responds.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable BR/EDR controller, SC requires SSP to be on */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off, no controller SC support or BR/EDR disabled: toggle
	 * the flags only, no HCI traffic needed.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested: just confirm the current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6381 
/* MGMT_OP_SET_DEBUG_KEYS handler. val 0x00 discards debug keys, 0x01
 * keeps them, 0x02 additionally enables SSP debug mode on the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both mean debug keys are kept */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 turns on active use of debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Propagate the SSP debug mode change to the controller when it
	 * can take effect (powered, SSP enabled and the mode changed).
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6428 
/* MGMT_OP_SET_PRIVACY handler.
 *
 * cp->privacy: 0x00 = disable privacy, 0x01 = enable privacy,
 * 0x02 = enable limited privacy. Enabling installs the provided
 * local IRK. Only permitted while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Changing privacy while powered is rejected */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		/* Install the new local IRK and force RPA regeneration */
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the local IRK when privacy is disabled */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	/* Only emit New Settings if the privacy setting actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6485 
6486 static bool irk_is_valid(struct mgmt_irk_info *irk)
6487 {
6488 	switch (irk->addr.type) {
6489 	case BDADDR_LE_PUBLIC:
6490 		return true;
6491 
6492 	case BDADDR_LE_RANDOM:
6493 		/* Two most significant bits shall be set */
6494 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6495 			return false;
6496 		return true;
6497 	}
6498 
6499 	return false;
6500 }
6501 
/* MGMT_OP_LOAD_IRKS handler.
 *
 * Replaces the entire set of stored Identity Resolving Keys with the
 * list supplied by user space. All entries are validated up front, so
 * a single invalid entry rejects the whole command; keys matching a
 * blocked key are skipped with a warning instead.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound so the struct_size() computation below fits in u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before modifying any state */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space provided IRKs, so it is able to resolve RPAs */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6572 
6573 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6574 {
6575 	if (key->initiator != 0x00 && key->initiator != 0x01)
6576 		return false;
6577 
6578 	switch (key->addr.type) {
6579 	case BDADDR_LE_PUBLIC:
6580 		return true;
6581 
6582 	case BDADDR_LE_RANDOM:
6583 		/* Two most significant bits shall be set */
6584 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6585 			return false;
6586 		return true;
6587 	}
6588 
6589 	return false;
6590 }
6591 
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 *
 * Replaces the entire set of stored LE Long Term Keys with the list
 * supplied by user space. All entries are validated up front, so a
 * single invalid entry rejects the whole command; blocked keys and
 * P-256 debug keys are skipped without failing the command.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound so the struct_size() computation below fits in u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before modifying any state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto SMP type + authentication */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): the fallthrough lands in the default
			 * "continue", so the two assignments above are dead
			 * and debug keys are never stored - presumably
			 * intentional; confirm before changing.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6687 
6688 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
6689 {
6690 	struct mgmt_pending_cmd *cmd = data;
6691 	struct hci_conn *conn = cmd->user_data;
6692 	struct mgmt_cp_get_conn_info *cp = cmd->param;
6693 	struct mgmt_rp_get_conn_info rp;
6694 	u8 status;
6695 
6696 	bt_dev_dbg(hdev, "err %d", err);
6697 
6698 	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
6699 
6700 	status = mgmt_status(err);
6701 	if (status == MGMT_STATUS_SUCCESS) {
6702 		rp.rssi = conn->rssi;
6703 		rp.tx_power = conn->tx_power;
6704 		rp.max_tx_power = conn->max_tx_power;
6705 	} else {
6706 		rp.rssi = HCI_RSSI_INVALID;
6707 		rp.tx_power = HCI_TX_POWER_INVALID;
6708 		rp.max_tx_power = HCI_TX_POWER_INVALID;
6709 	}
6710 
6711 	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
6712 			  &rp, sizeof(rp));
6713 
6714 	if (conn) {
6715 		hci_conn_drop(conn);
6716 		hci_conn_put(conn);
6717 	}
6718 
6719 	mgmt_pending_free(cmd);
6720 }
6721 
/* Runs on the cmd_sync queue for MGMT_OP_GET_CONN_INFO: re-verify the
 * connection is still the one the command was issued for, then refresh
 * RSSI and (where still unknown) TX power from the controller. Returns
 * an mgmt status for a stale connection, otherwise the result of the
 * HCI reads.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
		/* Drop the reference taken in get_conn_info() so the
		 * completion callback does not act on a stale connection.
		 */
		if (cmd->user_data) {
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
		return MGMT_STATUS_NOT_CONNECTED;
	}

	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
6764 
/* MGMT_OP_GET_CONN_INFO handler: return RSSI and TX power for an active
 * connection, refreshing the values from the controller when the cached
 * ones have aged out.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Pick the link type matching the requested address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd)
			err = -ENOMEM;
		else
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		/* Keep the connection alive while the HCI reads run; the
		 * reference is dropped by the sync/complete callbacks.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6858 
6859 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
6860 {
6861 	struct mgmt_pending_cmd *cmd = data;
6862 	struct mgmt_cp_get_clock_info *cp = cmd->param;
6863 	struct mgmt_rp_get_clock_info rp;
6864 	struct hci_conn *conn = cmd->user_data;
6865 	u8 status = mgmt_status(err);
6866 
6867 	bt_dev_dbg(hdev, "err %d", err);
6868 
6869 	memset(&rp, 0, sizeof(rp));
6870 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6871 	rp.addr.type = cp->addr.type;
6872 
6873 	if (err)
6874 		goto complete;
6875 
6876 	rp.local_clock = cpu_to_le32(hdev->clock);
6877 
6878 	if (conn) {
6879 		rp.piconet_clock = cpu_to_le32(conn->clock);
6880 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6881 		hci_conn_drop(conn);
6882 		hci_conn_put(conn);
6883 	}
6884 
6885 complete:
6886 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6887 			  sizeof(rp));
6888 
6889 	mgmt_pending_free(cmd);
6890 }
6891 
/* Runs on the cmd_sync queue for MGMT_OP_GET_CLOCK_INFO: read the local
 * clock and, when the command targeted a connection that still exists,
 * the piconet clock as well.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	/* Zeroed hci_cp means which = 0x00, i.e. the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	err = hci_read_clock_sync(hdev, &hci_cp);

	if (conn) {
		/* Make sure connection still exists */
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);

		if (conn && conn == cmd->user_data &&
		    conn->state == BT_CONNECTED) {
			hci_cp.handle = cpu_to_le16(conn->handle);
			hci_cp.which = 0x01; /* Piconet clock */
			err = hci_read_clock_sync(hdev, &hci_cp);
		} else if (cmd->user_data) {
			/* Connection went away: drop the reference taken in
			 * get_clock_info() so the completion callback does
			 * not use a stale pointer.
			 */
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
	}

	return err;
}
6922 
/* MGMT_OP_GET_CLOCK_INFO handler: read the local clock and, when a peer
 * BR/EDR address is given, the piconet clock of that connection. The
 * HCI reads run asynchronously via the cmd_sync queue.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Only BR/EDR addresses are accepted for this command */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is requested */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);

	} else if (conn) {
		/* Keep the connection alive while the HCI reads run; the
		 * reference is dropped by the sync/complete callbacks.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
6990 
6991 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6992 {
6993 	struct hci_conn *conn;
6994 
6995 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6996 	if (!conn)
6997 		return false;
6998 
6999 	if (conn->dst_type != type)
7000 		return false;
7001 
7002 	if (conn->state != BT_CONNECTED)
7003 		return false;
7004 
7005 	return true;
7006 }
7007 
7008 /* This function requires the caller holds hdev->lock */
/* This function requires the caller holds hdev->lock.
 *
 * Set (creating if necessary) the auto-connect policy of the LE
 * connection parameters for the given address, re-filing the entry
 * on the matching pending-action list. Returns 0 on success or -EIO
 * when the params entry could not be created.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Creates the params entry if it does not exist yet */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Unlink from the current action list before re-filing below */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* A pending explicit connect takes precedence over
		 * passive-scan reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7052 
7053 static void device_added(struct sock *sk, struct hci_dev *hdev,
7054 			 bdaddr_t *bdaddr, u8 type, u8 action)
7055 {
7056 	struct mgmt_ev_device_added ev;
7057 
7058 	bacpy(&ev.addr.bdaddr, bdaddr);
7059 	ev.addr.type = type;
7060 	ev.action = action;
7061 
7062 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7063 }
7064 
/* cmd_sync callback for Add Device: refresh the passive scan state so
 * the new entry takes effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7069 
/* MGMT_OP_ADD_DEVICE handler.
 *
 * cp->action maps to the internal auto-connect policy:
 * 0x00 = HCI_AUTO_CONN_REPORT, 0x01 = HCI_AUTO_CONN_DIRECT (BR/EDR:
 * accept-list entry), 0x02 = HCI_AUTO_CONN_ALWAYS. BR/EDR addresses
 * support only action 0x01.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The address must be valid and must not be BDADDR_ANY */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Refresh passive scanning so the new entry takes effect */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7171 
7172 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7173 			   bdaddr_t *bdaddr, u8 type)
7174 {
7175 	struct mgmt_ev_device_removed ev;
7176 
7177 	bacpy(&ev.addr.bdaddr, bdaddr);
7178 	ev.addr.type = type;
7179 
7180 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7181 }
7182 
/* cmd_sync callback for Remove Device: refresh the passive scan state
 * so the removal takes effect.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7187 
/* MGMT_OP_REMOVE_DEVICE handler.
 *
 * With a specific address, removes that device from the BR/EDR accept
 * list or drops its LE connection parameters. With BDADDR_ANY (and
 * address type 0), removes all accept-list entries and all LE
 * connection parameters that are not in disabled state.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Params in disabled or explicit-connect state cannot be
		 * removed with this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY must come with address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a pending explicit connect but
			 * mark them accordingly instead of freeing them.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Refresh passive scanning so the removals take effect */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7315 
/* MGMT_OP_LOAD_CONN_PARAM handler.
 *
 * Clears connection parameter entries in disabled state and loads the
 * supplied list of preferred LE connection parameters. Individually
 * invalid entries are logged and skipped instead of failing the whole
 * command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound so the struct_size() computation below fits in u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE addresses are valid for connection parameters */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7400 
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: toggle HCI_EXT_CONFIGURED on a
 * controller with the external-configuration quirk. May move the device
 * between the configured and unconfigured index lists. Only allowed
 * while powered off.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the unconfigured flag no longer matches the configured state,
	 * re-register the index under its new (un)configured identity.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7456 
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: store a public bdaddr that will be
 * programmed into the controller (via the driver's set_bdaddr callback)
 * during the next power-on. Only allowed while powered off and when the
 * driver supports address programming.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY (all zeroes) is not a valid public address. */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only emit events below if the stored address actually changed. */
	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If setting the address completed the configuration, move the
	 * index from the unconfigured to the configured list and schedule
	 * power-on work so the address gets applied.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7508 
/* Completion handler for the HCI Read Local OOB (Extended) Data request
 * queued by read_local_ssp_oob_req(). Builds the EIR-encoded OOB reply
 * (class of device plus C192/R192 and/or C256/R256 values), sends it to
 * the requesting socket and broadcasts a Local OOB Data Updated event.
 * Owns and frees both cmd and cmd->skb.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if this command is no longer the pending one. */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive a status from the skb when the request itself succeeded:
	 * missing skb, error-pointer skb, or the HCI status byte carried in
	 * the first byte of the response.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status is already an MGMT status here, so
		 * this second mgmt_status() mapping looks redundant — it is
		 * preserved as-is; verify against mgmt_status() semantics.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy SSP: only the P-192 hash/randomizer pair exists. */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 bytes class-of-device field + two 18-byte
			 * (type + len + 16 bytes) OOB fields.
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: P-256 pair always present, P-192 pair
		 * omitted in SC-only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	/* Assemble the EIR payload: class of device first, then whichever
	 * hash/randomizer pairs were collected above.
	 */
	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Successful reply: opt this socket into OOB data events and
	 * notify all other interested sockets of the new data.
	 */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7631 
7632 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7633 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7634 {
7635 	struct mgmt_pending_cmd *cmd;
7636 	int err;
7637 
7638 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7639 			       cp, sizeof(*cp));
7640 	if (!cmd)
7641 		return -ENOMEM;
7642 
7643 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7644 				 read_local_oob_ext_data_complete);
7645 
7646 	if (err < 0) {
7647 		mgmt_pending_remove(cmd);
7648 		return err;
7649 	}
7650 
7651 	return 0;
7652 }
7653 
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler: return local out-of-band
 * pairing data as an EIR blob for either BR/EDR or LE, depending on
 * cp->type. The BR/EDR path with SSP enabled defers to the controller
 * via read_local_ssp_oob_req(); all other paths answer synchronously.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-compute the worst-case EIR length for the requested address
	 * type so the response buffer can be sized before taking the lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Now build the actual EIR payload; eir_len tracks bytes written. */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP on: hash/randomizer must come from the
			 * controller, so respond asynchronously.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address that will actually be used on air:
		 * static random (addr[6] = 0x01) or public (0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* LE role: 0x02 peripheral-preferred when advertising,
		 * 0x01 central-preferred otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Opt the requester into future OOB data update events. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify other interested sockets, excluding the requester. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7814 
7815 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7816 {
7817 	u32 flags = 0;
7818 
7819 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7820 	flags |= MGMT_ADV_FLAG_DISCOV;
7821 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7822 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7823 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7824 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7825 	flags |= MGMT_ADV_PARAM_DURATION;
7826 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7827 	flags |= MGMT_ADV_PARAM_INTERVALS;
7828 	flags |= MGMT_ADV_PARAM_TX_POWER;
7829 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7830 
7831 	/* In extended adv TX_POWER returned from Set Adv Param
7832 	 * will be always valid.
7833 	 */
7834 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7835 	    ext_adv_capable(hdev))
7836 		flags |= MGMT_ADV_FLAG_TX_POWER;
7837 
7838 	if (ext_adv_capable(hdev)) {
7839 		flags |= MGMT_ADV_FLAG_SEC_1M;
7840 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7841 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7842 
7843 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7844 			flags |= MGMT_ADV_FLAG_SEC_2M;
7845 
7846 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7847 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7848 	}
7849 
7850 	return flags;
7851 }
7852 
/* MGMT_OP_READ_ADV_FEATURES handler: report supported advertising flags,
 * data size limits, instance limits and the list of currently registered
 * advertising instance identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance for the id list. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Fill the variable-length tail with the instance identifiers. */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7901 
7902 static u8 calculate_name_len(struct hci_dev *hdev)
7903 {
7904 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7905 
7906 	return eir_append_local_name(hdev, buf, 0);
7907 }
7908 
7909 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7910 			   bool is_adv_data)
7911 {
7912 	u8 max_len = HCI_MAX_AD_LENGTH;
7913 
7914 	if (is_adv_data) {
7915 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7916 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7917 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7918 			max_len -= 3;
7919 
7920 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7921 			max_len -= 3;
7922 	} else {
7923 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7924 			max_len -= calculate_name_len(hdev);
7925 
7926 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7927 			max_len -= 4;
7928 	}
7929 
7930 	return max_len;
7931 }
7932 
7933 static bool flags_managed(u32 adv_flags)
7934 {
7935 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7936 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7937 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7938 }
7939 
7940 static bool tx_power_managed(u32 adv_flags)
7941 {
7942 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7943 }
7944 
7945 static bool name_managed(u32 adv_flags)
7946 {
7947 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7948 }
7949 
7950 static bool appearance_managed(u32 adv_flags)
7951 {
7952 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7953 }
7954 
7955 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7956 			      u8 len, bool is_adv_data)
7957 {
7958 	int i, cur_len;
7959 	u8 max_len;
7960 
7961 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7962 
7963 	if (len > max_len)
7964 		return false;
7965 
7966 	/* Make sure that the data is correctly formatted. */
7967 	for (i = 0; i < len; i += (cur_len + 1)) {
7968 		cur_len = data[i];
7969 
7970 		if (!cur_len)
7971 			continue;
7972 
7973 		if (data[i + 1] == EIR_FLAGS &&
7974 		    (!is_adv_data || flags_managed(adv_flags)))
7975 			return false;
7976 
7977 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7978 			return false;
7979 
7980 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7981 			return false;
7982 
7983 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7984 			return false;
7985 
7986 		if (data[i + 1] == EIR_APPEARANCE &&
7987 		    appearance_managed(adv_flags))
7988 			return false;
7989 
7990 		/* If the current field length would exceed the total data
7991 		 * length, then it's invalid.
7992 		 */
7993 		if (i + cur_len >= len)
7994 			return false;
7995 	}
7996 
7997 	return true;
7998 }
7999 
8000 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8001 {
8002 	u32 supported_flags, phy_flags;
8003 
8004 	/* The current implementation only supports a subset of the specified
8005 	 * flags. Also need to check mutual exclusiveness of sec flags.
8006 	 */
8007 	supported_flags = get_supported_adv_flags(hdev);
8008 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8009 	if (adv_flags & ~supported_flags ||
8010 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8011 		return false;
8012 
8013 	return true;
8014 }
8015 
8016 static bool adv_busy(struct hci_dev *hdev)
8017 {
8018 	return pending_find(MGMT_OP_SET_LE, hdev);
8019 }
8020 
8021 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8022 			     int err)
8023 {
8024 	struct adv_info *adv, *n;
8025 
8026 	bt_dev_dbg(hdev, "err %d", err);
8027 
8028 	hci_dev_lock(hdev);
8029 
8030 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8031 		u8 instance;
8032 
8033 		if (!adv->pending)
8034 			continue;
8035 
8036 		if (!err) {
8037 			adv->pending = false;
8038 			continue;
8039 		}
8040 
8041 		instance = adv->instance;
8042 
8043 		if (hdev->cur_adv_instance == instance)
8044 			cancel_adv_timeout(hdev);
8045 
8046 		hci_remove_adv_instance(hdev, instance);
8047 		mgmt_advertising_removed(sk, hdev, instance);
8048 	}
8049 
8050 	hci_dev_unlock(hdev);
8051 }
8052 
8053 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8054 {
8055 	struct mgmt_pending_cmd *cmd = data;
8056 	struct mgmt_cp_add_advertising *cp = cmd->param;
8057 	struct mgmt_rp_add_advertising rp;
8058 
8059 	memset(&rp, 0, sizeof(rp));
8060 
8061 	rp.instance = cp->instance;
8062 
8063 	if (err)
8064 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8065 				mgmt_status(err));
8066 	else
8067 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8068 				  mgmt_status(err), &rp, sizeof(rp));
8069 
8070 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8071 
8072 	mgmt_pending_free(cmd);
8073 }
8074 
8075 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8076 {
8077 	struct mgmt_pending_cmd *cmd = data;
8078 	struct mgmt_cp_add_advertising *cp = cmd->param;
8079 
8080 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8081 }
8082 
/* MGMT_OP_ADD_ADVERTISING handler: register (or replace) an advertising
 * instance with the supplied flags, advertising data and scan response
 * data, then schedule it on the controller when appropriate.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1..le_num_of_adv_sets. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length tail must match the declared data lengths. */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running timer, which requires power. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Validate both TLV payloads (adv data, then scan response). */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8217 
8218 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8219 					int err)
8220 {
8221 	struct mgmt_pending_cmd *cmd = data;
8222 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8223 	struct mgmt_rp_add_ext_adv_params rp;
8224 	struct adv_info *adv;
8225 	u32 flags;
8226 
8227 	BT_DBG("%s", hdev->name);
8228 
8229 	hci_dev_lock(hdev);
8230 
8231 	adv = hci_find_adv_instance(hdev, cp->instance);
8232 	if (!adv)
8233 		goto unlock;
8234 
8235 	rp.instance = cp->instance;
8236 	rp.tx_power = adv->tx_power;
8237 
8238 	/* While we're at it, inform userspace of the available space for this
8239 	 * advertisement, given the flags that will be used.
8240 	 */
8241 	flags = __le32_to_cpu(cp->flags);
8242 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8243 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8244 
8245 	if (err) {
8246 		/* If this advertisement was previously advertising and we
8247 		 * failed to update it, we signal that it has been removed and
8248 		 * delete its structure
8249 		 */
8250 		if (!adv->pending)
8251 			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8252 
8253 		hci_remove_adv_instance(hdev, cp->instance);
8254 
8255 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8256 				mgmt_status(err));
8257 	} else {
8258 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8259 				  mgmt_status(err), &rp, sizeof(rp));
8260 	}
8261 
8262 unlock:
8263 	if (cmd)
8264 		mgmt_pending_free(cmd);
8265 
8266 	hci_dev_unlock(hdev);
8267 }
8268 
8269 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8270 {
8271 	struct mgmt_pending_cmd *cmd = data;
8272 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8273 
8274 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8275 }
8276 
8277 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8278 			      void *data, u16 data_len)
8279 {
8280 	struct mgmt_cp_add_ext_adv_params *cp = data;
8281 	struct mgmt_rp_add_ext_adv_params rp;
8282 	struct mgmt_pending_cmd *cmd = NULL;
8283 	u32 flags, min_interval, max_interval;
8284 	u16 timeout, duration;
8285 	u8 status;
8286 	s8 tx_power;
8287 	int err;
8288 
8289 	BT_DBG("%s", hdev->name);
8290 
8291 	status = mgmt_le_support(hdev);
8292 	if (status)
8293 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8294 				       status);
8295 
8296 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8297 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8298 				       MGMT_STATUS_INVALID_PARAMS);
8299 
8300 	/* The purpose of breaking add_advertising into two separate MGMT calls
8301 	 * for params and data is to allow more parameters to be added to this
8302 	 * structure in the future. For this reason, we verify that we have the
8303 	 * bare minimum structure we know of when the interface was defined. Any
8304 	 * extra parameters we don't know about will be ignored in this request.
8305 	 */
8306 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8307 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8308 				       MGMT_STATUS_INVALID_PARAMS);
8309 
8310 	flags = __le32_to_cpu(cp->flags);
8311 
8312 	if (!requested_adv_flags_are_valid(hdev, flags))
8313 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8314 				       MGMT_STATUS_INVALID_PARAMS);
8315 
8316 	hci_dev_lock(hdev);
8317 
8318 	/* In new interface, we require that we are powered to register */
8319 	if (!hdev_is_powered(hdev)) {
8320 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8321 				      MGMT_STATUS_REJECTED);
8322 		goto unlock;
8323 	}
8324 
8325 	if (adv_busy(hdev)) {
8326 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8327 				      MGMT_STATUS_BUSY);
8328 		goto unlock;
8329 	}
8330 
8331 	/* Parse defined parameters from request, use defaults otherwise */
8332 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8333 		  __le16_to_cpu(cp->timeout) : 0;
8334 
8335 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8336 		   __le16_to_cpu(cp->duration) :
8337 		   hdev->def_multi_adv_rotation_duration;
8338 
8339 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8340 		       __le32_to_cpu(cp->min_interval) :
8341 		       hdev->le_adv_min_interval;
8342 
8343 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8344 		       __le32_to_cpu(cp->max_interval) :
8345 		       hdev->le_adv_max_interval;
8346 
8347 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8348 		   cp->tx_power :
8349 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8350 
8351 	/* Create advertising instance with no advertising or response data */
8352 	err = hci_add_adv_instance(hdev, cp->instance, flags,
8353 				   0, NULL, 0, NULL, timeout, duration,
8354 				   tx_power, min_interval, max_interval);
8355 
8356 	if (err < 0) {
8357 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8358 				      MGMT_STATUS_FAILED);
8359 		goto unlock;
8360 	}
8361 
8362 	/* Submit request for advertising params if ext adv available */
8363 	if (ext_adv_capable(hdev)) {
8364 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8365 				       data, data_len);
8366 		if (!cmd) {
8367 			err = -ENOMEM;
8368 			hci_remove_adv_instance(hdev, cp->instance);
8369 			goto unlock;
8370 		}
8371 
8372 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8373 					 add_ext_adv_params_complete);
8374 		if (err < 0)
8375 			mgmt_pending_free(cmd);
8376 	} else {
8377 		rp.instance = cp->instance;
8378 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8379 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8380 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8381 		err = mgmt_cmd_complete(sk, hdev->id,
8382 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8383 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8384 	}
8385 
8386 unlock:
8387 	hci_dev_unlock(hdev);
8388 
8389 	return err;
8390 }
8391 
8392 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8393 {
8394 	struct mgmt_pending_cmd *cmd = data;
8395 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8396 	struct mgmt_rp_add_advertising rp;
8397 
8398 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8399 
8400 	memset(&rp, 0, sizeof(rp));
8401 
8402 	rp.instance = cp->instance;
8403 
8404 	if (err)
8405 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8406 				mgmt_status(err));
8407 	else
8408 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8409 				  mgmt_status(err), &rp, sizeof(rp));
8410 
8411 	mgmt_pending_free(cmd);
8412 }
8413 
8414 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8415 {
8416 	struct mgmt_pending_cmd *cmd = data;
8417 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8418 	int err;
8419 
8420 	if (ext_adv_capable(hdev)) {
8421 		err = hci_update_adv_data_sync(hdev, cp->instance);
8422 		if (err)
8423 			return err;
8424 
8425 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8426 		if (err)
8427 			return err;
8428 
8429 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8430 	}
8431 
8432 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8433 }
8434 
/* MGMT_OP_ADD_EXT_ADV_DATA handler: second half of extended advertising
 * registration. Attaches advertising and scan response data to an
 * instance previously created by MGMT_OP_ADD_EXT_ADV_PARAMS and
 * schedules it. On any validation or queueing failure the half-built
 * instance is removed again (clear_new_instance).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The params step must have created the instance already. */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8553 
8554 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8555 					int err)
8556 {
8557 	struct mgmt_pending_cmd *cmd = data;
8558 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8559 	struct mgmt_rp_remove_advertising rp;
8560 
8561 	bt_dev_dbg(hdev, "err %d", err);
8562 
8563 	memset(&rp, 0, sizeof(rp));
8564 	rp.instance = cp->instance;
8565 
8566 	if (err)
8567 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8568 				mgmt_status(err));
8569 	else
8570 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8571 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8572 
8573 	mgmt_pending_free(cmd);
8574 }
8575 
8576 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8577 {
8578 	struct mgmt_pending_cmd *cmd = data;
8579 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8580 	int err;
8581 
8582 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8583 	if (err)
8584 		return err;
8585 
8586 	if (list_empty(&hdev->adv_instances))
8587 		err = hci_disable_advertising_sync(hdev);
8588 
8589 	return err;
8590 }
8591 
/* Handle MGMT_OP_REMOVE_ADVERTISING: validate the request under the
 * device lock, then queue the actual removal to run asynchronously.
 * The response is sent from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing advertising
	 * instance (instance 0 addresses all of them).
	 */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while a Set LE command is still pending. */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove when no instances are registered at all. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8639 
/* Handle MGMT_OP_GET_ADV_SIZE_INFO: report the maximum advertising and
 * scan response data lengths available for a given instance and flag
 * combination.
 */
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_cp_get_adv_size_info *cp = data;
	struct mgmt_rp_get_adv_size_info rp;
	u32 flags, supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Advertising sizes only make sense on an LE capable controller. */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_REJECTED);

	/* Instance numbers are 1-based and bounded by the controller's
	 * number of supported advertising sets.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	/* The current implementation only supports a subset of the specified
	 * flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	if (flags & ~supported_flags)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				       MGMT_STATUS_INVALID_PARAMS);

	rp.instance = cp->instance;
	rp.flags = cp->flags;
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
8675 
/* Dispatch table for mgmt commands, indexed by opcode. Each entry lists
 * the handler, the expected parameter size (a minimum when the entry is
 * marked HCI_MGMT_VAR_LEN) and flags describing whether a controller
 * index is required/optional and whether untrusted sockets may issue
 * the command.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8803 
/* Announce a newly registered controller index to userspace. Primary
 * controllers get the legacy (possibly unconfigured) index event; all
 * supported device types additionally get the extended index event
 * carrying a type and bus descriptor.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed over the mgmt interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		/* Unknown device types are not announced at all. */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8835 
/* Announce removal of a controller index. For primary controllers all
 * pending mgmt commands are first failed with INVALID_INDEX; then the
 * legacy and extended removal events are emitted, mirroring
 * mgmt_index_added().
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed over the mgmt interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 matches every pending command. */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8870 
/* Called when a power-on attempt has finished. On success, restore LE
 * auto-connect actions and passive scanning; in all cases respond to
 * pending Set Powered commands and emit New Settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the socket reference recorded while iterating, if any. */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
8893 
/* Handle the mgmt side of a controller power-off: answer pending Set
 * Powered commands, fail all remaining pending commands, announce a
 * zeroed class of device when needed and emit New Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce a class-of-device change if it was non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8927 
8928 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8929 {
8930 	struct mgmt_pending_cmd *cmd;
8931 	u8 status;
8932 
8933 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8934 	if (!cmd)
8935 		return;
8936 
8937 	if (err == -ERFKILL)
8938 		status = MGMT_STATUS_RFKILLED;
8939 	else
8940 		status = MGMT_STATUS_FAILED;
8941 
8942 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8943 
8944 	mgmt_pending_remove(cmd);
8945 }
8946 
8947 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8948 		       bool persistent)
8949 {
8950 	struct mgmt_ev_new_link_key ev;
8951 
8952 	memset(&ev, 0, sizeof(ev));
8953 
8954 	ev.store_hint = persistent;
8955 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8956 	ev.key.addr.type = BDADDR_BREDR;
8957 	ev.key.type = key->type;
8958 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8959 	ev.key.pin_len = key->pin_len;
8960 
8961 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8962 }
8963 
8964 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8965 {
8966 	switch (ltk->type) {
8967 	case SMP_LTK:
8968 	case SMP_LTK_RESPONDER:
8969 		if (ltk->authenticated)
8970 			return MGMT_LTK_AUTHENTICATED;
8971 		return MGMT_LTK_UNAUTHENTICATED;
8972 	case SMP_LTK_P256:
8973 		if (ltk->authenticated)
8974 			return MGMT_LTK_P256_AUTH;
8975 		return MGMT_LTK_P256_UNAUTH;
8976 	case SMP_LTK_P256_DEBUG:
8977 		return MGMT_LTK_P256_DEBUG;
8978 	}
8979 
8980 	return MGMT_LTK_UNAUTHENTICATED;
8981 }
8982 
/* Emit a New Long Term Key event. @persistent is the caller's storage
 * hint, but it is overridden to "don't store" for addresses that
 * cannot identify the device later (see comment below).
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK (as opposed to SMP_LTK_RESPONDER) marks the
	 * initiator's key.
	 */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9025 
9026 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9027 {
9028 	struct mgmt_ev_new_irk ev;
9029 
9030 	memset(&ev, 0, sizeof(ev));
9031 
9032 	ev.store_hint = persistent;
9033 
9034 	bacpy(&ev.rpa, &irk->rpa);
9035 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9036 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9037 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9038 
9039 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9040 }
9041 
/* Emit a New CSRK event. As with LTKs, the storage hint is forced off
 * for addresses that will not identify the device later.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9071 
9072 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9073 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9074 			 u16 max_interval, u16 latency, u16 timeout)
9075 {
9076 	struct mgmt_ev_new_conn_param ev;
9077 
9078 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9079 		return;
9080 
9081 	memset(&ev, 0, sizeof(ev));
9082 	bacpy(&ev.addr.bdaddr, bdaddr);
9083 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9084 	ev.store_hint = store_hint;
9085 	ev.min_interval = cpu_to_le16(min_interval);
9086 	ev.max_interval = cpu_to_le16(max_interval);
9087 	ev.latency = cpu_to_le16(latency);
9088 	ev.timeout = cpu_to_le16(timeout);
9089 
9090 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9091 }
9092 
9093 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9094 			   u8 *name, u8 name_len)
9095 {
9096 	struct sk_buff *skb;
9097 	struct mgmt_ev_device_connected *ev;
9098 	u16 eir_len = 0;
9099 	u32 flags = 0;
9100 
9101 	/* allocate buff for LE or BR/EDR adv */
9102 	if (conn->le_adv_data_len > 0)
9103 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9104 				     sizeof(*ev) + conn->le_adv_data_len);
9105 	else
9106 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9107 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9108 				     eir_precalc_len(sizeof(conn->dev_class)));
9109 
9110 	ev = skb_put(skb, sizeof(*ev));
9111 	bacpy(&ev->addr.bdaddr, &conn->dst);
9112 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9113 
9114 	if (conn->out)
9115 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9116 
9117 	ev->flags = __cpu_to_le32(flags);
9118 
9119 	/* We must ensure that the EIR Data fields are ordered and
9120 	 * unique. Keep it simple for now and avoid the problem by not
9121 	 * adding any BR/EDR data to the LE adv.
9122 	 */
9123 	if (conn->le_adv_data_len > 0) {
9124 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9125 		eir_len = conn->le_adv_data_len;
9126 	} else {
9127 		if (name)
9128 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9129 
9130 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9131 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9132 						    conn->dev_class, sizeof(conn->dev_class));
9133 	}
9134 
9135 	ev->eir_len = cpu_to_le16(eir_len);
9136 
9137 	mgmt_event_skb(skb, NULL);
9138 }
9139 
9140 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9141 {
9142 	struct sock **sk = data;
9143 
9144 	cmd->cmd_complete(cmd, 0);
9145 
9146 	*sk = cmd->sk;
9147 	sock_hold(*sk);
9148 
9149 	mgmt_pending_remove(cmd);
9150 }
9151 
/* Complete a pending Unpair Device command and emit the corresponding
 * Device Unpaired notification.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9162 
9163 bool mgmt_powering_down(struct hci_dev *hdev)
9164 {
9165 	struct mgmt_pending_cmd *cmd;
9166 	struct mgmt_mode *cp;
9167 
9168 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9169 	if (!cmd)
9170 		return false;
9171 
9172 	cp = cmd->param;
9173 	if (!cp->val)
9174 		return true;
9175 
9176 	return false;
9177 }
9178 
/* Emit a Device Disconnected event and complete pending Disconnect and
 * Unpair Device commands. If a power-off is pending and this was the
 * last connection, kick the deferred power-off work.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections that were announced to userspace. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Record the socket that issued a pending Disconnect so the
	 * event can be skipped for it below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9218 
9219 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9220 			    u8 link_type, u8 addr_type, u8 status)
9221 {
9222 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9223 	struct mgmt_cp_disconnect *cp;
9224 	struct mgmt_pending_cmd *cmd;
9225 
9226 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9227 			     hdev);
9228 
9229 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9230 	if (!cmd)
9231 		return;
9232 
9233 	cp = cmd->param;
9234 
9235 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9236 		return;
9237 
9238 	if (cp->addr.type != bdaddr_type)
9239 		return;
9240 
9241 	cmd->cmd_complete(cmd, mgmt_status(status));
9242 	mgmt_pending_remove(cmd);
9243 }
9244 
/* Emit a Connect Failed event. If a power-off is pending and this was
 * the last tracked connection, kick the deferred power-off work.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9264 
9265 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9266 {
9267 	struct mgmt_ev_pin_code_request ev;
9268 
9269 	bacpy(&ev.addr.bdaddr, bdaddr);
9270 	ev.addr.type = BDADDR_BREDR;
9271 	ev.secure = secure;
9272 
9273 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9274 }
9275 
9276 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9277 				  u8 status)
9278 {
9279 	struct mgmt_pending_cmd *cmd;
9280 
9281 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9282 	if (!cmd)
9283 		return;
9284 
9285 	cmd->cmd_complete(cmd, mgmt_status(status));
9286 	mgmt_pending_remove(cmd);
9287 }
9288 
9289 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9290 				      u8 status)
9291 {
9292 	struct mgmt_pending_cmd *cmd;
9293 
9294 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9295 	if (!cmd)
9296 		return;
9297 
9298 	cmd->cmd_complete(cmd, mgmt_status(status));
9299 	mgmt_pending_remove(cmd);
9300 }
9301 
/* Forward a user confirmation request for @value to userspace.
 * @confirm_hint is passed through unchanged in the event.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9318 
9319 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9320 			      u8 link_type, u8 addr_type)
9321 {
9322 	struct mgmt_ev_user_passkey_request ev;
9323 
9324 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9325 
9326 	bacpy(&ev.addr.bdaddr, bdaddr);
9327 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9328 
9329 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9330 			  NULL);
9331 }
9332 
9333 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9334 				      u8 link_type, u8 addr_type, u8 status,
9335 				      u8 opcode)
9336 {
9337 	struct mgmt_pending_cmd *cmd;
9338 
9339 	cmd = pending_find(opcode, hdev);
9340 	if (!cmd)
9341 		return -ENOENT;
9342 
9343 	cmd->cmd_complete(cmd, mgmt_status(status));
9344 	mgmt_pending_remove(cmd);
9345 
9346 	return 0;
9347 }
9348 
/* Completion hook for MGMT_OP_USER_CONFIRM_REPLY. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9355 
/* Completion hook for MGMT_OP_USER_CONFIRM_NEG_REPLY. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9363 
/* Completion hook for MGMT_OP_USER_PASSKEY_REPLY. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9370 
/* Completion hook for MGMT_OP_USER_PASSKEY_NEG_REPLY. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9378 
9379 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9380 			     u8 link_type, u8 addr_type, u32 passkey,
9381 			     u8 entered)
9382 {
9383 	struct mgmt_ev_passkey_notify ev;
9384 
9385 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9386 
9387 	bacpy(&ev.addr.bdaddr, bdaddr);
9388 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9389 	ev.passkey = __cpu_to_le32(passkey);
9390 	ev.entered = entered;
9391 
9392 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9393 }
9394 
/* Report an authentication failure to userspace and complete any
 * pending pairing command for this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The pairing initiator's socket (if any) is passed as the last
	 * argument; it receives the status via cmd_complete below.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9415 
/* Handle completion of an HCI authentication-enable change. On failure
 * report the error to pending Set Link Security commands; otherwise
 * sync the HCI_LINK_SECURITY flag with the controller state and, if it
 * actually changed, emit New Settings.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag and
	 * note whether anything actually changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9442 
9443 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9444 {
9445 	struct cmd_lookup *match = data;
9446 
9447 	if (match->sk == NULL) {
9448 		match->sk = cmd->sk;
9449 		sock_hold(match->sk);
9450 	}
9451 }
9452 
/* Completion of a class-of-device update: collect the socket of any
 * pending Set Dev Class / Add UUID / Remove UUID command and, on
 * success, broadcast the new 3-byte class of device.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9471 
/* Called when a local name update completes. On success the new name is
 * broadcast via Local Name Changed, unless the update is part of the
 * power-on sequence.
 *
 * NOTE(review): assumes @name points at a buffer of at least
 * HCI_MAX_NAME_LENGTH bytes — confirm against callers.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	/* The Set Local Name command structure doubles as the payload of
	 * the Local Name Changed event, so it is reused here.
	 */
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command pending, so the change came from elsewhere;
		 * cache the name on the device.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9499 
9500 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9501 {
9502 	int i;
9503 
9504 	for (i = 0; i < uuid_count; i++) {
9505 		if (!memcmp(uuid, uuids[i], 16))
9506 			return true;
9507 	}
9508 
9509 	return false;
9510 }
9511 
/* Scan EIR/advertising data for any UUID present in the @uuids list.
 * 16- and 32-bit service UUIDs are expanded to 128-bit form using the
 * Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	/* EIR layout: repeated [length][type][data...] fields, where the
	 * length byte covers the type byte plus the data but not itself.
	 */
	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data early. */
		if (field_len == 0)
			break;

		/* Stop if the field claims more bytes than remain. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Little-endian 16-bit UUIDs replace bytes 12-13 of
			 * the base UUID; data starts at eir[2].
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Little-endian 32-bit UUIDs replace bytes 12-15. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared as-is. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past the length byte plus the field it covers. */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9566 
/* Schedule a restart of an ongoing LE scan. Used with controllers that do
 * strict duplicate filtering, so that previously reported devices show up
 * again with fresh RSSI values.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when the scan window would already be over by
	 * the time the delayed restart fires.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
9581 
9582 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9583 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9584 {
9585 	/* If a RSSI threshold has been specified, and
9586 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9587 	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9588 	 * is set, let it through for further processing, as we might need to
9589 	 * restart the scan.
9590 	 *
9591 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9592 	 * the results are also dropped.
9593 	 */
9594 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9595 	    (rssi == HCI_RSSI_INVALID ||
9596 	    (rssi < hdev->discovery.rssi &&
9597 	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9598 		return  false;
9599 
9600 	if (hdev->discovery.uuid_count != 0) {
9601 		/* If a list of UUIDs is provided in filter, results with no
9602 		 * matching UUID should be dropped.
9603 		 */
9604 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9605 				   hdev->discovery.uuids) &&
9606 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
9607 				   hdev->discovery.uuid_count,
9608 				   hdev->discovery.uuids))
9609 			return false;
9610 	}
9611 
9612 	/* If duplicate filtering does not report RSSI changes, then restart
9613 	 * scanning to ensure updated result with updated RSSI values.
9614 	 */
9615 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9616 		restart_le_scan(hdev);
9617 
9618 		/* Validate RSSI value against the RSSI threshold once more. */
9619 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9620 		    rssi < hdev->discovery.rssi)
9621 			return false;
9622 	}
9623 
9624 	return true;
9625 }
9626 
9627 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
9628 				  bdaddr_t *bdaddr, u8 addr_type)
9629 {
9630 	struct mgmt_ev_adv_monitor_device_lost ev;
9631 
9632 	ev.monitor_handle = cpu_to_le16(handle);
9633 	bacpy(&ev.addr.bdaddr, bdaddr);
9634 	ev.addr.type = addr_type;
9635 
9636 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
9637 		   NULL);
9638 }
9639 
9640 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
9641 					       struct sk_buff *skb,
9642 					       struct sock *skip_sk,
9643 					       u16 handle)
9644 {
9645 	struct sk_buff *advmon_skb;
9646 	size_t advmon_skb_len;
9647 	__le16 *monitor_handle;
9648 
9649 	if (!skb)
9650 		return;
9651 
9652 	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
9653 			  sizeof(struct mgmt_ev_device_found)) + skb->len;
9654 	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
9655 				    advmon_skb_len);
9656 	if (!advmon_skb)
9657 		return;
9658 
9659 	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
9660 	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
9661 	 * store monitor_handle of the matched monitor.
9662 	 */
9663 	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
9664 	*monitor_handle = cpu_to_le16(handle);
9665 	skb_put_data(advmon_skb, skb->data, skb->len);
9666 
9667 	mgmt_event_skb(advmon_skb, skip_sk);
9668 }
9669 
/* Route an advertisement report to DEVICE_FOUND and/or
 * ADV_MONITOR_DEVICE_FOUND listeners.
 *
 * Consumes @skb: it is either emitted via mgmt_event_skb() or freed before
 * returning.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		/* Fast path: no monitor notification pending. */
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Only the first report per monitored device is
			 * forwarded with the matched monitor's handle.
			 */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		/* Any device still awaiting notification keeps the pending
		 * flag set so the fast path above is skipped next time.
		 */
		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Consume skb: emit it for cases 1/2, otherwise free it (the monitor
	 * path above copies rather than takes ownership).
	 */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
9733 
/* Report a discovered remote device over the mgmt interface, applying the
 * discovery filters (RSSI/UUID/limited) configured on @hdev. Builds a
 * DEVICE_FOUND skb and hands ownership of it to
 * mgmt_adv_monitor_device_found() for dispatch.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* BR/EDR: bit 5 of the second CoD byte. */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: LE Limited Discoverable flag in EIR flags. */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		/* Synthesize a Class of Device EIR field when the data did
		 * not carry one (uses the 5 spare bytes reserved above).
		 */
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Transfers ownership of skb. */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
9819 
9820 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9821 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9822 {
9823 	struct sk_buff *skb;
9824 	struct mgmt_ev_device_found *ev;
9825 	u16 eir_len = 0;
9826 	u32 flags = 0;
9827 
9828 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
9829 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
9830 
9831 	ev = skb_put(skb, sizeof(*ev));
9832 	bacpy(&ev->addr.bdaddr, bdaddr);
9833 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9834 	ev->rssi = rssi;
9835 
9836 	if (name)
9837 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9838 	else
9839 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9840 
9841 	ev->eir_len = cpu_to_le16(eir_len);
9842 	ev->flags = cpu_to_le32(flags);
9843 
9844 	mgmt_event_skb(skb, NULL);
9845 }
9846 
9847 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9848 {
9849 	struct mgmt_ev_discovering ev;
9850 
9851 	bt_dev_dbg(hdev, "discovering %u", discovering);
9852 
9853 	memset(&ev, 0, sizeof(ev));
9854 	ev.type = hdev->discovery.type;
9855 	ev.discovering = discovering;
9856 
9857 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9858 }
9859 
9860 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9861 {
9862 	struct mgmt_ev_controller_suspend ev;
9863 
9864 	ev.suspend_state = state;
9865 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9866 }
9867 
9868 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9869 		   u8 addr_type)
9870 {
9871 	struct mgmt_ev_controller_resume ev;
9872 
9873 	ev.wake_reason = reason;
9874 	if (bdaddr) {
9875 		bacpy(&ev.addr.bdaddr, bdaddr);
9876 		ev.addr.type = addr_type;
9877 	} else {
9878 		memset(&ev.addr, 0, sizeof(ev.addr));
9879 	}
9880 
9881 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9882 }
9883 
/* Control channel descriptor: routes commands arriving on
 * HCI_CHANNEL_CONTROL sockets to the mgmt_handlers table and initializes
 * per-hdev mgmt state via mgmt_init_hdev.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9890 
/* Register the mgmt control channel with the HCI socket layer.
 * Returns 0 on success or a negative errno from the registration.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9895 
/* Unregister the mgmt control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9900