xref: /openbmc/linux/net/bluetooth/mgmt.c (revision d3402925)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Opcodes a trusted (privileged) management socket may issue. This list
 * is reported verbatim by MGMT_OP_READ_COMMANDS, so the order here is
 * the order userspace sees.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
137 
/* Events delivered to trusted management sockets; the list is reported
 * by MGMT_OP_READ_COMMANDS alongside mgmt_commands.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184 
/* Read-only subset of opcodes that untrusted (non-privileged) sockets
 * are allowed to issue.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197 
/* Events that may be delivered to untrusted sockets (no security
 * sensitive material such as keys or peer addresses).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212 
213 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
214 
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
218 /* HCI to MGMT error code conversion table */
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code, so the position of each
 * entry is significant — do not reorder or remove entries. Codes past
 * the end of the table map to MGMT_STATUS_FAILED (see mgmt_status()).
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285 
286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Broadcast an index-related event on the control channel to all mgmt
 * sockets whose flags match @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329 
/* Like mgmt_index_event() but additionally skips @skip_sk (typically
 * the socket that triggered the change and already got a reply).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336 
/* Send an event to all trusted control-channel sockets except
 * @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343 
/* skb-based variant of mgmt_event(); the event header is expected to
 * already be part of @skb.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349 
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 		return ADDR_LE_DEV_PUBLIC;
354 	else
355 		return ADDR_LE_DEV_RANDOM;
356 }
357 
/* Fill a mgmt_rp_read_version structure (passed as void * so callers
 * with differently-typed buffers can reuse it) with the compiled-in
 * MGMT interface version and revision.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365 
/* MGMT_OP_READ_VERSION handler: reply with the static interface
 * version/revision pair. No controller index is involved.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378 
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380 			 u16 data_len)
381 {
382 	struct mgmt_rp_read_commands *rp;
383 	u16 num_commands, num_events;
384 	size_t rp_size;
385 	int i, err;
386 
387 	bt_dev_dbg(hdev, "sock %p", sk);
388 
389 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 		num_commands = ARRAY_SIZE(mgmt_commands);
391 		num_events = ARRAY_SIZE(mgmt_events);
392 	} else {
393 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
395 	}
396 
397 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398 
399 	rp = kmalloc(rp_size, GFP_KERNEL);
400 	if (!rp)
401 		return -ENOMEM;
402 
403 	rp->num_commands = cpu_to_le16(num_commands);
404 	rp->num_events = cpu_to_le16(num_events);
405 
406 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 		__le16 *opcode = rp->opcodes;
408 
409 		for (i = 0; i < num_commands; i++, opcode++)
410 			put_unaligned_le16(mgmt_commands[i], opcode);
411 
412 		for (i = 0; i < num_events; i++, opcode++)
413 			put_unaligned_le16(mgmt_events[i], opcode);
414 	} else {
415 		__le16 *opcode = rp->opcodes;
416 
417 		for (i = 0; i < num_commands; i++, opcode++)
418 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419 
420 		for (i = 0; i < num_events; i++, opcode++)
421 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422 	}
423 
424 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425 				rp, rp_size);
426 	kfree(rp);
427 
428 	return err;
429 }
430 
/* MGMT_OP_READ_INDEX_LIST handler: report the indexes of all
 * configured primary controllers.
 *
 * Two passes are made over hci_dev_list, both under the same
 * read_lock: the first sizes the reply, the second fills it. The
 * second pass filters more strictly (SETUP/CONFIG/USER_CHANNEL and
 * raw-only devices are skipped), so it can only produce fewer entries
 * than were allocated; rp_len is therefore recomputed from the final
 * count before sending.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC because we are inside read_lock() */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
490 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: the mirror image of
 * read_index_list() — it reports primary controllers that still have
 * HCI_UNCONFIGURED set. Same two-pass count/fill scheme under
 * hci_dev_list_lock.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC because we are inside read_lock() */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
550 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: report all controllers
 * (primary and AMP) together with a per-entry type field:
 * 0x00 configured primary, 0x01 unconfigured primary, 0x02 AMP.
 *
 * Uses the same two-pass count/fill scheme under hci_dev_list_lock as
 * read_index_list(). As a side effect, calling this command once
 * switches the socket over to extended index events permanently.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC because we are inside read_lock() */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
624 
625 static bool is_configured(struct hci_dev *hdev)
626 {
627 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
629 		return false;
630 
631 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
634 		return false;
635 
636 	return true;
637 }
638 
/* Build the bitmask of configuration steps still outstanding for
 * MGMT_OP_READ_CONFIG_INFO and the NEW_CONFIG_OPTIONS event. The two
 * checks mirror is_configured() exactly — keep them in sync.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
654 
/* Notify listeners that the set of missing config options changed,
 * skipping @skip (the socket whose command caused the change).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
662 
/* Complete a config-related command with the current missing-options
 * bitmask as the response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
670 
671 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
672 			    void *data, u16 data_len)
673 {
674 	struct mgmt_rp_read_config_info rp;
675 	u32 options = 0;
676 
677 	bt_dev_dbg(hdev, "sock %p", sk);
678 
679 	hci_dev_lock(hdev);
680 
681 	memset(&rp, 0, sizeof(rp));
682 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
683 
684 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
685 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
686 
687 	if (hdev->set_bdaddr)
688 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
689 
690 	rp.supported_options = cpu_to_le32(options);
691 	rp.missing_options = get_missing_options(hdev);
692 
693 	hci_dev_unlock(hdev);
694 
695 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
696 				 &rp, sizeof(rp));
697 }
698 
/* Build the bitmask of PHYs this controller supports, derived from
 * the LMP feature bits (BR/EDR side) and LE feature bits (LE side).
 * BR 1M 1-slot and LE 1M are implied by basic BR/EDR respectively LE
 * capability; everything else needs its own feature bit.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 2M/3M rates, each optionally with 3- and 5-slot
		 * packets on top of the mandatory 1-slot variant.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
750 
/* Build the bitmask of PHYs currently selected. On the BR/EDR side
 * this is derived from hdev->pkt_type; note the asymmetry: the DM3/DH3
 * and DM5/DH5 bits enable those packet types, while the EDR bits
 * (HCI_2DH1 etc.) are tested negated — presumably they act as
 * "shall not use" exclusion bits per the HCI packet-type encoding
 * (NOTE(review): confirm against the Bluetooth Core spec). The LE
 * side reads the default TX/RX PHY preference masks directly.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
813 
814 static u32 get_configurable_phys(struct hci_dev *hdev)
815 {
816 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
818 }
819 
/* Build the bitmask of settings this controller can support at all
 * (as opposed to those currently active — see get_current_settings()).
 * Derived from LMP/LE capability bits, quirks and driver callbacks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* These are always supported regardless of transport. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (interlaced page scan) needs 1.2+. */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed (AMP) is a compile-time option. */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration is only offered when something can actually be
	 * configured: external config quirk or a set_bdaddr hook.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
872 
/* Build the bitmask of settings currently in effect, mostly a direct
 * translation of the hdev HCI_* dev flags into MGMT_SETTING_* bits.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	return settings;
}
949 
/* Look up a pending mgmt command for @hdev on the control channel;
 * returns NULL when none is outstanding for @opcode.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
954 
955 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
956 {
957 	struct mgmt_pending_cmd *cmd;
958 
959 	/* If there's a pending mgmt command the flags will not yet have
960 	 * their final values, so check for this first.
961 	 */
962 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
963 	if (cmd) {
964 		struct mgmt_mode *cp = cmd->param;
965 		if (cp->val == 0x01)
966 			return LE_AD_GENERAL;
967 		else if (cp->val == 0x02)
968 			return LE_AD_LIMITED;
969 	} else {
970 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
971 			return LE_AD_LIMITED;
972 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
973 			return LE_AD_GENERAL;
974 	}
975 
976 	return 0;
977 }
978 
979 bool mgmt_get_connectable(struct hci_dev *hdev)
980 {
981 	struct mgmt_pending_cmd *cmd;
982 
983 	/* If there's a pending mgmt command the flag will not yet have
984 	 * it's final value, so check for this first.
985 	 */
986 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
987 	if (cmd) {
988 		struct mgmt_mode *cp = cmd->param;
989 
990 		return cp->val;
991 	}
992 
993 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
994 }
995 
/* hci_cmd_sync callback: push the (possibly changed) EIR data and
 * class of device to the controller after the service cache expired.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
1003 
/* Delayed work handler for hdev->service_cache: when the cache window
 * closes, queue a sync update of EIR/class. test_and_clear makes the
 * flag check and reset atomic so the update is only queued once.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1014 
1015 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1016 {
1017 	/* The generation of a new RPA and programming it into the
1018 	 * controller happens in the hci_req_enable_advertising()
1019 	 * function.
1020 	 */
1021 	if (ext_adv_capable(hdev))
1022 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1023 	else
1024 		return hci_enable_advertising_sync(hdev);
1025 }
1026 
/* Delayed-work handler for hdev->rpa_expired: mark the resolvable
 * private address as expired and, if advertising is active, queue a
 * sync operation that restarts advertising with a fresh RPA.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh if advertising is not currently enabled. */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1041 
/* Delayed-work handler for hdev->discov_off: the discoverable timeout
 * has fired, so turn discoverability off and notify mgmt listeners.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_update_discoverable(hdev);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1066 
1067 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1068 
/* Finish a mesh transmit: optionally emit the Mesh Packet Complete
 * event (suppressed when @silent) and release the pending TX entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	/* Copy the handle first; mgmt_mesh_remove() frees mesh_tx. */
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1080 
/* hci_cmd_sync callback: stop mesh advertising and complete the next
 * queued mesh transmit, if any.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	/* NULL cursor: fetch the first outstanding mesh TX entry. */
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1094 
1095 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1096 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback: kick off the next pending mesh transmit, if one
 * is queued. On queueing failure the entry is completed (and removed)
 * immediately so it cannot linger forever.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1112 
/* Delayed-work handler for hdev->mesh_send_done: tear down the current
 * mesh transmit and chain into the next one via mesh_next().
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	/* No-op if no mesh transmission is in flight. */
	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1123 
/* One-time per-controller mgmt initialization, performed the first
 * time a mgmt command addresses this controller. Idempotent: guarded
 * by the HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1145 
/* Handler for MGMT_OP_READ_INFO: return the controller's address,
 * version, manufacturer, settings, class of device and names in a
 * struct mgmt_rp_read_info reply.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	/* Class of device is a fixed 3-byte field. */
	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1175 
/* Fill @eir with class of device (BR/EDR only), appearance (LE only)
 * and the complete/short local names. Returns the number of bytes
 * written. Caller must provide a buffer large enough for all fields.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	/* strnlen: dev_name/short_name may not be NUL-terminated when
	 * they fill their array completely.
	 */
	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1199 
1200 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1201 				    void *data, u16 data_len)
1202 {
1203 	char buf[512];
1204 	struct mgmt_rp_read_ext_info *rp = (void *)buf;
1205 	u16 eir_len;
1206 
1207 	bt_dev_dbg(hdev, "sock %p", sk);
1208 
1209 	memset(&buf, 0, sizeof(buf));
1210 
1211 	hci_dev_lock(hdev);
1212 
1213 	bacpy(&rp->bdaddr, &hdev->bdaddr);
1214 
1215 	rp->version = hdev->hci_ver;
1216 	rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1217 
1218 	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1219 	rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1220 
1221 
1222 	eir_len = append_eir_data_to_buf(hdev, rp->eir);
1223 	rp->eir_len = cpu_to_le16(eir_len);
1224 
1225 	hci_dev_unlock(hdev);
1226 
1227 	/* If this command is called at least once, then the events
1228 	 * for class of device and local name changes are disabled
1229 	 * and only the new extended controller information event
1230 	 * is used.
1231 	 */
1232 	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1233 	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1234 	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1235 
1236 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1237 				 sizeof(*rp) + eir_len);
1238 }
1239 
/* Broadcast an Extended Controller Information Changed event (with a
 * freshly built EIR blob) to all sockets that opted into extended info
 * events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1255 
/* Send a command-complete response for @opcode carrying the current
 * settings bitmask as its payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1263 
1264 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1265 {
1266 	struct mgmt_ev_advertising_added ev;
1267 
1268 	ev.instance = instance;
1269 
1270 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1271 }
1272 
1273 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1274 			      u8 instance)
1275 {
1276 	struct mgmt_ev_advertising_removed ev;
1277 
1278 	ev.instance = instance;
1279 
1280 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1281 }
1282 
1283 static void cancel_adv_timeout(struct hci_dev *hdev)
1284 {
1285 	if (hdev->adv_instance_timeout) {
1286 		hdev->adv_instance_timeout = 0;
1287 		cancel_delayed_work(&hdev->adv_instance_expire);
1288 	}
1289 }
1290 
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Re-sort every known LE connection parameter entry onto the
	 * pending-connections or pending-reports list according to its
	 * auto-connect policy.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1315 
/* Broadcast a New Settings event with the current settings bitmask to
 * all sockets registered for settings events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1323 
/* Completion callback for Set Powered: report the result to the
 * requester and, on a successful power-on, restore LE auto-connect
 * actions and notify listeners of the new settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1359 
1360 static int set_powered_sync(struct hci_dev *hdev, void *data)
1361 {
1362 	struct mgmt_pending_cmd *cmd = data;
1363 	struct mgmt_mode *cp = cmd->param;
1364 
1365 	BT_DBG("%s", hdev->name);
1366 
1367 	return hci_set_powered_sync(hdev, cp->val);
1368 }
1369 
/* Handler for MGMT_OP_SET_POWERED: validate the request, short-circuit
 * no-op changes, and otherwise queue the power state change with
 * mgmt_set_powered_complete() as the completion callback.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid values. */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered command may be outstanding at a time. */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo current settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1412 
/* Public wrapper: broadcast a New Settings event to all listeners. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1417 
/* Shared context for mgmt_pending_foreach() iterations. */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket (ref held) */
	struct hci_dev *hdev;	/* controller being iterated */
	u8 mgmt_status;		/* status to report, where applicable */
};
1423 
/* mgmt_pending_foreach() callback: answer each pending command with
 * the current settings and free it. The first command's socket is
 * stashed (with a reference) in the lookup context so the caller can
 * skip it when broadcasting.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Reference dropped by the caller via sock_put(). */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1439 
/* mgmt_pending_foreach() callback: fail each pending command with the
 * status passed via @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1447 
1448 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1449 {
1450 	if (cmd->cmd_complete) {
1451 		u8 *status = data;
1452 
1453 		cmd->cmd_complete(cmd, *status);
1454 		mgmt_pending_remove(cmd);
1455 
1456 		return;
1457 	}
1458 
1459 	cmd_status_rsp(cmd, data);
1460 }
1461 
/* Generic cmd_complete handler: echo the command's own parameters back
 * as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1467 
/* cmd_complete handler for commands whose parameters start with a
 * struct mgmt_addr_info: respond with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1473 
1474 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1475 {
1476 	if (!lmp_bredr_capable(hdev))
1477 		return MGMT_STATUS_NOT_SUPPORTED;
1478 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1479 		return MGMT_STATUS_REJECTED;
1480 	else
1481 		return MGMT_STATUS_SUCCESS;
1482 }
1483 
1484 static u8 mgmt_le_support(struct hci_dev *hdev)
1485 {
1486 	if (!lmp_le_capable(hdev))
1487 		return MGMT_STATUS_NOT_SUPPORTED;
1488 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1489 		return MGMT_STATUS_REJECTED;
1490 	else
1491 		return MGMT_STATUS_SUCCESS;
1492 }
1493 
/* Completion callback for Set Discoverable: report the outcome, arm
 * the discoverable timeout when one was requested, and notify
 * listeners of the new settings.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the optimistic flag update done by
		 * set_discoverable() before queueing.
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the timeout now that discoverable mode is active. */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1527 
/* hci_cmd_sync callback for Set Discoverable: push the already-updated
 * discoverable flags to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	/* Use bt_dev_dbg() like the rest of this file instead of the
	 * legacy BT_DBG("%s", hdev->name) form.
	 */
	bt_dev_dbg(hdev, "");

	return hci_update_discoverable_sync(hdev);
}
1534 
/* Handler for MGMT_OP_SET_DISCOVERABLE: validate mode (0x00 off,
 * 0x01 general, 0x02 limited) and timeout, handle the powered-off and
 * timeout-only-change fast paths, and otherwise update the flags and
 * queue the HCI update with mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off. */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes must not race. */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1667 
1668 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1669 					  int err)
1670 {
1671 	struct mgmt_pending_cmd *cmd = data;
1672 
1673 	bt_dev_dbg(hdev, "err %d", err);
1674 
1675 	/* Make sure cmd still outstanding. */
1676 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1677 		return;
1678 
1679 	hci_dev_lock(hdev);
1680 
1681 	if (err) {
1682 		u8 mgmt_err = mgmt_status(err);
1683 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1684 		goto done;
1685 	}
1686 
1687 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1688 	new_settings(hdev, cmd->sk);
1689 
1690 done:
1691 	if (cmd)
1692 		mgmt_pending_remove(cmd);
1693 
1694 	hci_dev_unlock(hdev);
1695 }
1696 
/* Powered-off path for Set Connectable: update the flags only (no HCI
 * traffic), respond with the current settings, and broadcast a New
 * Settings event if anything actually changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also drops discoverable. */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1725 
/* hci_cmd_sync callback for Set Connectable: push the already-updated
 * connectable flags to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	/* Use bt_dev_dbg() like the rest of this file instead of the
	 * legacy BT_DBG("%s", hdev->name) form.
	 */
	bt_dev_dbg(hdev, "");

	return hci_update_connectable_sync(hdev);
}
1732 
/* Handler for MGMT_OP_SET_CONNECTABLE: validate the request, take the
 * settings-only path while powered off, and otherwise update the flags
 * and queue the HCI update with mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* While powered off only the flags need updating. */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Discoverable and connectable changes must not race. */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also cancels discoverability
		 * and its timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1792 
/* Handler for MGMT_OP_SET_BONDABLE: toggle the bondable flag. Purely a
 * host-side setting, so no HCI command is needed; only a settings
 * response and, when the flag changed, a New Settings broadcast.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear reports whether the flag actually flipped. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1830 
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link-level
 * security (authentication). While powered off only the flag is
 * updated; while powered on the HCI Write Auth Enable command is sent
 * and the response is delivered from its event handler.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: update the flag only, no HCI traffic. */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested state: just respond. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* On success the pending cmd is completed from the HCI event
	 * handler for Write Auth Enable.
	 */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1899 
/* Completion callback for Set SSP: reconcile the SSP/HS flags with the
 * outcome, answer every pending Set SSP command, broadcast new
 * settings when anything changed, and refresh the EIR data.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back an optimistic enable; HS depends on SSP so
		 * it has to be cleared alongside it.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1948 
/* hci_cmd_sync callback for Set SSP: optimistically set the flag,
 * write the SSP mode to the controller, and roll the flag back if the
 * write failed.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* Undo the flag change only if we were the ones who set it. */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1966 
/* Handler for MGMT_OP_SET_SSP: toggle Secure Simple Pairing. While
 * powered off only the flags change (disabling SSP also drops HS);
 * while powered on the change is queued via set_ssp_sync() with
 * set_ssp_complete() as the completion callback.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag updates only, no HCI traffic. */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just respond. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2046 
/* Handler for MGMT_OP_SET_HS: toggle High Speed (AMP) support. This is
 * a host-side flag only; it requires SSP to be enabled, and disabling
 * it while powered on is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Refuse while an SSP change is in flight, since the HS flag
	 * depends on the SSP outcome.
	 */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2107 
/* Completion callback for Set LE: fail or answer every pending Set LE
 * command and broadcast the resulting settings.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() held a reference on the first socket. */
	if (match.sk)
		sock_put(match.sk);
}
2128 
/* hci_cmd_sync callback for MGMT_OP_SET_LE: write the LE Host Supported
 * setting to the controller and refresh advertising state accordingly.
 * Runs on the cmd_sync context, so the *_sync HCI helpers may be used.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Turning LE off: stop and remove all advertising first */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			/* Only refresh scan response data when the extended
			 * advertising instance could be set up.
			 */
			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2172 
/* Completion handler for MGMT_OP_SET_MESH_RECEIVER */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		/* Report the failure to every pending SET_MESH_RECEIVER
		 * originator. NOTE(review): assumes cmd_status_rsp takes
		 * care of removing the pending cmd — confirm.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2188 
/* hci_cmd_sync callback for MGMT_OP_SET_MESH_RECEIVER: apply the
 * requested mesh receiver state and the AD type filter list.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Bytes after the fixed header are the AD type filters */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2211 
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the request and queue
 * set_mesh_sync on the cmd_sync context.
 *
 * Returns 0 or a negative errno; the MGMT status is reported to @sk.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Mesh requires LE and the experimental mesh feature enabled */
	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2249 
/* Completion handler for mesh_send_sync: on failure tear down the
 * transmission; on success schedule mesh_send_done after the packet's
 * transmit window (send->cnt advertising events at 25 ms each).
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2270 
/* hci_cmd_sync callback for MGMT_OP_MESH_SEND: transmit a mesh packet
 * by installing it as a dedicated, short-lived advertising instance.
 *
 * NOTE(review): the instance number (le_num_of_adv_sets + 1) appears
 * chosen to sit outside the range of regular advertising instances —
 * confirm against hci_add_adv_instance semantics.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* All controller advertising sets are currently occupied */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* A non-zero instance at this point must be scheduled explicitly */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2324 
2325 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2326 {
2327 	struct mgmt_rp_mesh_read_features *rp = data;
2328 
2329 	if (rp->used_handles >= rp->max_handles)
2330 		return;
2331 
2332 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2333 }
2334 
/* MGMT_OP_MESH_READ_FEATURES handler: report the maximum number of
 * concurrent mesh transmissions and the handles currently used by the
 * requesting socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* Handles are only offered while LE is enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply to the number of handles actually filled in */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2361 
/* hci_cmd_sync callback for MGMT_OP_MESH_SEND_CANCEL: abort a single
 * mesh transmission by handle, or — for handle 0 — every transmission
 * owned by the requesting socket.
 */
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		/* Handle 0 cancels all of this socket's transmissions */
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		/* Only the owning socket may cancel a transmission */
		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
2388 
/* MGMT_OP_MESH_SEND_CANCEL handler: validate the request and queue
 * send_cancel on the cmd_sync context.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	/* mgmt_pending_new is paired with mgmt_pending_free (here on
	 * failure, or in send_cancel on completion).
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2422 
/* MGMT_OP_MESH_SEND handler: queue a mesh packet for transmission.
 *
 * The immediate reply carries the assigned handle; the transmission is
 * started via mesh_send_sync unless one is already in flight
 * (HCI_MESH_SENDING), in which case the packet waits its turn.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Payload beyond the fixed header is capped at 31 bytes */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Reuse the Read Features reply structure to count the handles
	 * this socket already has in use.
	 */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		/* NOTE(review): err < 0 only happens when 'sending' is
		 * false, so this removal never runs and a queue failure
		 * leaves mesh_tx on the list — confirm this is intended
		 * (e.g. picked up by a later send-done cycle).
		 */
		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2483 
/* MGMT_OP_SET_LE handler: enable or disable Low Energy support.
 *
 * Returns 0 or a negative errno; the MGMT response is sent to @sk
 * either directly or from set_le_complete.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* If the device is unpowered, or the requested state already
	 * matches the controller's host LE setting, only flags need to
	 * change — no HCI traffic is required.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implicitly turns off advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other commands that touch the LE state */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2572 
2573 /* This is a helper function to test for pending mgmt commands that can
2574  * cause CoD or EIR HCI commands. We can only allow one such pending
2575  * mgmt command at a time since otherwise we cannot easily track what
2576  * the current values are, will be, and based on that calculate if a new
2577  * HCI command needs to be sent and if yes with what value.
2578  */
2579 static bool pending_eir_or_class(struct hci_dev *hdev)
2580 {
2581 	struct mgmt_pending_cmd *cmd;
2582 
2583 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2584 		switch (cmd->opcode) {
2585 		case MGMT_OP_ADD_UUID:
2586 		case MGMT_OP_REMOVE_UUID:
2587 		case MGMT_OP_SET_DEV_CLASS:
2588 		case MGMT_OP_SET_POWERED:
2589 			return true;
2590 		}
2591 	}
2592 
2593 	return false;
2594 }
2595 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are shortened forms
 * built on this base.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2600 
2601 static u8 get_uuid_size(const u8 *uuid)
2602 {
2603 	u32 val;
2604 
2605 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2606 		return 128;
2607 
2608 	val = get_unaligned_le32(&uuid[12]);
2609 	if (val > 0xffff)
2610 		return 32;
2611 
2612 	return 16;
2613 }
2614 
/* Shared completion handler for the UUID/class commands: reply with the
 * current (3 byte) Class of Device and free the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2626 
/* hci_cmd_sync callback for MGMT_OP_ADD_UUID: push the updated Class of
 * Device to the controller, then refresh the EIR data.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2637 
/* MGMT_OP_ADD_UUID handler: record a new service UUID and queue the
 * Class of Device/EIR update on the cmd_sync context.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	/* The UUID stays on the list even if queueing the HCI update
	 * below fails; it is picked up by the next successful update.
	 */
	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* The MGMT response is sent from mgmt_class_complete */
	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2683 
/* Arm the service cache timer so rapid UUID changes are batched into a
 * single CoD/EIR update.
 *
 * Returns true if the cache was newly enabled (timer armed), false when
 * the device is unpowered or the cache was already active.
 */
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}
2697 
/* hci_cmd_sync callback for MGMT_OP_REMOVE_UUID: push the updated Class
 * of Device to the controller, then refresh the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2708 
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID — or all of them
 * when the all-zero wildcard UUID is given — and queue the Class of
 * Device/EIR update.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard clearing the whole list */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* When the service cache gets (re)armed the HCI update is
		 * deferred; answer with the current class right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every list entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* The MGMT response is sent from mgmt_class_complete */
	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2776 
/* hci_cmd_sync callback for MGMT_OP_SET_DEV_CLASS: flush any pending
 * service cache (forcing an EIR refresh) and write the new class.
 */
static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	/* An armed service cache means the EIR data is stale; cancel the
	 * timer and bring EIR up to date before writing the class.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}
2791 
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor device class and
 * queue the controller update when powered.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Lower two bits of minor and upper three bits of major are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Unpowered: just remember the class, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* The MGMT response is sent from mgmt_class_complete */
	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2843 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the stored BR/EDR link keys
 * with the supplied list and update the keep-debug-keys policy.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap key_count so the expected length cannot overflow u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before touching the existing key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Loading is a full replacement of the current key store */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the block list are skipped, not loaded */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2932 
2933 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2934 			   u8 addr_type, struct sock *skip_sk)
2935 {
2936 	struct mgmt_ev_device_unpaired ev;
2937 
2938 	bacpy(&ev.addr.bdaddr, bdaddr);
2939 	ev.addr.type = addr_type;
2940 
2941 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2942 			  skip_sk);
2943 }
2944 
/* Completion handler for unpair_device_sync: emit the Device Unpaired
 * event on success, then send the command response and free cmd.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2956 
/* hci_cmd_sync callback for MGMT_OP_UNPAIR_DEVICE: terminate the link
 * to the unpaired device, if one exists.
 */
static int unpair_device_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* No connection: nothing to disconnect, report success */
	if (!conn)
		return 0;

	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}
2975 
/* MGMT_OP_UNPAIR_DEVICE handler: remove the stored pairing keys for a
 * device and, if requested, terminate any existing connection.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply echoes the requested address in all cases */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Not connected: drop the connection parameters immediately */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* The response is deferred until the disconnect completes */
	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3104 
/* MGMT_OP_DISCONNECT handler: terminate the connection to the given
 * address; the response is deferred until the disconnect completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply echoes the requested address in all cases */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3170 
3171 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3172 {
3173 	switch (link_type) {
3174 	case LE_LINK:
3175 		switch (addr_type) {
3176 		case ADDR_LE_DEV_PUBLIC:
3177 			return BDADDR_LE_PUBLIC;
3178 
3179 		default:
3180 			/* Fallback to LE Random address type */
3181 			return BDADDR_LE_RANDOM;
3182 		}
3183 
3184 	default:
3185 		/* Fallback to BR/EDR type */
3186 		return BDADDR_BREDR;
3187 	}
3188 }
3189 
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the reply allocation */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. SCO/eSCO entries do not
	 * advance i, so their slot is reused by the next entry.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3243 
3244 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3245 				   struct mgmt_cp_pin_code_neg_reply *cp)
3246 {
3247 	struct mgmt_pending_cmd *cmd;
3248 	int err;
3249 
3250 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3251 			       sizeof(*cp));
3252 	if (!cmd)
3253 		return -ENOMEM;
3254 
3255 	cmd->cmd_complete = addr_cmd_complete;
3256 
3257 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3258 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3259 	if (err < 0)
3260 		mgmt_pending_remove(cmd);
3261 
3262 	return err;
3263 }
3264 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to the
 * controller for the ACL connection matching cp->addr.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; reject anything shorter
	 * by sending a negative reply to the remote and reporting
	 * INVALID_PARAMS back to the caller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		/* err >= 0 means the negative reply was queued successfully,
		 * so the caller gets the INVALID_PARAMS status instead.
		 */
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3326 
3327 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3328 			     u16 len)
3329 {
3330 	struct mgmt_cp_set_io_capability *cp = data;
3331 
3332 	bt_dev_dbg(hdev, "sock %p", sk);
3333 
3334 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3335 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3336 				       MGMT_STATUS_INVALID_PARAMS);
3337 
3338 	hci_dev_lock(hdev);
3339 
3340 	hdev->io_capability = cp->io_capability;
3341 
3342 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3343 
3344 	hci_dev_unlock(hdev);
3345 
3346 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3347 				 NULL, 0);
3348 }
3349 
3350 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3351 {
3352 	struct hci_dev *hdev = conn->hdev;
3353 	struct mgmt_pending_cmd *cmd;
3354 
3355 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3356 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3357 			continue;
3358 
3359 		if (cmd->user_data != conn)
3360 			continue;
3361 
3362 		return cmd;
3363 	}
3364 
3365 	return NULL;
3366 }
3367 
/* Complete a pending MGMT_OP_PAIR_DEVICE command with @status and release
 * the connection references taken when pairing was initiated.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drop the reference taken via hci_conn_get() in pair_device() */
	hci_conn_put(conn);

	return err;
}
3396 
3397 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3398 {
3399 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3400 	struct mgmt_pending_cmd *cmd;
3401 
3402 	cmd = find_pairing(conn);
3403 	if (cmd) {
3404 		cmd->cmd_complete(cmd, status);
3405 		mgmt_pending_remove(cmd);
3406 	}
3407 }
3408 
3409 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3410 {
3411 	struct mgmt_pending_cmd *cmd;
3412 
3413 	BT_DBG("status %u", status);
3414 
3415 	cmd = find_pairing(conn);
3416 	if (!cmd) {
3417 		BT_DBG("Unable to find a pending command");
3418 		return;
3419 	}
3420 
3421 	cmd->cmd_complete(cmd, mgmt_status(status));
3422 	mgmt_pending_remove(cmd);
3423 }
3424 
3425 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3426 {
3427 	struct mgmt_pending_cmd *cmd;
3428 
3429 	BT_DBG("status %u", status);
3430 
3431 	if (!status)
3432 		return;
3433 
3434 	cmd = find_pairing(conn);
3435 	if (!cmd) {
3436 		BT_DBG("Unable to find a pending command");
3437 		return;
3438 	}
3439 
3440 	cmd->cmd_complete(cmd, mgmt_status(status));
3441 	mgmt_pending_remove(cmd);
3442 }
3443 
3444 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3445 		       u16 len)
3446 {
3447 	struct mgmt_cp_pair_device *cp = data;
3448 	struct mgmt_rp_pair_device rp;
3449 	struct mgmt_pending_cmd *cmd;
3450 	u8 sec_level, auth_type;
3451 	struct hci_conn *conn;
3452 	int err;
3453 
3454 	bt_dev_dbg(hdev, "sock %p", sk);
3455 
3456 	memset(&rp, 0, sizeof(rp));
3457 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3458 	rp.addr.type = cp->addr.type;
3459 
3460 	if (!bdaddr_type_is_valid(cp->addr.type))
3461 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3462 					 MGMT_STATUS_INVALID_PARAMS,
3463 					 &rp, sizeof(rp));
3464 
3465 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3466 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3467 					 MGMT_STATUS_INVALID_PARAMS,
3468 					 &rp, sizeof(rp));
3469 
3470 	hci_dev_lock(hdev);
3471 
3472 	if (!hdev_is_powered(hdev)) {
3473 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3474 					MGMT_STATUS_NOT_POWERED, &rp,
3475 					sizeof(rp));
3476 		goto unlock;
3477 	}
3478 
3479 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3480 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3481 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3482 					sizeof(rp));
3483 		goto unlock;
3484 	}
3485 
3486 	sec_level = BT_SECURITY_MEDIUM;
3487 	auth_type = HCI_AT_DEDICATED_BONDING;
3488 
3489 	if (cp->addr.type == BDADDR_BREDR) {
3490 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3491 				       auth_type, CONN_REASON_PAIR_DEVICE);
3492 	} else {
3493 		u8 addr_type = le_addr_type(cp->addr.type);
3494 		struct hci_conn_params *p;
3495 
3496 		/* When pairing a new device, it is expected to remember
3497 		 * this device for future connections. Adding the connection
3498 		 * parameter information ahead of time allows tracking
3499 		 * of the peripheral preferred values and will speed up any
3500 		 * further connection establishment.
3501 		 *
3502 		 * If connection parameters already exist, then they
3503 		 * will be kept and this function does nothing.
3504 		 */
3505 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3506 
3507 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3508 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3509 
3510 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3511 					   sec_level, HCI_LE_CONN_TIMEOUT,
3512 					   CONN_REASON_PAIR_DEVICE);
3513 	}
3514 
3515 	if (IS_ERR(conn)) {
3516 		int status;
3517 
3518 		if (PTR_ERR(conn) == -EBUSY)
3519 			status = MGMT_STATUS_BUSY;
3520 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3521 			status = MGMT_STATUS_NOT_SUPPORTED;
3522 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3523 			status = MGMT_STATUS_REJECTED;
3524 		else
3525 			status = MGMT_STATUS_CONNECT_FAILED;
3526 
3527 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3528 					status, &rp, sizeof(rp));
3529 		goto unlock;
3530 	}
3531 
3532 	if (conn->connect_cfm_cb) {
3533 		hci_conn_drop(conn);
3534 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3535 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3536 		goto unlock;
3537 	}
3538 
3539 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3540 	if (!cmd) {
3541 		err = -ENOMEM;
3542 		hci_conn_drop(conn);
3543 		goto unlock;
3544 	}
3545 
3546 	cmd->cmd_complete = pairing_complete;
3547 
3548 	/* For LE, just connecting isn't a proof that the pairing finished */
3549 	if (cp->addr.type == BDADDR_BREDR) {
3550 		conn->connect_cfm_cb = pairing_complete_cb;
3551 		conn->security_cfm_cb = pairing_complete_cb;
3552 		conn->disconn_cfm_cb = pairing_complete_cb;
3553 	} else {
3554 		conn->connect_cfm_cb = le_pairing_complete_cb;
3555 		conn->security_cfm_cb = le_pairing_complete_cb;
3556 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3557 	}
3558 
3559 	conn->io_capability = cp->io_cap;
3560 	cmd->user_data = hci_conn_get(conn);
3561 
3562 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3563 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3564 		cmd->cmd_complete(cmd, 0);
3565 		mgmt_pending_remove(cmd);
3566 	}
3567 
3568 	err = 0;
3569 
3570 unlock:
3571 	hci_dev_unlock(hdev);
3572 	return err;
3573 }
3574 
3575 static int abort_conn_sync(struct hci_dev *hdev, void *data)
3576 {
3577 	struct hci_conn *conn;
3578 	u16 handle = PTR_ERR(data);
3579 
3580 	conn = hci_conn_hash_lookup_handle(hdev, handle);
3581 	if (!conn)
3582 		return 0;
3583 
3584 	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
3585 }
3586 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress pair device
 * operation, remove any partial pairing material and tear down a link
 * that was only created for pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device the pairing was for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* NOTE(review): cmd_complete here is pairing_complete(), which drops
	 * and puts its hci_conn references - confirm something else still
	 * holds a reference before conn is dereferenced further below.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
				   NULL);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3644 
/* Common handler for the user confirm/passkey (negative) reply commands.
 * LE pairing responses are routed through SMP; BR/EDR responses are sent
 * to the controller with the given @hci_op. @passkey is only meaningful
 * for HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing: reply is handled entirely by SMP, respond right away */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3715 
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to the
 * common user pairing response helper (no passkey involved).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3727 
/* MGMT_OP_USER_CONFIRM_REPLY handler: validate the fixed payload size and
 * delegate to the common user pairing response helper.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* This command has no variable-length part; reject other sizes */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3743 
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper delegating to the
 * common user pairing response helper (no passkey involved).
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3755 
/* MGMT_OP_USER_PASSKEY_REPLY handler: thin wrapper forwarding the
 * user-entered passkey to the common user pairing response helper.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3767 
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper delegating to the
 * common user pairing response helper (no passkey involved).
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3779 
3780 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3781 {
3782 	struct adv_info *adv_instance;
3783 
3784 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3785 	if (!adv_instance)
3786 		return 0;
3787 
3788 	/* stop if current instance doesn't need to be changed */
3789 	if (!(adv_instance->flags & flags))
3790 		return 0;
3791 
3792 	cancel_adv_timeout(hdev);
3793 
3794 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3795 	if (!adv_instance)
3796 		return 0;
3797 
3798 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3799 
3800 	return 0;
3801 }
3802 
/* hci_cmd_sync callback: expire the current advertising instance if it
 * advertises the local name, so the new name takes effect.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3807 
/* Completion handler for the queued set_name_sync work: report the result
 * back to the mgmt socket and refresh advertising if needed.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if this command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* The name may be part of active advertising data */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3832 
/* hci_cmd_sync work: push the new local name to the controller for BR/EDR
 * (name + EIR) and refresh LE scan response data when advertising.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3848 
/* MGMT_OP_SET_LOCAL_NAME handler: update the device's full and short name.
 * When powered the controller update is queued asynchronously; otherwise
 * the names are stored locally and the change is broadcast immediately.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is never sent to the controller, store it now */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Store the new name only once the update was queued successfully */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3911 
/* hci_cmd_sync callback: expire the current advertising instance if it
 * advertises the appearance, so the new value takes effect.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3916 
3917 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3918 			  u16 len)
3919 {
3920 	struct mgmt_cp_set_appearance *cp = data;
3921 	u16 appearance;
3922 	int err;
3923 
3924 	bt_dev_dbg(hdev, "sock %p", sk);
3925 
3926 	if (!lmp_le_capable(hdev))
3927 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3928 				       MGMT_STATUS_NOT_SUPPORTED);
3929 
3930 	appearance = le16_to_cpu(cp->appearance);
3931 
3932 	hci_dev_lock(hdev);
3933 
3934 	if (hdev->appearance != appearance) {
3935 		hdev->appearance = appearance;
3936 
3937 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3938 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3939 					   NULL);
3940 
3941 		ext_info_changed(hdev, sk);
3942 	}
3943 
3944 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3945 				0);
3946 
3947 	hci_dev_unlock(hdev);
3948 
3949 	return err;
3950 }
3951 
3952 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3953 				 void *data, u16 len)
3954 {
3955 	struct mgmt_rp_get_phy_configuration rp;
3956 
3957 	bt_dev_dbg(hdev, "sock %p", sk);
3958 
3959 	hci_dev_lock(hdev);
3960 
3961 	memset(&rp, 0, sizeof(rp));
3962 
3963 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3964 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3965 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3966 
3967 	hci_dev_unlock(hdev);
3968 
3969 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3970 				 &rp, sizeof(rp));
3971 }
3972 
3973 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3974 {
3975 	struct mgmt_ev_phy_configuration_changed ev;
3976 
3977 	memset(&ev, 0, sizeof(ev));
3978 
3979 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3980 
3981 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3982 			  sizeof(ev), skip);
3983 }
3984 
/* Completion handler for set_default_phy_sync: derive the final status
 * from the queue error, the synchronous command skb, and the controller
 * status byte, then report back to the mgmt socket.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if this command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* Queue error takes precedence; otherwise inspect the skb result */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
4021 
4022 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4023 {
4024 	struct mgmt_pending_cmd *cmd = data;
4025 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4026 	struct hci_cp_le_set_default_phy cp_phy;
4027 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4028 
4029 	memset(&cp_phy, 0, sizeof(cp_phy));
4030 
4031 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4032 		cp_phy.all_phys |= 0x01;
4033 
4034 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4035 		cp_phy.all_phys |= 0x02;
4036 
4037 	if (selected_phys & MGMT_PHY_LE_1M_TX)
4038 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4039 
4040 	if (selected_phys & MGMT_PHY_LE_2M_TX)
4041 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4042 
4043 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
4044 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4045 
4046 	if (selected_phys & MGMT_PHY_LE_1M_RX)
4047 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4048 
4049 	if (selected_phys & MGMT_PHY_LE_2M_RX)
4050 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4051 
4052 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4053 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4054 
4055 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4056 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4057 
4058 	return 0;
4059 }
4060 
/* MGMT_OP_SET_PHY_CONFIGURATION handler: apply the requested PHY bitmask.
 * BR/EDR PHY selections are mapped onto the ACL packet-type mask locally;
 * LE PHY changes are queued as an HCI LE Set Default PHY command.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any PHY the controller doesn't support at all */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs must all remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the 1M (basic rate) slot selections onto DH/DM packet types.
	 * These bits are "enable" bits, so selecting a PHY sets them.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	/* EDR 2M/3M packet-type bits have inverted polarity: setting the
	 * bit disables the corresponding EDR packet type, so selecting a
	 * PHY clears its bit.
	 */
	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part is unchanged, only the BR/EDR packet types needed
	 * updating and no HCI command has to be issued.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4189 
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the device's blocked key list
 * with the one supplied by userspace.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	/* Note: err holds a MGMT_STATUS_* code here, not a negative errno */
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count that cannot overflow a u16 total length */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces the old one */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		/* On allocation failure keep what was added so far and
		 * report NO_RESOURCES to the caller.
		 */
		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4238 
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband speech setting.
 * Only allowed while the controller is powered off or when the value does
 * not actually change.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Changing the setting while powered is rejected */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* test-and-set/clear report whether the flag actually changed */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4287 
/* MGMT_OP_READ_CONTROLLER_CAP handler: build a TLV-style capability list
 * (security flags, encryption key sizes, LE TX power range) and return it.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* buf must be large enough for the header plus all appended
	 * capability entries below - assumes 20 bytes suffices; revisit
	 * when adding entries.
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4354 
/* UUIDs identifying the experimental features exposed through the mgmt
 * interface.  The byte arrays are stored in reversed (little-endian)
 * order relative to the canonical string shown above each one.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4398 
4399 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4400 				  void *data, u16 data_len)
4401 {
4402 	struct mgmt_rp_read_exp_features_info *rp;
4403 	size_t len;
4404 	u16 idx = 0;
4405 	u32 flags;
4406 	int status;
4407 
4408 	bt_dev_dbg(hdev, "sock %p", sk);
4409 
4410 	/* Enough space for 7 features */
4411 	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4412 	rp = kzalloc(len, GFP_KERNEL);
4413 	if (!rp)
4414 		return -ENOMEM;
4415 
4416 #ifdef CONFIG_BT_FEATURE_DEBUG
4417 	if (!hdev) {
4418 		flags = bt_dbg_get() ? BIT(0) : 0;
4419 
4420 		memcpy(rp->features[idx].uuid, debug_uuid, 16);
4421 		rp->features[idx].flags = cpu_to_le32(flags);
4422 		idx++;
4423 	}
4424 #endif
4425 
4426 	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4427 		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4428 			flags = BIT(0);
4429 		else
4430 			flags = 0;
4431 
4432 		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4433 		rp->features[idx].flags = cpu_to_le32(flags);
4434 		idx++;
4435 	}
4436 
4437 	if (hdev && ll_privacy_capable(hdev)) {
4438 		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4439 			flags = BIT(0) | BIT(1);
4440 		else
4441 			flags = BIT(1);
4442 
4443 		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4444 		rp->features[idx].flags = cpu_to_le32(flags);
4445 		idx++;
4446 	}
4447 
4448 	if (hdev && (aosp_has_quality_report(hdev) ||
4449 		     hdev->set_quality_report)) {
4450 		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4451 			flags = BIT(0);
4452 		else
4453 			flags = 0;
4454 
4455 		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4456 		rp->features[idx].flags = cpu_to_le32(flags);
4457 		idx++;
4458 	}
4459 
4460 	if (hdev && hdev->get_data_path_id) {
4461 		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4462 			flags = BIT(0);
4463 		else
4464 			flags = 0;
4465 
4466 		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4467 		rp->features[idx].flags = cpu_to_le32(flags);
4468 		idx++;
4469 	}
4470 
4471 	if (IS_ENABLED(CONFIG_BT_LE)) {
4472 		flags = iso_enabled() ? BIT(0) : 0;
4473 		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4474 		rp->features[idx].flags = cpu_to_le32(flags);
4475 		idx++;
4476 	}
4477 
4478 	if (hdev && lmp_le_capable(hdev)) {
4479 		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4480 			flags = BIT(0);
4481 		else
4482 			flags = 0;
4483 
4484 		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4485 		rp->features[idx].flags = cpu_to_le32(flags);
4486 		idx++;
4487 	}
4488 
4489 	rp->feature_count = cpu_to_le16(idx);
4490 
4491 	/* After reading the experimental features information, enable
4492 	 * the events to update client on any future change.
4493 	 */
4494 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4495 
4496 	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4497 				   MGMT_OP_READ_EXP_FEATURES_INFO,
4498 				   0, rp, sizeof(*rp) + (20 * idx));
4499 
4500 	kfree(rp);
4501 	return status;
4502 }
4503 
4504 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4505 					  struct sock *skip)
4506 {
4507 	struct mgmt_ev_exp_feature_changed ev;
4508 
4509 	memset(&ev, 0, sizeof(ev));
4510 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
4511 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4512 
4513 	// Do we need to be atomic with the conn_flags?
4514 	if (enabled && privacy_mode_capable(hdev))
4515 		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4516 	else
4517 		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4518 
4519 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4520 				  &ev, sizeof(ev),
4521 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4522 
4523 }
4524 
4525 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4526 			       bool enabled, struct sock *skip)
4527 {
4528 	struct mgmt_ev_exp_feature_changed ev;
4529 
4530 	memset(&ev, 0, sizeof(ev));
4531 	memcpy(ev.uuid, uuid, 16);
4532 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4533 
4534 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4535 				  &ev, sizeof(ev),
4536 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4537 }
4538 
/* Initializer for one entry of the exp_features[] dispatch table below:
 * pairs a feature UUID with its Set Experimental Feature handler.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4544 
/* The zero key uuid is special. Multiple exp features are set through it.
 *
 * Writing the all-zero UUID disables the debug feature (when built in)
 * and, while the controller is powered off, LL privacy.  Each feature
 * that actually changed is announced via exp_feature_changed().
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be cleared while powered off. */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4581 
4582 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Set Experimental Feature handler for the debug UUID: toggles the
 * bluetooth debug logging state via bt_dbg_set().  Only valid on the
 * non-controller index (hdev must be NULL).
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is guaranteed NULL here (checked above), so the change is
	 * broadcast as a global (non-controller) event.
	 */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
4627 #endif
4628 
/* Set Experimental Feature handler for the mgmt mesh UUID: toggles
 * HCI_MESH_EXPERIMENTAL on a controller.  Disabling also clears the
 * HCI_MESH flag itself.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_MESH_EXPERIMENTAL);
	} else {
		/* Turning the experimental feature off also disables any
		 * active mesh operation.
		 */
		hci_dev_clear_flag(hdev, HCI_MESH);
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_MESH_EXPERIMENTAL);
	}

	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);

	return err;
}
4679 
/* Set Experimental Feature handler for the RPA resolution (LL privacy)
 * UUID: toggles HCI_ENABLE_LL_PRIVACY.  Only allowed while the controller
 * is powered off.  BIT(1) in the reported flags indicates the supported
 * settings changed as a result.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4744 
/* Set Experimental Feature handler for the quality report UUID: toggles
 * HCI_QUALITY_REPORT via either the driver's set_quality_report callback
 * or the AOSP vendor extension.  Runs under hci_req_sync_lock because the
 * enable/disable path issues HCI traffic.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* The driver callback takes precedence over the AOSP
		 * vendor extension.
		 */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only mirror the flag after the controller accepted the
		 * change.
		 */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4818 
/* Set Experimental Feature handler for the offload codecs UUID: toggles
 * HCI_OFFLOAD_CODECS_ENABLED.  Requires the driver to provide a
 * get_data_path_id callback; no lock is held, only the flag is flipped.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* Codec offload requires driver support for data path discovery. */
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4876 
/* Set Experimental Feature handler for the LE simultaneous roles UUID:
 * toggles HCI_LE_SIMULTANEOUS_ROLES when the controller's LE states
 * support acting as central and peripheral at the same time.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4934 
4935 #ifdef CONFIG_BT_LE
/* Set Experimental Feature handler for the ISO socket UUID: registers or
 * unregisters the ISO socket protocol via iso_init()/iso_exit().  Only
 * valid on the non-controller index.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	if (!err)
		changed = true;

	/* NOTE(review): if iso_init()/iso_exit() failed, err is overwritten
	 * below and rp.flags still reports the requested state — confirm
	 * this is the intended behavior.
	 */
	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
4984 #endif
4985 
/* Dispatch table mapping experimental feature UUIDs to their Set
 * Experimental Feature handlers; terminated by a NULL-uuid entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
5007 
5008 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5009 			   void *data, u16 data_len)
5010 {
5011 	struct mgmt_cp_set_exp_feature *cp = data;
5012 	size_t i = 0;
5013 
5014 	bt_dev_dbg(hdev, "sock %p", sk);
5015 
5016 	for (i = 0; exp_features[i].uuid; i++) {
5017 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5018 			return exp_features[i].set_func(sk, hdev, cp, data_len);
5019 	}
5020 
5021 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5022 			       MGMT_OP_SET_EXP_FEATURE,
5023 			       MGMT_STATUS_NOT_SUPPORTED);
5024 }
5025 
5026 static u32 get_params_flags(struct hci_dev *hdev,
5027 			    struct hci_conn_params *params)
5028 {
5029 	u32 flags = hdev->conn_flags;
5030 
5031 	/* Devices using RPAs can only be programmed in the acceptlist if
5032 	 * LL Privacy has been enable otherwise they cannot mark
5033 	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5034 	 */
5035 	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5036 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5037 		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5038 
5039 	return flags;
5040 }
5041 
/* MGMT Get Device Flags command handler.
 *
 * Looks the device up in the BR/EDR accept list or the LE connection
 * parameters (depending on address type) and returns its supported and
 * current connection flags.  An unknown device yields Invalid Params.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		/* LE devices may support fewer flags than the controller
		 * default (e.g. remote wakeup restrictions for RPAs).
		 */
		supported_flags = get_params_flags(hdev, params);
		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
5093 
5094 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5095 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5096 				 u32 supported_flags, u32 current_flags)
5097 {
5098 	struct mgmt_ev_device_flags_changed ev;
5099 
5100 	bacpy(&ev.addr.bdaddr, bdaddr);
5101 	ev.addr.type = bdaddr_type;
5102 	ev.supported_flags = cpu_to_le32(supported_flags);
5103 	ev.current_flags = cpu_to_le32(current_flags);
5104 
5105 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5106 }
5107 
/* MGMT Set Device Flags command handler.
 *
 * Validates the requested flags against the supported set and stores them
 * on the matching accept-list entry (BR/EDR) or connection parameters
 * (LE).  On success a Device Flags Changed event is emitted to other
 * sockets.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): conn_flags is read outside hci_dev_lock() here and
	 * could change before the lock is taken below — confirm whether the
	 * lock should be acquired earlier.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Re-validate against the (possibly narrower) per-device flags. */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	params->flags = current_flags;
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5184 
5185 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5186 				   u16 handle)
5187 {
5188 	struct mgmt_ev_adv_monitor_added ev;
5189 
5190 	ev.monitor_handle = cpu_to_le16(handle);
5191 
5192 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5193 }
5194 
/* Notify mgmt sockets that an advertisement monitor was removed.
 *
 * If a Remove Adv Monitor command targeting a specific handle is pending,
 * its issuing socket is skipped: that socket learns about the removal
 * from its command response instead of this event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* A zero handle means "remove all"; only skip the issuer
		 * when a specific handle was requested.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5214 
/* MGMT Read Advertisement Monitor Features command handler.
 *
 * Reports the supported/enabled monitor features, the handle limits, and
 * the handles of all currently registered monitors.
 *
 * NOTE(review): handles[] assumes the IDR never holds more than
 * HCI_MAX_ADV_MONITOR_NUM_HANDLES monitors — confirm that registration
 * enforces this bound, otherwise the loop below overruns the array.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot the registered handles under the lock. */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5263 
/* Completion callback for the Add Adv Patterns Monitor sync request.
 *
 * On success, announces the new monitor to other sockets, bumps the
 * monitor count and refreshes passive scanning; in all cases sends the
 * command response and drops the pending command.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5291 
5292 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5293 {
5294 	struct mgmt_pending_cmd *cmd = data;
5295 	struct adv_monitor *monitor = cmd->user_data;
5296 
5297 	return hci_add_adv_monitor(hdev, monitor);
5298 }
5299 
/* Common helper for the Add Adv Patterns Monitor command variants.
 *
 * Queues the monitor registration on the cmd_sync workqueue.  Ownership
 * of @m transfers to the pending command on success; on any failure path
 * (including a non-zero incoming @status from parsing) the monitor is
 * freed here and an error status is returned to the caller's socket.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Only one monitor/LE-state changing operation at a time. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5347 
5348 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5349 				   struct mgmt_adv_rssi_thresholds *rssi)
5350 {
5351 	if (rssi) {
5352 		m->rssi.low_threshold = rssi->low_threshold;
5353 		m->rssi.low_threshold_timeout =
5354 		    __le16_to_cpu(rssi->low_threshold_timeout);
5355 		m->rssi.high_threshold = rssi->high_threshold;
5356 		m->rssi.high_threshold_timeout =
5357 		    __le16_to_cpu(rssi->high_threshold_timeout);
5358 		m->rssi.sampling_period = rssi->sampling_period;
5359 	} else {
5360 		/* Default values. These numbers are the least constricting
5361 		 * parameters for MSFT API to work, so it behaves as if there
5362 		 * are no rssi parameter to consider. May need to be changed
5363 		 * if other API are to be supported.
5364 		 */
5365 		m->rssi.low_threshold = -127;
5366 		m->rssi.low_threshold_timeout = 60;
5367 		m->rssi.high_threshold = -127;
5368 		m->rssi.high_threshold_timeout = 0;
5369 		m->rssi.sampling_period = 0;
5370 	}
5371 }
5372 
5373 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5374 				    struct mgmt_adv_pattern *patterns)
5375 {
5376 	u8 offset = 0, length = 0;
5377 	struct adv_pattern *p = NULL;
5378 	int i;
5379 
5380 	for (i = 0; i < pattern_count; i++) {
5381 		offset = patterns[i].offset;
5382 		length = patterns[i].length;
5383 		if (offset >= HCI_MAX_AD_LENGTH ||
5384 		    length > HCI_MAX_AD_LENGTH ||
5385 		    (offset + length) > HCI_MAX_AD_LENGTH)
5386 			return MGMT_STATUS_INVALID_PARAMS;
5387 
5388 		p = kmalloc(sizeof(*p), GFP_KERNEL);
5389 		if (!p)
5390 			return MGMT_STATUS_NO_RESOURCES;
5391 
5392 		p->ad_type = patterns[i].ad_type;
5393 		p->offset = patterns[i].offset;
5394 		p->length = patterns[i].length;
5395 		memcpy(p->value, patterns[i].value, p->length);
5396 
5397 		INIT_LIST_HEAD(&p->list);
5398 		list_add(&p->list, &m->patterns);
5399 	}
5400 
5401 	return MGMT_STATUS_SUCCESS;
5402 }
5403 
5404 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5405 				    void *data, u16 len)
5406 {
5407 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5408 	struct adv_monitor *m = NULL;
5409 	u8 status = MGMT_STATUS_SUCCESS;
5410 	size_t expected_size = sizeof(*cp);
5411 
5412 	BT_DBG("request for %s", hdev->name);
5413 
5414 	if (len <= sizeof(*cp)) {
5415 		status = MGMT_STATUS_INVALID_PARAMS;
5416 		goto done;
5417 	}
5418 
5419 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5420 	if (len != expected_size) {
5421 		status = MGMT_STATUS_INVALID_PARAMS;
5422 		goto done;
5423 	}
5424 
5425 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5426 	if (!m) {
5427 		status = MGMT_STATUS_NO_RESOURCES;
5428 		goto done;
5429 	}
5430 
5431 	INIT_LIST_HEAD(&m->patterns);
5432 
5433 	parse_adv_monitor_rssi(m, NULL);
5434 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5435 
5436 done:
5437 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5438 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5439 }
5440 
5441 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5442 					 void *data, u16 len)
5443 {
5444 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5445 	struct adv_monitor *m = NULL;
5446 	u8 status = MGMT_STATUS_SUCCESS;
5447 	size_t expected_size = sizeof(*cp);
5448 
5449 	BT_DBG("request for %s", hdev->name);
5450 
5451 	if (len <= sizeof(*cp)) {
5452 		status = MGMT_STATUS_INVALID_PARAMS;
5453 		goto done;
5454 	}
5455 
5456 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5457 	if (len != expected_size) {
5458 		status = MGMT_STATUS_INVALID_PARAMS;
5459 		goto done;
5460 	}
5461 
5462 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5463 	if (!m) {
5464 		status = MGMT_STATUS_NO_RESOURCES;
5465 		goto done;
5466 	}
5467 
5468 	INIT_LIST_HEAD(&m->patterns);
5469 
5470 	parse_adv_monitor_rssi(m, &cp->rssi);
5471 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5472 
5473 done:
5474 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5475 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5476 }
5477 
/* Completion handler for Remove Adv Monitor: echoes the requested handle
 * back to the caller and refreshes passive scanning on success.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Copied verbatim from the request, so already little-endian. */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5500 
5501 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5502 {
5503 	struct mgmt_pending_cmd *cmd = data;
5504 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5505 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5506 
5507 	if (!handle)
5508 		return hci_remove_all_adv_monitor(hdev);
5509 
5510 	return hci_remove_single_adv_monitor(hdev, handle);
5511 }
5512 
/* MGMT_OP_REMOVE_ADV_MONITOR handler: rejects the request while any
 * conflicting monitor/LE operation is pending, otherwise queues the
 * removal work and responds from the completion callback.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Only one monitor (or LE setting) operation may be in flight. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
				 mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Queueing failed: the completion callback will never run,
		 * so drop the pending command here.
		 */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	/* Response is sent by mgmt_remove_adv_monitor_complete(). */
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5558 
/* Completion handler for Read Local OOB Data: translate the controller's
 * reply skb (legacy or Secure Connections variant) into the mgmt response.
 * Consumes cmd->skb and frees the pending command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (!status) {
		/* The request itself succeeded; derive the status from the
		 * reply skb: missing, an error pointer, or the HCI status
		 * byte at the start of the payload.
		 */
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy pairing: only the P-192 hash/randomizer exist, so
		 * trim the 256-bit fields off the end of the response.
		 */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Secure Connections: the extended reply carries both the
		 * P-192 and P-256 values.
		 */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* Only free a real skb, not an ERR_PTR value. */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5625 
5626 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5627 {
5628 	struct mgmt_pending_cmd *cmd = data;
5629 
5630 	if (bredr_sc_enabled(hdev))
5631 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5632 	else
5633 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5634 
5635 	if (IS_ERR(cmd->skb))
5636 		return PTR_ERR(cmd->skb);
5637 	else
5638 		return 0;
5639 }
5640 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller; queues the HCI read and responds from the completion
 * callback.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		/* Either allocation or queueing failed: report failure and
		 * free the command, since the completion callback will
		 * never run.
		 */
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5682 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: stores a remote device's OOB
 * pairing data.  Two request sizes are accepted: the legacy form with
 * only P-192 values (BR/EDR only), and the extended form that carries
 * both P-192 and P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 hash/randomizer only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: both P-192 and P-256 values; all-zero
		 * values mean "no data" for that curve.
		 */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5790 
5791 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5792 				  void *data, u16 len)
5793 {
5794 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5795 	u8 status;
5796 	int err;
5797 
5798 	bt_dev_dbg(hdev, "sock %p", sk);
5799 
5800 	if (cp->addr.type != BDADDR_BREDR)
5801 		return mgmt_cmd_complete(sk, hdev->id,
5802 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5803 					 MGMT_STATUS_INVALID_PARAMS,
5804 					 &cp->addr, sizeof(cp->addr));
5805 
5806 	hci_dev_lock(hdev);
5807 
5808 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5809 		hci_remote_oob_data_clear(hdev);
5810 		status = MGMT_STATUS_SUCCESS;
5811 		goto done;
5812 	}
5813 
5814 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5815 	if (err < 0)
5816 		status = MGMT_STATUS_INVALID_PARAMS;
5817 	else
5818 		status = MGMT_STATUS_SUCCESS;
5819 
5820 done:
5821 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5822 				status, &cp->addr, sizeof(cp->addr));
5823 
5824 	hci_dev_unlock(hdev);
5825 	return err;
5826 }
5827 
5828 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5829 {
5830 	struct mgmt_pending_cmd *cmd;
5831 
5832 	bt_dev_dbg(hdev, "status %u", status);
5833 
5834 	hci_dev_lock(hdev);
5835 
5836 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5837 	if (!cmd)
5838 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5839 
5840 	if (!cmd)
5841 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5842 
5843 	if (cmd) {
5844 		cmd->cmd_complete(cmd, mgmt_status(status));
5845 		mgmt_pending_remove(cmd);
5846 	}
5847 
5848 	hci_dev_unlock(hdev);
5849 }
5850 
5851 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5852 				    uint8_t *mgmt_status)
5853 {
5854 	switch (type) {
5855 	case DISCOV_TYPE_LE:
5856 		*mgmt_status = mgmt_le_support(hdev);
5857 		if (*mgmt_status)
5858 			return false;
5859 		break;
5860 	case DISCOV_TYPE_INTERLEAVED:
5861 		*mgmt_status = mgmt_le_support(hdev);
5862 		if (*mgmt_status)
5863 			return false;
5864 		fallthrough;
5865 	case DISCOV_TYPE_BREDR:
5866 		*mgmt_status = mgmt_bredr_support(hdev);
5867 		if (*mgmt_status)
5868 			return false;
5869 		break;
5870 	default:
5871 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5872 		return false;
5873 	}
5874 
5875 	return true;
5876 }
5877 
/* hci_cmd_sync completion for the discovery-start work: responds to the
 * caller and updates the discovery state machine.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command is no longer pending (it may already have
	 * been completed and removed elsewhere).
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The first byte of cmd->param is the discovery type, echoed back
	 * in the response.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5896 
/* hci_cmd_sync work function: kick off discovery on the controller. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5901 
/* Shared implementation for Start Discovery and Start Limited Discovery:
 * validates the request, programs hdev->discovery and queues the
 * discovery-start work.  @op selects which mgmt command is being served.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while another discovery or a periodic inquiry is active. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5972 
/* MGMT_OP_START_DISCOVERY handler: thin wrapper around
 * start_discovery_internal().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5979 
/* MGMT_OP_START_LIMITED_DISCOVERY handler: thin wrapper around
 * start_discovery_internal().
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5987 
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like Start Discovery, but with
 * result filtering by RSSI and a variable-length list of 128-bit service
 * UUIDs appended to the request.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on uuid_count such that the total request length
	 * still fits in the u16 len (each UUID is 16 bytes).
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while another discovery or a periodic inquiry is active. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The request length must exactly match the advertised UUID count. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6099 
/* Called when a stop-discovery request has finished: complete the pending
 * Stop Discovery command, if any.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
6116 
/* hci_cmd_sync completion for the discovery-stop work: responds to the
 * caller and, on success, marks discovery stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command is no longer pending (it may already have
	 * been completed and removed elsewhere).
	 */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The first byte of cmd->param is the discovery type, echoed back
	 * in the response.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6133 
/* hci_cmd_sync work function: stop discovery on the controller. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6138 
/* MGMT_OP_STOP_DISCOVERY handler: the requested type must match the type
 * of the currently running discovery; queues the stop work and responds
 * from the completion callback.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6183 
6184 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6185 			u16 len)
6186 {
6187 	struct mgmt_cp_confirm_name *cp = data;
6188 	struct inquiry_entry *e;
6189 	int err;
6190 
6191 	bt_dev_dbg(hdev, "sock %p", sk);
6192 
6193 	hci_dev_lock(hdev);
6194 
6195 	if (!hci_discovery_active(hdev)) {
6196 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6197 					MGMT_STATUS_FAILED, &cp->addr,
6198 					sizeof(cp->addr));
6199 		goto failed;
6200 	}
6201 
6202 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6203 	if (!e) {
6204 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6205 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6206 					sizeof(cp->addr));
6207 		goto failed;
6208 	}
6209 
6210 	if (cp->name_known) {
6211 		e->name_state = NAME_KNOWN;
6212 		list_del(&e->list);
6213 	} else {
6214 		e->name_state = NAME_NEEDED;
6215 		hci_inquiry_cache_update_resolve(hdev, e);
6216 	}
6217 
6218 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6219 				&cp->addr, sizeof(cp->addr));
6220 
6221 failed:
6222 	hci_dev_unlock(hdev);
6223 	return err;
6224 }
6225 
6226 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6227 			u16 len)
6228 {
6229 	struct mgmt_cp_block_device *cp = data;
6230 	u8 status;
6231 	int err;
6232 
6233 	bt_dev_dbg(hdev, "sock %p", sk);
6234 
6235 	if (!bdaddr_type_is_valid(cp->addr.type))
6236 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6237 					 MGMT_STATUS_INVALID_PARAMS,
6238 					 &cp->addr, sizeof(cp->addr));
6239 
6240 	hci_dev_lock(hdev);
6241 
6242 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6243 				  cp->addr.type);
6244 	if (err < 0) {
6245 		status = MGMT_STATUS_FAILED;
6246 		goto done;
6247 	}
6248 
6249 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6250 		   sk);
6251 	status = MGMT_STATUS_SUCCESS;
6252 
6253 done:
6254 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6255 				&cp->addr, sizeof(cp->addr));
6256 
6257 	hci_dev_unlock(hdev);
6258 
6259 	return err;
6260 }
6261 
6262 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6263 			  u16 len)
6264 {
6265 	struct mgmt_cp_unblock_device *cp = data;
6266 	u8 status;
6267 	int err;
6268 
6269 	bt_dev_dbg(hdev, "sock %p", sk);
6270 
6271 	if (!bdaddr_type_is_valid(cp->addr.type))
6272 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6273 					 MGMT_STATUS_INVALID_PARAMS,
6274 					 &cp->addr, sizeof(cp->addr));
6275 
6276 	hci_dev_lock(hdev);
6277 
6278 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6279 				  cp->addr.type);
6280 	if (err < 0) {
6281 		status = MGMT_STATUS_INVALID_PARAMS;
6282 		goto done;
6283 	}
6284 
6285 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6286 		   sk);
6287 	status = MGMT_STATUS_SUCCESS;
6288 
6289 done:
6290 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6291 				&cp->addr, sizeof(cp->addr));
6292 
6293 	hci_dev_unlock(hdev);
6294 
6295 	return err;
6296 }
6297 
/* hci_cmd_sync work function: refresh the EIR data so it carries the newly
 * configured Device ID record.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6302 
6303 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6304 			 u16 len)
6305 {
6306 	struct mgmt_cp_set_device_id *cp = data;
6307 	int err;
6308 	__u16 source;
6309 
6310 	bt_dev_dbg(hdev, "sock %p", sk);
6311 
6312 	source = __le16_to_cpu(cp->source);
6313 
6314 	if (source > 0x0002)
6315 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6316 				       MGMT_STATUS_INVALID_PARAMS);
6317 
6318 	hci_dev_lock(hdev);
6319 
6320 	hdev->devid_source = source;
6321 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6322 	hdev->devid_product = __le16_to_cpu(cp->product);
6323 	hdev->devid_version = __le16_to_cpu(cp->version);
6324 
6325 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6326 				NULL, 0);
6327 
6328 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6329 
6330 	hci_dev_unlock(hdev);
6331 
6332 	return err;
6333 }
6334 
/* Log the outcome of re-enabling an advertising instance. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err)
		bt_dev_dbg(hdev, "status %d", err);
	else
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6342 
/* hci_cmd_sync completion for Set Advertising: syncs the HCI_ADVERTISING
 * flag with the controller state, answers all pending Set Advertising
 * commands and, when advertising was just turned off, resumes any
 * configured multi-instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's LE advertising state into the mgmt flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last matching socket. */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one. */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6390 
/* hci_cmd_sync work function for Set Advertising: enables or disables
 * advertising on the controller.  cp->val of 0x02 additionally requests
 * connectable advertising.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	/* Stop any instance-timeout driven advertising first. */
	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6424 
/* Handle the MGMT_OP_SET_ADVERTISING command.
 *
 * Validates the request (LE support, parameter range, advertising not
 * paused), then either updates the setting flags directly when no HCI
 * traffic is needed, or queues set_adv_sync() to reprogram the
 * controller. Returns a negative errno or the result of the mgmt reply.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* Only 0x00 (off), 0x01 (on) and 0x02 (connectable) are defined */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Reject while advertising is suspended (e.g. during discovery) */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if the flags actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize with other pending advertising/LE toggles */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	/* Drop the pending entry if the work could not be queued */
	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6509 
/* Handle the MGMT_OP_SET_STATIC_ADDRESS command.
 *
 * Stores a static random address to be used for LE while the controller
 * is powered off. Requires LE support, rejects the command on a powered
 * controller, and validates that a non-wildcard address is a proper
 * static random address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* The address may only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY clears the static address; otherwise validate it */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6553 
/* Handle the MGMT_OP_SET_SCAN_PARAMS command.
 *
 * Validates and stores the LE scan interval and window (both in units of
 * 0.625 ms, range 0x0004-0x4000 with window <= interval) and restarts a
 * running passive background scan so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must not exceed the scan interval */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6602 
/* Completion callback for Set Fast Connectable: on error send a command
 * status; on success update the HCI_FAST_CONNECTABLE flag to match the
 * requested value, reply with the settings and emit New Settings. The
 * pending command is always freed.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6626 
6627 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6628 {
6629 	struct mgmt_pending_cmd *cmd = data;
6630 	struct mgmt_mode *cp = cmd->param;
6631 
6632 	return hci_write_fast_connectable_sync(hdev, cp->val);
6633 }
6634 
/* Handle the MGMT_OP_SET_FAST_CONNECTABLE command.
 *
 * Requires BR/EDR enabled and a controller of at least Bluetooth 1.2.
 * When the setting already matches or the controller is powered off the
 * flag is updated directly; otherwise write_fast_connectable_sync() is
 * queued to program the controller.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just reply with the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: toggle the flag only, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6690 
/* Completion callback for Set BR/EDR: on failure roll back the
 * HCI_BREDR_ENABLED flag (which set_bredr() set optimistically before
 * queuing the work) and send a command status; on success reply with the
 * settings and emit New Settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6713 
6714 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6715 {
6716 	int status;
6717 
6718 	status = hci_write_fast_connectable_sync(hdev, false);
6719 
6720 	if (!status)
6721 		status = hci_update_scan_sync(hdev);
6722 
6723 	/* Since only the advertising data flags will change, there
6724 	 * is no need to update the scan response data.
6725 	 */
6726 	if (!status)
6727 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6728 
6729 	return status;
6730 }
6731 
/* Handle the MGMT_OP_SET_BREDR command.
 *
 * Toggles BR/EDR support on a dual-mode controller. While powered off the
 * flags are updated directly (disabling also clears the BR/EDR-only
 * settings). While powered on, disabling is rejected, and re-enabling is
 * rejected for LE-only configurations that use a static address or have
 * Secure Connections enabled. Otherwise set_bredr_sync() is queued and
 * HCI_BREDR_ENABLED is set optimistically (set_bredr_complete() rolls it
 * back on failure).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR can only be toggled when LE is enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change: just reply with the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6832 
/* Completion callback for Set Secure Connections: on failure send a
 * command status; on success update the HCI_SC_ENABLED/HCI_SC_ONLY flags
 * according to the requested value (0x00 = off, 0x01 = SC, 0x02 = SC
 * only), reply with the settings and emit New Settings.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6870 
6871 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6872 {
6873 	struct mgmt_pending_cmd *cmd = data;
6874 	struct mgmt_mode *cp = cmd->param;
6875 	u8 val = !!cp->val;
6876 
6877 	/* Force write of val */
6878 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6879 
6880 	return hci_write_sc_support_sync(hdev, val);
6881 }
6882 
/* Handle the MGMT_OP_SET_SECURE_CONN command.
 *
 * Value 0x00 disables SC, 0x01 enables SC, 0x02 enables SC-only mode.
 * When the controller is powered off, lacks SC support or has BR/EDR
 * disabled, only the flags are updated; otherwise set_secure_conn_sync()
 * is queued to write the setting to the controller.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* For BR/EDR, SC requires SSP to be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only update when no HCI communication is possible/needed */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested: reply with current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6963 
/* Handle the MGMT_OP_SET_DEBUG_KEYS command.
 *
 * Value 0x00 discards debug keys, 0x01 keeps them, 0x02 additionally
 * enables SSP debug mode on the controller. New Settings is only emitted
 * when the keep-debug-keys flag actually changed.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* 0x02 also turns on use of debug keys for pairing */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Toggle SSP debug mode on the controller when powered and SSP is
	 * enabled and the use-debug-keys state changed.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7010 
/* Handle the MGMT_OP_SET_PRIVACY command.
 *
 * Stores the Identity Resolving Key and enables/disables LE privacy
 * (0x01 = privacy, 0x02 = limited privacy). Only allowed while powered
 * off. Disabling clears the IRK and all privacy-related flags.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Privacy settings may only change while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7067 
7068 static bool irk_is_valid(struct mgmt_irk_info *irk)
7069 {
7070 	switch (irk->addr.type) {
7071 	case BDADDR_LE_PUBLIC:
7072 		return true;
7073 
7074 	case BDADDR_LE_RANDOM:
7075 		/* Two most significant bits shall be set */
7076 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7077 			return false;
7078 		return true;
7079 	}
7080 
7081 	return false;
7082 }
7083 
/* Handle the MGMT_OP_LOAD_IRKS command.
 *
 * Replaces the whole IRK store: validates the count and packet length,
 * rejects any invalid entry up front, then clears the existing IRKs and
 * adds the new ones (skipping blocked keys). Receiving this command also
 * signals that user space handles IRKs, so HCI_RPA_RESOLVING is set.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound such that the packet length cannot overflow u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The packet length must exactly match the declared count */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before touching the existing store */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not failed */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7154 
7155 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7156 {
7157 	if (key->initiator != 0x00 && key->initiator != 0x01)
7158 		return false;
7159 
7160 	switch (key->addr.type) {
7161 	case BDADDR_LE_PUBLIC:
7162 		return true;
7163 
7164 	case BDADDR_LE_RANDOM:
7165 		/* Two most significant bits shall be set */
7166 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7167 			return false;
7168 		return true;
7169 	}
7170 
7171 	return false;
7172 }
7173 
/* Handle the MGMT_OP_LOAD_LONG_TERM_KEYS command.
 *
 * Replaces the whole LTK store: validates count, packet length and every
 * entry, then clears the existing keys and adds the new ones. Blocked
 * keys, P256 debug keys and keys of unknown type are silently skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound such that the packet length cannot overflow u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The packet length must exactly match the declared count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate all entries before touching the existing store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not failed */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to SMP key type and auth level */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys and unknown types are deliberately
			 * not stored: fall through to the skip below.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7269 
/* Completion callback for Get Connection Information: reply with the
 * connection's RSSI and TX power values on success, or with invalid
 * marker values on failure, then free the pending command.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	/* Connection was stored by get_conn_info_sync() */
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* Report sentinel values so user space can tell the data
		 * is unavailable.
		 */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
7298 
/* hci_cmd_sync work for Get Connection Information: refresh the cached
 * RSSI and TX power values for the requested connection.
 *
 * Returns a positive MGMT_STATUS_NOT_CONNECTED when the connection went
 * away, otherwise the result of the HCI reads (0 or negative errno);
 * both are translated by mgmt_status() in the completion callback.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_conn_info_complete() */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7336 
/* Handle the MGMT_OP_GET_CONN_INFO command.
 *
 * Returns RSSI and TX power for an active connection. Cached values are
 * returned when they are fresh enough; otherwise get_conn_info_sync() is
 * queued to query the controller and the reply is sent from the
 * completion callback.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up the connection on the transport matching the address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7427 
/* Completion callback for Get Clock Information: reply with the local
 * clock and, when the connection is still available, the piconet clock
 * and its accuracy. On failure only the address is echoed back.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	/* Connection was stored by get_clock_info_sync(); may be NULL */
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7458 
/* Runs from the hci cmd_sync context: read the local clock first and
 * then, if the connection is still up, the piconet clock of that
 * connection.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* handle == 0 / which == 0x00 reads the local clock; the return
	 * value is ignored here — presumably the event handler caches the
	 * value read by get_clock_info_complete() via hdev->clock (TODO
	 * confirm in hci_event.c).
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Remember the connection for get_clock_info_complete() */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7480 
/* Handle MGMT_OP_GET_CLOCK_INFO: queue a sync request to read the local
 * (and optionally a connection's piconet) clock. The success response is
 * sent from get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Error replies still carry the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-wildcard address asks for that connection's piconet clock
	 * as well; BDADDR_ANY asks for the local clock only.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
	} else {
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync,
					 cmd, get_clock_info_complete);
	}

	/* On queueing failure answer here and free the pending command;
	 * otherwise the completion callback owns both.
	 */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7544 
7545 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7546 {
7547 	struct hci_conn *conn;
7548 
7549 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7550 	if (!conn)
7551 		return false;
7552 
7553 	if (conn->dst_type != type)
7554 		return false;
7555 
7556 	if (conn->state != BT_CONNECTED)
7557 		return false;
7558 
7559 	return true;
7560 }
7561 
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Look up existing parameters or create a fresh entry */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from the current pend_le_conns/pend_le_reports list
	 * before re-filing the entry according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* A pending explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7606 
7607 static void device_added(struct sock *sk, struct hci_dev *hdev,
7608 			 bdaddr_t *bdaddr, u8 type, u8 action)
7609 {
7610 	struct mgmt_ev_device_added ev;
7611 
7612 	bacpy(&ev.addr.bdaddr, bdaddr);
7613 	ev.addr.type = type;
7614 	ev.action = action;
7615 
7616 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7617 }
7618 
/* hci cmd_sync callback for Add Device: the accept list / conn params
 * were already updated under hdev->lock, so only the passive scan has to
 * be re-programmed here. The data argument is unused.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7623 
/* Handle MGMT_OP_ADD_DEVICE: BR/EDR addresses are put on the accept list
 * (only action 0x01 is supported); LE addresses get a connection
 * parameter entry whose auto-connect policy is derived from the action
 * (0x02 -> ALWAYS, 0x01 -> DIRECT, 0x00 -> REPORT).
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject invalid address types and the wildcard address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00, 0x01 and 0x02 are defined */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the LE auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Capture the current flags for the event below */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Re-program passive scanning from the cmd_sync context */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7725 
7726 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7727 			   bdaddr_t *bdaddr, u8 type)
7728 {
7729 	struct mgmt_ev_device_removed ev;
7730 
7731 	bacpy(&ev.addr.bdaddr, bdaddr);
7732 	ev.addr.type = type;
7733 
7734 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7735 }
7736 
/* hci cmd_sync callback for Remove Device: the lists were already
 * updated under hdev->lock, so only the passive scan needs to be
 * re-programmed here. The data argument is unused.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7741 
/* Handle MGMT_OP_REMOVE_DEVICE: a specific address removes that single
 * device (accept list entry for BR/EDR, conn params for LE); BDADDR_ANY
 * with type 0 removes all devices.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* DISABLED and EXPLICIT entries are never created via Add
		 * Device (see add_device()), so they cannot be removed here.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Flush the BR/EDR accept list */
		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		/* Drop all LE conn params; disabled entries are skipped and
		 * pending explicit connects are kept (downgraded to
		 * EXPLICIT) so an ongoing connect attempt survives.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Re-program passive scanning to reflect the removals */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7869 
/* Handle MGMT_OP_LOAD_CONN_PARAM: replace the stored LE connection
 * parameters with the list supplied by userspace. Invalid entries are
 * skipped with an error log rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound keeping expected_len representable in u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The message length must exactly match the declared count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Drop all currently disabled entries before loading the new list */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types are acceptable here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7954 
7955 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7956 			       void *data, u16 len)
7957 {
7958 	struct mgmt_cp_set_external_config *cp = data;
7959 	bool changed;
7960 	int err;
7961 
7962 	bt_dev_dbg(hdev, "sock %p", sk);
7963 
7964 	if (hdev_is_powered(hdev))
7965 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7966 				       MGMT_STATUS_REJECTED);
7967 
7968 	if (cp->config != 0x00 && cp->config != 0x01)
7969 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7970 				         MGMT_STATUS_INVALID_PARAMS);
7971 
7972 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7973 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7974 				       MGMT_STATUS_NOT_SUPPORTED);
7975 
7976 	hci_dev_lock(hdev);
7977 
7978 	if (cp->config)
7979 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7980 	else
7981 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7982 
7983 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7984 	if (err < 0)
7985 		goto unlock;
7986 
7987 	if (!changed)
7988 		goto unlock;
7989 
7990 	err = new_options(hdev, sk);
7991 
7992 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7993 		mgmt_index_removed(hdev);
7994 
7995 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7996 			hci_dev_set_flag(hdev, HCI_CONFIG);
7997 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7998 
7999 			queue_work(hdev->req_workqueue, &hdev->power_on);
8000 		} else {
8001 			set_bit(HCI_RAW, &hdev->flags);
8002 			mgmt_index_added(hdev);
8003 		}
8004 	}
8005 
8006 unlock:
8007 	hci_dev_unlock(hdev);
8008 	return err;
8009 }
8010 
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS: store a new public address for a
 * powered-off controller whose driver provides set_bdaddr. If this makes
 * the controller fully configured, re-register its index and power it on
 * so the address gets programmed.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The address can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver must support programming a public address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8062 
/* Completion callback for the Read Local OOB (Extended) Data request:
 * parse the controller reply, build the EIR payload with the OOB hash
 * and randomizer values, send the command response and broadcast the
 * Local OOB Data Updated event.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive a status from the skb when the request itself succeeded */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status was already mapped through
		 * mgmt_status() above; this second mapping looks redundant —
		 * confirm intended behavior against mgmt_util.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Without Secure Connections only the P-192 values from the
		 * legacy Read Local OOB Data reply are available.
		 */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class TLV (5) + hash TLV (18) + rand TLV (18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: extended reply carries P-256 values
		 * and, unless SC-only mode is set, the P-192 values too.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* Error case: reply without any EIR payload */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Broadcast the new OOB data to other interested sockets */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8185 
8186 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8187 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8188 {
8189 	struct mgmt_pending_cmd *cmd;
8190 	int err;
8191 
8192 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8193 			       cp, sizeof(*cp));
8194 	if (!cmd)
8195 		return -ENOMEM;
8196 
8197 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8198 				 read_local_oob_ext_data_complete);
8199 
8200 	if (err < 0) {
8201 		mgmt_pending_remove(cmd);
8202 		return err;
8203 	}
8204 
8205 	return 0;
8206 }
8207 
/* Handle MGMT_OP_READ_LOCAL_OOB_EXT_DATA: build the out-of-band pairing
 * payload for BR/EDR or LE depending on cp->type. The BR/EDR case with
 * SSP enabled completes asynchronously via read_local_ssp_oob_req();
 * every other case is answered inline.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: compute the worst-case EIR length so the reply
	 * buffer can be allocated before taking hdev->lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill in the actual data; eir_len now tracks the
	 * used length within the allocated buffer.
	 */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* OOB hash/rand must come from the controller; the
			 * response is sent by the completion callback.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Choose static/random vs public address; addr[6] encodes
		 * the address type (0x01 random, 0x00 public).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role byte: 0x02 while advertising, 0x01 otherwise */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the new OOB data to other interested sockets */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8368 
8369 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8370 {
8371 	u32 flags = 0;
8372 
8373 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8374 	flags |= MGMT_ADV_FLAG_DISCOV;
8375 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8376 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8377 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8378 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8379 	flags |= MGMT_ADV_PARAM_DURATION;
8380 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8381 	flags |= MGMT_ADV_PARAM_INTERVALS;
8382 	flags |= MGMT_ADV_PARAM_TX_POWER;
8383 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8384 
8385 	/* In extended adv TX_POWER returned from Set Adv Param
8386 	 * will be always valid.
8387 	 */
8388 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8389 		flags |= MGMT_ADV_FLAG_TX_POWER;
8390 
8391 	if (ext_adv_capable(hdev)) {
8392 		flags |= MGMT_ADV_FLAG_SEC_1M;
8393 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8394 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8395 
8396 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
8397 			flags |= MGMT_ADV_FLAG_SEC_2M;
8398 
8399 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
8400 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8401 	}
8402 
8403 	return flags;
8404 }
8405 
/* Handle MGMT_OP_READ_ADV_FEATURES: report the supported advertising
 * flags, data size limits and currently registered instance identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One byte per registered instance follows the fixed header */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible
		 *
		 * NOTE(review): the code compares against adv_instance_cnt
		 * while the comment above says le_num_of_adv_sets — confirm
		 * which bound is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reported list */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8460 
/* Number of bytes eir_append_local_name() would add for the current
 * local name — presumably including the TLV header, given the buffer is
 * sized name + 3 (TODO confirm in eir.c). Used to budget scan response
 * space in tlv_data_max_len().
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return eir_append_local_name(hdev, buf, 0);
}
8467 
8468 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8469 			   bool is_adv_data)
8470 {
8471 	u8 max_len = HCI_MAX_AD_LENGTH;
8472 
8473 	if (is_adv_data) {
8474 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8475 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8476 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8477 			max_len -= 3;
8478 
8479 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8480 			max_len -= 3;
8481 	} else {
8482 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8483 			max_len -= calculate_name_len(hdev);
8484 
8485 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8486 			max_len -= 4;
8487 	}
8488 
8489 	return max_len;
8490 }
8491 
8492 static bool flags_managed(u32 adv_flags)
8493 {
8494 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8495 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8496 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8497 }
8498 
8499 static bool tx_power_managed(u32 adv_flags)
8500 {
8501 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8502 }
8503 
8504 static bool name_managed(u32 adv_flags)
8505 {
8506 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8507 }
8508 
8509 static bool appearance_managed(u32 adv_flags)
8510 {
8511 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8512 }
8513 
/* Validate user-supplied adv/scan-rsp TLV data: it must fit within the
 * space left after kernel-managed fields and must not itself contain any
 * field the kernel manages (flags, TX power, name, appearance —
 * depending on adv_flags).
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		/* Zero-length octets are skipped */
		if (!cur_len)
			continue;

		/* Flags may never appear in scan response data and only in
		 * adv data when the kernel does not manage them.
		 */
		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 *
		 * NOTE(review): this bound check runs after data[i + 1] was
		 * read; with i == len - 1 and a non-zero length octet that
		 * read is one byte past len — confirm the mgmt message
		 * buffer guarantees that byte is readable.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}
8558 
8559 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8560 {
8561 	u32 supported_flags, phy_flags;
8562 
8563 	/* The current implementation only supports a subset of the specified
8564 	 * flags. Also need to check mutual exclusiveness of sec flags.
8565 	 */
8566 	supported_flags = get_supported_adv_flags(hdev);
8567 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8568 	if (adv_flags & ~supported_flags ||
8569 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8570 		return false;
8571 
8572 	return true;
8573 }
8574 
8575 static bool adv_busy(struct hci_dev *hdev)
8576 {
8577 	return pending_find(MGMT_OP_SET_LE, hdev);
8578 }
8579 
8580 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8581 			     int err)
8582 {
8583 	struct adv_info *adv, *n;
8584 
8585 	bt_dev_dbg(hdev, "err %d", err);
8586 
8587 	hci_dev_lock(hdev);
8588 
8589 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8590 		u8 instance;
8591 
8592 		if (!adv->pending)
8593 			continue;
8594 
8595 		if (!err) {
8596 			adv->pending = false;
8597 			continue;
8598 		}
8599 
8600 		instance = adv->instance;
8601 
8602 		if (hdev->cur_adv_instance == instance)
8603 			cancel_adv_timeout(hdev);
8604 
8605 		hci_remove_adv_instance(hdev, instance);
8606 		mgmt_advertising_removed(sk, hdev, instance);
8607 	}
8608 
8609 	hci_dev_unlock(hdev);
8610 }
8611 
8612 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8613 {
8614 	struct mgmt_pending_cmd *cmd = data;
8615 	struct mgmt_cp_add_advertising *cp = cmd->param;
8616 	struct mgmt_rp_add_advertising rp;
8617 
8618 	memset(&rp, 0, sizeof(rp));
8619 
8620 	rp.instance = cp->instance;
8621 
8622 	if (err)
8623 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8624 				mgmt_status(err));
8625 	else
8626 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8627 				  mgmt_status(err), &rp, sizeof(rp));
8628 
8629 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8630 
8631 	mgmt_pending_free(cmd);
8632 }
8633 
8634 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8635 {
8636 	struct mgmt_pending_cmd *cmd = data;
8637 	struct mgmt_cp_add_advertising *cp = cmd->param;
8638 
8639 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8640 }
8641 
/* Handler for MGMT_OP_ADD_ADVERTISING: register advertising instance
 * cp->instance with the supplied flags, advertising data and scan
 * response data, then schedule it for broadcast when appropriate.
 *
 * Returns a negative errno on internal failure, otherwise the result of
 * sending the mgmt status/complete response.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and bounded by the number of
	 * advertising sets the controller exposes.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The trailing variable-length payload must match the declared
	 * adv_data_len + scan_rsp_len exactly.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense on a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Validate both TLV payloads against the managed-field rules */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* NOTE(review): cmd was created from 'data' above, and cp aliases
	 * that same buffer; whether this write is visible through
	 * cmd->param in add_advertising_sync() depends on whether
	 * mgmt_pending_new() copies the buffer — confirm.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8776 
/* cmd_sync completion for Add Extended Advertising Parameters: on
 * success report the selected TX power and remaining payload space;
 * on failure tear the instance down again.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* If the instance has meanwhile disappeared, no response is sent
	 * and the pending command is simply freed.
	 */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* cmd is always non-NULL here (it is the 'data' argument); this
	 * check is purely defensive.
	 */
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8827 
8828 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8829 {
8830 	struct mgmt_pending_cmd *cmd = data;
8831 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8832 
8833 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8834 }
8835 
/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS: create an advertising
 * instance with the supplied parameters but no data yet; the data is
 * delivered separately via MGMT_OP_ADD_EXT_ADV_DATA.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based and bounded by the number of
	 * advertising sets the controller exposes.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Undo the instance created above */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: nothing to send to the controller
		 * yet, so answer immediately with the defaults.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8951 
8952 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8953 {
8954 	struct mgmt_pending_cmd *cmd = data;
8955 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8956 	struct mgmt_rp_add_advertising rp;
8957 
8958 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8959 
8960 	memset(&rp, 0, sizeof(rp));
8961 
8962 	rp.instance = cp->instance;
8963 
8964 	if (err)
8965 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8966 				mgmt_status(err));
8967 	else
8968 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8969 				  mgmt_status(err), &rp, sizeof(rp));
8970 
8971 	mgmt_pending_free(cmd);
8972 }
8973 
8974 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8975 {
8976 	struct mgmt_pending_cmd *cmd = data;
8977 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8978 	int err;
8979 
8980 	if (ext_adv_capable(hdev)) {
8981 		err = hci_update_adv_data_sync(hdev, cp->instance);
8982 		if (err)
8983 			return err;
8984 
8985 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8986 		if (err)
8987 			return err;
8988 
8989 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8990 	}
8991 
8992 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8993 }
8994 
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA: attach advertising and scan
 * response data to an instance previously created by
 * MGMT_OP_ADD_EXT_ADV_PARAMS, then schedule it.  On any validation
 * failure after the instance lookup, the half-created instance is
 * removed again (clear_new_instance).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		/* The instance is complete now; emit the added event that
		 * was deferred while it was pending.
		 */
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9113 
9114 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9115 					int err)
9116 {
9117 	struct mgmt_pending_cmd *cmd = data;
9118 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9119 	struct mgmt_rp_remove_advertising rp;
9120 
9121 	bt_dev_dbg(hdev, "err %d", err);
9122 
9123 	memset(&rp, 0, sizeof(rp));
9124 	rp.instance = cp->instance;
9125 
9126 	if (err)
9127 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9128 				mgmt_status(err));
9129 	else
9130 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9131 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9132 
9133 	mgmt_pending_free(cmd);
9134 }
9135 
9136 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9137 {
9138 	struct mgmt_pending_cmd *cmd = data;
9139 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9140 	int err;
9141 
9142 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9143 	if (err)
9144 		return err;
9145 
9146 	if (list_empty(&hdev->adv_instances))
9147 		err = hci_disable_advertising_sync(hdev);
9148 
9149 	return err;
9150 }
9151 
/* Handler for MGMT_OP_REMOVE_ADVERTISING: remove instance cp->instance,
 * or all instances when cp->instance is 0.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* A non-zero instance must refer to an existing instance */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* A pending Set LE command means advertising state is in flux */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered, so nothing to remove */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9199 
9200 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9201 			     void *data, u16 data_len)
9202 {
9203 	struct mgmt_cp_get_adv_size_info *cp = data;
9204 	struct mgmt_rp_get_adv_size_info rp;
9205 	u32 flags, supported_flags;
9206 
9207 	bt_dev_dbg(hdev, "sock %p", sk);
9208 
9209 	if (!lmp_le_capable(hdev))
9210 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9211 				       MGMT_STATUS_REJECTED);
9212 
9213 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9214 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9215 				       MGMT_STATUS_INVALID_PARAMS);
9216 
9217 	flags = __le32_to_cpu(cp->flags);
9218 
9219 	/* The current implementation only supports a subset of the specified
9220 	 * flags.
9221 	 */
9222 	supported_flags = get_supported_adv_flags(hdev);
9223 	if (flags & ~supported_flags)
9224 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9225 				       MGMT_STATUS_INVALID_PARAMS);
9226 
9227 	rp.instance = cp->instance;
9228 	rp.flags = cp->flags;
9229 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9230 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9231 
9232 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9233 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9234 }
9235 
/* Management command dispatch table.  Entry N handles mgmt opcode N
 * (slot 0x0000 is intentionally empty), so the order of entries must
 * match the opcode numbering in <net/bluetooth/mgmt.h> — do not reorder.
 * Each entry gives the handler, the (minimum) parameter size, and
 * optional HCI_MGMT_* flags (variable length, no/optional hdev,
 * untrusted-socket access, unconfigured-state access).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9369 
9370 void mgmt_index_added(struct hci_dev *hdev)
9371 {
9372 	struct mgmt_ev_ext_index ev;
9373 
9374 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9375 		return;
9376 
9377 	switch (hdev->dev_type) {
9378 	case HCI_PRIMARY:
9379 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9380 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9381 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9382 			ev.type = 0x01;
9383 		} else {
9384 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9385 					 HCI_MGMT_INDEX_EVENTS);
9386 			ev.type = 0x00;
9387 		}
9388 		break;
9389 	case HCI_AMP:
9390 		ev.type = 0x02;
9391 		break;
9392 	default:
9393 		return;
9394 	}
9395 
9396 	ev.bus = hdev->bus;
9397 
9398 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9399 			 HCI_MGMT_EXT_INDEX_EVENTS);
9400 }
9401 
9402 void mgmt_index_removed(struct hci_dev *hdev)
9403 {
9404 	struct mgmt_ev_ext_index ev;
9405 	u8 status = MGMT_STATUS_INVALID_INDEX;
9406 
9407 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9408 		return;
9409 
9410 	switch (hdev->dev_type) {
9411 	case HCI_PRIMARY:
9412 		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
9413 
9414 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9415 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
9416 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9417 			ev.type = 0x01;
9418 		} else {
9419 			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9420 					 HCI_MGMT_INDEX_EVENTS);
9421 			ev.type = 0x00;
9422 		}
9423 		break;
9424 	case HCI_AMP:
9425 		ev.type = 0x02;
9426 		break;
9427 	default:
9428 		return;
9429 	}
9430 
9431 	ev.bus = hdev->bus;
9432 
9433 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9434 			 HCI_MGMT_EXT_INDEX_EVENTS);
9435 
9436 	/* Cancel any remaining timed work */
9437 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
9438 		return;
9439 	cancel_delayed_work_sync(&hdev->discov_off);
9440 	cancel_delayed_work_sync(&hdev->service_cache);
9441 	cancel_delayed_work_sync(&hdev->rpa_expired);
9442 }
9443 
/* Called when a power-on attempt finished with result err: answer all
 * pending Set Powered commands and emit New Settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		/* Controller is up: re-arm LE actions and passive scan */
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* NOTE(review): presumably settings_rsp stored a referenced socket
	 * in match.sk; the reference is dropped here — confirm.
	 */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9466 
/* Handle the controller being powered off.
 *
 * Answers pending Set Powered commands, fails all other pending mgmt
 * commands with an appropriate status, announces a cleared class of
 * device if one was set, and sends a New Settings event.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail every remaining pending command (opcode 0 == all). */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* If a non-zero class of device was set, report it as cleared. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* Drop the reference taken by settings_rsp, if any. */
	if (match.sk)
		sock_put(match.sk);
}
9500 
9501 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9502 {
9503 	struct mgmt_pending_cmd *cmd;
9504 	u8 status;
9505 
9506 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9507 	if (!cmd)
9508 		return;
9509 
9510 	if (err == -ERFKILL)
9511 		status = MGMT_STATUS_RFKILLED;
9512 	else
9513 		status = MGMT_STATUS_FAILED;
9514 
9515 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9516 
9517 	mgmt_pending_remove(cmd);
9518 }
9519 
/* Send a New Link Key event for a BR/EDR link key.
 *
 * @persistent: store hint telling userspace whether to persist the key
 *
 * The event is zeroed before being filled so that no stale stack
 * bytes end up in the emitted event.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9536 
9537 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9538 {
9539 	switch (ltk->type) {
9540 	case SMP_LTK:
9541 	case SMP_LTK_RESPONDER:
9542 		if (ltk->authenticated)
9543 			return MGMT_LTK_AUTHENTICATED;
9544 		return MGMT_LTK_UNAUTHENTICATED;
9545 	case SMP_LTK_P256:
9546 		if (ltk->authenticated)
9547 			return MGMT_LTK_P256_AUTH;
9548 		return MGMT_LTK_P256_UNAUTH;
9549 	case SMP_LTK_P256_DEBUG:
9550 		return MGMT_LTK_P256_DEBUG;
9551 	}
9552 
9553 	return MGMT_LTK_UNAUTHENTICATED;
9554 }
9555 
/* Send a New Long Term Key event for an LE LTK.
 *
 * The store hint is suppressed for non-identity random addresses,
 * and only the significant key bytes (per the encryption key size)
 * are copied into the event.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key of the connection initiator. */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9598 
/* Send a New Identity Resolving Key event.
 *
 * @persistent: store hint telling userspace whether to persist the IRK
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	/* Zero the event so no stale stack bytes are emitted. */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9614 
/* Send a New Connection Signature Resolving Key event.
 *
 * The store hint is suppressed for non-identity random addresses,
 * same policy as for long term keys in mgmt_new_ltk().
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9644 
/* Send a New Connection Parameter event for an LE connection.
 *
 * Events are only generated for identity addresses; parameters for
 * changing (non-identity) random addresses are not worth storing.
 * All multi-byte fields are converted to little endian for the wire.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9665 
9666 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9667 			   u8 *name, u8 name_len)
9668 {
9669 	struct sk_buff *skb;
9670 	struct mgmt_ev_device_connected *ev;
9671 	u16 eir_len = 0;
9672 	u32 flags = 0;
9673 
9674 	/* allocate buff for LE or BR/EDR adv */
9675 	if (conn->le_adv_data_len > 0)
9676 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9677 				     sizeof(*ev) + conn->le_adv_data_len);
9678 	else
9679 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9680 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9681 				     eir_precalc_len(sizeof(conn->dev_class)));
9682 
9683 	ev = skb_put(skb, sizeof(*ev));
9684 	bacpy(&ev->addr.bdaddr, &conn->dst);
9685 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9686 
9687 	if (conn->out)
9688 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9689 
9690 	ev->flags = __cpu_to_le32(flags);
9691 
9692 	/* We must ensure that the EIR Data fields are ordered and
9693 	 * unique. Keep it simple for now and avoid the problem by not
9694 	 * adding any BR/EDR data to the LE adv.
9695 	 */
9696 	if (conn->le_adv_data_len > 0) {
9697 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9698 		eir_len = conn->le_adv_data_len;
9699 	} else {
9700 		if (name)
9701 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9702 
9703 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9704 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9705 						    conn->dev_class, sizeof(conn->dev_class));
9706 	}
9707 
9708 	ev->eir_len = cpu_to_le16(eir_len);
9709 
9710 	mgmt_event_skb(skb, NULL);
9711 }
9712 
9713 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9714 {
9715 	struct sock **sk = data;
9716 
9717 	cmd->cmd_complete(cmd, 0);
9718 
9719 	*sk = cmd->sk;
9720 	sock_hold(*sk);
9721 
9722 	mgmt_pending_remove(cmd);
9723 }
9724 
/* mgmt_pending_foreach() callback: emit a Device Unpaired event for
 * the address in the pending Unpair Device command, then complete
 * and remove the command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9735 
9736 bool mgmt_powering_down(struct hci_dev *hdev)
9737 {
9738 	struct mgmt_pending_cmd *cmd;
9739 	struct mgmt_mode *cp;
9740 
9741 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9742 	if (!cmd)
9743 		return false;
9744 
9745 	cp = cmd->param;
9746 	if (!cp->val)
9747 		return true;
9748 
9749 	return false;
9750 }
9751 
/* Send a Device Disconnected event and complete related pending
 * commands.
 *
 * @mgmt_connected: whether a Device Connected event was sent earlier;
 *                  if not, no disconnect event is generated either
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	/* Only ACL and LE links are reported over mgmt. */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete pending Disconnect commands; disconnect_rsp stores
	 * the command's socket in sk with a reference held.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	/* Drop the reference taken by disconnect_rsp, if any. */
	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9791 
9792 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9793 			    u8 link_type, u8 addr_type, u8 status)
9794 {
9795 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9796 	struct mgmt_cp_disconnect *cp;
9797 	struct mgmt_pending_cmd *cmd;
9798 
9799 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9800 			     hdev);
9801 
9802 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9803 	if (!cmd)
9804 		return;
9805 
9806 	cp = cmd->param;
9807 
9808 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9809 		return;
9810 
9811 	if (cp->addr.type != bdaddr_type)
9812 		return;
9813 
9814 	cmd->cmd_complete(cmd, mgmt_status(status));
9815 	mgmt_pending_remove(cmd);
9816 }
9817 
/* Send a Connect Failed event with the translated HCI status. */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9837 
9838 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9839 {
9840 	struct mgmt_ev_pin_code_request ev;
9841 
9842 	bacpy(&ev.addr.bdaddr, bdaddr);
9843 	ev.addr.type = BDADDR_BREDR;
9844 	ev.secure = secure;
9845 
9846 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9847 }
9848 
9849 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9850 				  u8 status)
9851 {
9852 	struct mgmt_pending_cmd *cmd;
9853 
9854 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9855 	if (!cmd)
9856 		return;
9857 
9858 	cmd->cmd_complete(cmd, mgmt_status(status));
9859 	mgmt_pending_remove(cmd);
9860 }
9861 
9862 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9863 				      u8 status)
9864 {
9865 	struct mgmt_pending_cmd *cmd;
9866 
9867 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9868 	if (!cmd)
9869 		return;
9870 
9871 	cmd->cmd_complete(cmd, mgmt_status(status));
9872 	mgmt_pending_remove(cmd);
9873 }
9874 
9875 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9876 			      u8 link_type, u8 addr_type, u32 value,
9877 			      u8 confirm_hint)
9878 {
9879 	struct mgmt_ev_user_confirm_request ev;
9880 
9881 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9882 
9883 	bacpy(&ev.addr.bdaddr, bdaddr);
9884 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9885 	ev.confirm_hint = confirm_hint;
9886 	ev.value = cpu_to_le32(value);
9887 
9888 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9889 			  NULL);
9890 }
9891 
9892 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9893 			      u8 link_type, u8 addr_type)
9894 {
9895 	struct mgmt_ev_user_passkey_request ev;
9896 
9897 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9898 
9899 	bacpy(&ev.addr.bdaddr, bdaddr);
9900 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9901 
9902 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9903 			  NULL);
9904 }
9905 
9906 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9907 				      u8 link_type, u8 addr_type, u8 status,
9908 				      u8 opcode)
9909 {
9910 	struct mgmt_pending_cmd *cmd;
9911 
9912 	cmd = pending_find(opcode, hdev);
9913 	if (!cmd)
9914 		return -ENOENT;
9915 
9916 	cmd->cmd_complete(cmd, mgmt_status(status));
9917 	mgmt_pending_remove(cmd);
9918 
9919 	return 0;
9920 }
9921 
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9928 
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9936 
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9943 
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9951 
9952 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9953 			     u8 link_type, u8 addr_type, u32 passkey,
9954 			     u8 entered)
9955 {
9956 	struct mgmt_ev_passkey_notify ev;
9957 
9958 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9959 
9960 	bacpy(&ev.addr.bdaddr, bdaddr);
9961 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9962 	ev.passkey = __cpu_to_le32(passkey);
9963 	ev.entered = entered;
9964 
9965 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9966 }
9967 
/* Send an Authentication Failed event for a connection and, if a
 * pairing command is pending for it, complete that command with the
 * same status. The pairing command's socket is skipped when sending
 * the event since it gets the command response instead.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9988 
/* Handle completion of enabling/disabling authentication (link
 * security). On failure all pending Set Link Security commands are
 * answered with the translated error; on success the LINK_SECURITY
 * flag is synced with the HCI_AUTH state and a New Settings event is
 * sent if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the dev flag and
	 * note whether it actually changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the reference taken by settings_rsp, if any. */
	if (match.sk)
		sock_put(match.sk);
}
10015 
10016 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
10017 {
10018 	struct cmd_lookup *match = data;
10019 
10020 	if (match->sk == NULL) {
10021 		match->sk = cmd->sk;
10022 		sock_hold(match->sk);
10023 	}
10024 }
10025 
/* Handle completion of a class of device update. Collects the socket
 * of any pending command that triggered the change (Set Dev Class,
 * Add UUID or Remove UUID) and, on success, broadcasts the new class
 * to everyone else.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	/* Drop the reference taken by sk_lookup, if any. */
	if (match.sk)
		sock_put(match.sk);
}
10044 
/* Handle completion of a local name update.
 *
 * If no Set Local Name command is pending the change originated in
 * the controller/stack itself: the new name is stored in hdev, and
 * no event is sent when the change is part of powering on. Otherwise
 * the Local Name Changed event is sent, skipping the command's own
 * socket.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
10072 
10073 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10074 {
10075 	int i;
10076 
10077 	for (i = 0; i < uuid_count; i++) {
10078 		if (!memcmp(uuid, uuids[i], 16))
10079 			return true;
10080 	}
10081 
10082 	return false;
10083 }
10084 
/* Walk the EIR data in @eir and return true if any advertised UUID
 * (16-, 32- or 128-bit) matches an entry in @uuids. Shorter UUIDs are
 * expanded to 128 bits using the Bluetooth base UUID before comparing.
 *
 * Each EIR structure is: length byte (not counting itself), type byte,
 * then (length - 1) data bytes.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the EIR data. */
		if (field_len == 0)
			break;

		/* Stop on a field that claims more bytes than remain. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs occupy bytes 12-13 (little endian
			 * in EIR) of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs occupy bytes 12-15 of the base UUID. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are copied as-is. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + payload). */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10139 
/* Schedule an LE scan restart so that duplicate filtering in the
 * controller reports fresh RSSI values. The restart is skipped when
 * the controller is not scanning or when the current scan window
 * would end before the restart delay elapses anyway.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
10154 
/* Apply the service-discovery result filter to a found device.
 *
 * Returns true if the result passes both the RSSI threshold and the
 * UUID filter (when either is configured) and should be reported.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10199 
10200 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10201 				  bdaddr_t *bdaddr, u8 addr_type)
10202 {
10203 	struct mgmt_ev_adv_monitor_device_lost ev;
10204 
10205 	ev.monitor_handle = cpu_to_le16(handle);
10206 	bacpy(&ev.addr.bdaddr, bdaddr);
10207 	ev.addr.type = addr_type;
10208 
10209 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10210 		   NULL);
10211 }
10212 
/* Send an Advertisement Monitor Device Found event built from an
 * existing Device Found skb.
 *
 * @skb:    the Device Found event payload to copy (not consumed)
 * @handle: handle of the matched monitor, 0 if none applies
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* The new event is the old payload plus the size difference
	 * between the two event structures (the monitor_handle field).
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10242 
/* Dispatch a found-device skb to Device Found and/or Advertisement
 * Monitor Device Found listeners.
 *
 * Takes ownership of @skb: it is either sent via mgmt_event_skb() or
 * freed before returning.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: set again if any monitored device still
	 * awaits its one-time notification.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Notify at most once per monitored device. */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Consume the skb: either send it as a Device Found event or
	 * free it (mgmt_send_adv_monitor_device_found only copies).
	 */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10306 
/* Send a Mesh Device Found event for an LE advertisement.
 *
 * If a list of requested AD types is configured in hdev->mesh_ad_types
 * (first entry non-zero), the advertising and scan response data are
 * scanned and the report is dropped unless one of those AD types is
 * present. An all-zero list accepts every advertisement.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* i steps over AD structures; eir[i] is the length byte,
		 * eir[i + 1] the AD type.
		 */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* A zero entry terminates the type list. */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop the report. */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10372 
/* Report a found device to mgmt listeners.
 *
 * Applies mesh forwarding, discovery filtering (RSSI/UUID) and limited
 * discovery checks before building a Device Found skb and handing it
 * to mgmt_adv_monitor_device_found(), which consumes the skb.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* LE advertisements are additionally forwarded as mesh events
	 * when mesh mode is enabled.
	 */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Append a CoD EIR field unless the EIR data already carries one. */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Ownership of skb passes to mgmt_adv_monitor_device_found(). */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10464 
10465 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10466 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10467 {
10468 	struct sk_buff *skb;
10469 	struct mgmt_ev_device_found *ev;
10470 	u16 eir_len = 0;
10471 	u32 flags = 0;
10472 
10473 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10474 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10475 
10476 	ev = skb_put(skb, sizeof(*ev));
10477 	bacpy(&ev->addr.bdaddr, bdaddr);
10478 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10479 	ev->rssi = rssi;
10480 
10481 	if (name)
10482 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10483 	else
10484 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10485 
10486 	ev->eir_len = cpu_to_le16(eir_len);
10487 	ev->flags = cpu_to_le32(flags);
10488 
10489 	mgmt_event_skb(skb, NULL);
10490 }
10491 
10492 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10493 {
10494 	struct mgmt_ev_discovering ev;
10495 
10496 	bt_dev_dbg(hdev, "discovering %u", discovering);
10497 
10498 	memset(&ev, 0, sizeof(ev));
10499 	ev.type = hdev->discovery.type;
10500 	ev.discovering = discovering;
10501 
10502 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10503 }
10504 
10505 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10506 {
10507 	struct mgmt_ev_controller_suspend ev;
10508 
10509 	ev.suspend_state = state;
10510 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10511 }
10512 
10513 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10514 		   u8 addr_type)
10515 {
10516 	struct mgmt_ev_controller_resume ev;
10517 
10518 	ev.wake_reason = reason;
10519 	if (bdaddr) {
10520 		bacpy(&ev.addr.bdaddr, bdaddr);
10521 		ev.addr.type = addr_type;
10522 	} else {
10523 		memset(&ev.addr, 0, sizeof(ev.addr));
10524 	}
10525 
10526 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10527 }
10528 
/* Registration record for the management control channel: dispatches
 * incoming commands via the mgmt_handlers table and runs mgmt_init_hdev
 * for per-controller setup.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10535 
/* Register the management control channel with the HCI socket layer.
 * Returns 0 on success or a negative error code from
 * hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10540 
/* Unregister the control channel registered by mgmt_init(). */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10545 
10546 void mgmt_cleanup(struct sock *sk)
10547 {
10548 	struct mgmt_mesh_tx *mesh_tx;
10549 	struct hci_dev *hdev;
10550 
10551 	read_lock(&hci_dev_list_lock);
10552 
10553 	list_for_each_entry(hdev, &hci_dev_list, list) {
10554 		do {
10555 			mesh_tx = mgmt_mesh_next(hdev, sk);
10556 
10557 			if (mesh_tx)
10558 				mesh_send_complete(hdev, mesh_tx, true);
10559 		} while (mesh_tx);
10560 	}
10561 
10562 	read_unlock(&hci_dev_list_lock);
10563 }
10564