xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 96d3e6f0)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Opcodes a trusted (privileged) management socket may issue; reported
 * verbatim by Read Management Commands.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
133 
/* Events delivered to trusted management sockets; reported verbatim by
 * Read Management Events.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
180 
/* Read-only subset of opcodes permitted on untrusted sockets. */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
193 
/* Subset of events delivered to untrusted sockets. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
208 
209 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
210 
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
213 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code (0x00 = success); entries are
 * in HCI status order, so the per-entry comments name the HCI error each
 * MGMT status is derived from. Consumed by mgmt_status() below.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
281 
282 static u8 mgmt_errno_status(int err)
283 {
284 	switch (err) {
285 	case 0:
286 		return MGMT_STATUS_SUCCESS;
287 	case -EPERM:
288 		return MGMT_STATUS_REJECTED;
289 	case -EINVAL:
290 		return MGMT_STATUS_INVALID_PARAMS;
291 	case -EOPNOTSUPP:
292 		return MGMT_STATUS_NOT_SUPPORTED;
293 	case -EBUSY:
294 		return MGMT_STATUS_BUSY;
295 	case -ETIMEDOUT:
296 		return MGMT_STATUS_AUTH_FAILED;
297 	case -ENOMEM:
298 		return MGMT_STATUS_NO_RESOURCES;
299 	case -EISCONN:
300 		return MGMT_STATUS_ALREADY_CONNECTED;
301 	case -ENOTCONN:
302 		return MGMT_STATUS_DISCONNECTED;
303 	}
304 
305 	return MGMT_STATUS_FAILED;
306 }
307 
308 static u8 mgmt_status(int err)
309 {
310 	if (err < 0)
311 		return mgmt_errno_status(err);
312 
313 	if (err < ARRAY_SIZE(mgmt_status_table))
314 		return mgmt_status_table[err];
315 
316 	return MGMT_STATUS_FAILED;
317 }
318 
/* Broadcast an index-related event on the control channel to every
 * socket matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
325 
/* Broadcast @event on the control channel to sockets matching @flag,
 * optionally skipping @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
332 
/* Broadcast @event on the control channel to trusted sockets only,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
339 
/* Broadcast a pre-built event skb on the control channel to trusted
 * sockets, optionally skipping @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
345 
346 static u8 le_addr_type(u8 mgmt_addr_type)
347 {
348 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
349 		return ADDR_LE_DEV_PUBLIC;
350 	else
351 		return ADDR_LE_DEV_RANDOM;
352 }
353 
/* Fill a mgmt_rp_read_version structure (passed as an untyped buffer)
 * with the interface version and little-endian revision.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
361 
/* Handle MGMT_OP_READ_VERSION: reply with the management interface
 * version/revision. No controller index is involved.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
374 
375 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
376 			 u16 data_len)
377 {
378 	struct mgmt_rp_read_commands *rp;
379 	u16 num_commands, num_events;
380 	size_t rp_size;
381 	int i, err;
382 
383 	bt_dev_dbg(hdev, "sock %p", sk);
384 
385 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
386 		num_commands = ARRAY_SIZE(mgmt_commands);
387 		num_events = ARRAY_SIZE(mgmt_events);
388 	} else {
389 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
390 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
391 	}
392 
393 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
394 
395 	rp = kmalloc(rp_size, GFP_KERNEL);
396 	if (!rp)
397 		return -ENOMEM;
398 
399 	rp->num_commands = cpu_to_le16(num_commands);
400 	rp->num_events = cpu_to_le16(num_events);
401 
402 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
403 		__le16 *opcode = rp->opcodes;
404 
405 		for (i = 0; i < num_commands; i++, opcode++)
406 			put_unaligned_le16(mgmt_commands[i], opcode);
407 
408 		for (i = 0; i < num_events; i++, opcode++)
409 			put_unaligned_le16(mgmt_events[i], opcode);
410 	} else {
411 		__le16 *opcode = rp->opcodes;
412 
413 		for (i = 0; i < num_commands; i++, opcode++)
414 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
415 
416 		for (i = 0; i < num_events; i++, opcode++)
417 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
418 	}
419 
420 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
421 				rp, rp_size);
422 	kfree(rp);
423 
424 	return err;
425 }
426 
427 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
428 			   u16 data_len)
429 {
430 	struct mgmt_rp_read_index_list *rp;
431 	struct hci_dev *d;
432 	size_t rp_len;
433 	u16 count;
434 	int err;
435 
436 	bt_dev_dbg(hdev, "sock %p", sk);
437 
438 	read_lock(&hci_dev_list_lock);
439 
440 	count = 0;
441 	list_for_each_entry(d, &hci_dev_list, list) {
442 		if (d->dev_type == HCI_PRIMARY &&
443 		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
444 			count++;
445 	}
446 
447 	rp_len = sizeof(*rp) + (2 * count);
448 	rp = kmalloc(rp_len, GFP_ATOMIC);
449 	if (!rp) {
450 		read_unlock(&hci_dev_list_lock);
451 		return -ENOMEM;
452 	}
453 
454 	count = 0;
455 	list_for_each_entry(d, &hci_dev_list, list) {
456 		if (hci_dev_test_flag(d, HCI_SETUP) ||
457 		    hci_dev_test_flag(d, HCI_CONFIG) ||
458 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
459 			continue;
460 
461 		/* Devices marked as raw-only are neither configured
462 		 * nor unconfigured controllers.
463 		 */
464 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
465 			continue;
466 
467 		if (d->dev_type == HCI_PRIMARY &&
468 		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
469 			rp->index[count++] = cpu_to_le16(d->id);
470 			bt_dev_dbg(hdev, "Added hci%u", d->id);
471 		}
472 	}
473 
474 	rp->num_controllers = cpu_to_le16(count);
475 	rp_len = sizeof(*rp) + (2 * count);
476 
477 	read_unlock(&hci_dev_list_lock);
478 
479 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
480 				0, rp, rp_len);
481 
482 	kfree(rp);
483 
484 	return err;
485 }
486 
487 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
488 				  void *data, u16 data_len)
489 {
490 	struct mgmt_rp_read_unconf_index_list *rp;
491 	struct hci_dev *d;
492 	size_t rp_len;
493 	u16 count;
494 	int err;
495 
496 	bt_dev_dbg(hdev, "sock %p", sk);
497 
498 	read_lock(&hci_dev_list_lock);
499 
500 	count = 0;
501 	list_for_each_entry(d, &hci_dev_list, list) {
502 		if (d->dev_type == HCI_PRIMARY &&
503 		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
504 			count++;
505 	}
506 
507 	rp_len = sizeof(*rp) + (2 * count);
508 	rp = kmalloc(rp_len, GFP_ATOMIC);
509 	if (!rp) {
510 		read_unlock(&hci_dev_list_lock);
511 		return -ENOMEM;
512 	}
513 
514 	count = 0;
515 	list_for_each_entry(d, &hci_dev_list, list) {
516 		if (hci_dev_test_flag(d, HCI_SETUP) ||
517 		    hci_dev_test_flag(d, HCI_CONFIG) ||
518 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
519 			continue;
520 
521 		/* Devices marked as raw-only are neither configured
522 		 * nor unconfigured controllers.
523 		 */
524 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
525 			continue;
526 
527 		if (d->dev_type == HCI_PRIMARY &&
528 		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
529 			rp->index[count++] = cpu_to_le16(d->id);
530 			bt_dev_dbg(hdev, "Added hci%u", d->id);
531 		}
532 	}
533 
534 	rp->num_controllers = cpu_to_le16(count);
535 	rp_len = sizeof(*rp) + (2 * count);
536 
537 	read_unlock(&hci_dev_list_lock);
538 
539 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
540 				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
541 
542 	kfree(rp);
543 
544 	return err;
545 }
546 
547 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
548 			       void *data, u16 data_len)
549 {
550 	struct mgmt_rp_read_ext_index_list *rp;
551 	struct hci_dev *d;
552 	u16 count;
553 	int err;
554 
555 	bt_dev_dbg(hdev, "sock %p", sk);
556 
557 	read_lock(&hci_dev_list_lock);
558 
559 	count = 0;
560 	list_for_each_entry(d, &hci_dev_list, list) {
561 		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
562 			count++;
563 	}
564 
565 	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
566 	if (!rp) {
567 		read_unlock(&hci_dev_list_lock);
568 		return -ENOMEM;
569 	}
570 
571 	count = 0;
572 	list_for_each_entry(d, &hci_dev_list, list) {
573 		if (hci_dev_test_flag(d, HCI_SETUP) ||
574 		    hci_dev_test_flag(d, HCI_CONFIG) ||
575 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
576 			continue;
577 
578 		/* Devices marked as raw-only are neither configured
579 		 * nor unconfigured controllers.
580 		 */
581 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
582 			continue;
583 
584 		if (d->dev_type == HCI_PRIMARY) {
585 			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
586 				rp->entry[count].type = 0x01;
587 			else
588 				rp->entry[count].type = 0x00;
589 		} else if (d->dev_type == HCI_AMP) {
590 			rp->entry[count].type = 0x02;
591 		} else {
592 			continue;
593 		}
594 
595 		rp->entry[count].bus = d->bus;
596 		rp->entry[count++].index = cpu_to_le16(d->id);
597 		bt_dev_dbg(hdev, "Added hci%u", d->id);
598 	}
599 
600 	rp->num_controllers = cpu_to_le16(count);
601 
602 	read_unlock(&hci_dev_list_lock);
603 
604 	/* If this command is called at least once, then all the
605 	 * default index and unconfigured index events are disabled
606 	 * and from now on only extended index events are used.
607 	 */
608 	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
609 	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
610 	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
611 
612 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
613 				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
614 				struct_size(rp, entry, count));
615 
616 	kfree(rp);
617 
618 	return err;
619 }
620 
621 static bool is_configured(struct hci_dev *hdev)
622 {
623 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
624 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
625 		return false;
626 
627 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
628 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
629 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
630 		return false;
631 
632 	return true;
633 }
634 
635 static __le32 get_missing_options(struct hci_dev *hdev)
636 {
637 	u32 options = 0;
638 
639 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
640 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
641 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
642 
643 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
644 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
645 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
646 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
647 
648 	return cpu_to_le32(options);
649 }
650 
/* Emit a New Configuration Options event carrying the currently missing
 * options, skipping the socket that triggered the change.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
658 
/* Complete @opcode with the currently missing configuration options as
 * the response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
666 
/* Handle MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer, the
 * configuration options this controller supports and those still
 * missing. The device lock guards the snapshot of hdev state.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can only be configured if the driver provides
	 * a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
694 
/* Build the bitmask of PHYs this controller's hardware supports, derived
 * from the BR/EDR LMP feature bits and the LE PHY feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is implied by BR/EDR support itself */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 3M support is nested inside EDR 2M support */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is implied by LE support itself */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
746 
/* Build the bitmask of PHYs currently selected for use.
 *
 * Note the asymmetry in the pkt_type tests: basic-rate multi-slot bits
 * (HCI_DM3/DH3/DM5/DH5) are tested positively, while the EDR bits
 * (HCI_2DHx/3DHx) are tested negated — in hdev->pkt_type a set EDR bit
 * disables that packet type (presumably the HCI "shall not be used"
 * encoding; confirm against the Create Connection packet-type spec).
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is always selected when BR/EDR is on */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE selection tracks the default TX/RX PHY preferences */
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
809 
810 static u32 get_configurable_phys(struct hci_dev *hdev)
811 {
812 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
813 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
814 }
815 
/* Build the bitmask of MGMT settings this controller can support, based
 * on its BR/EDR and LE capabilities, quirks and driver callbacks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of transport support */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs CONFIG_BT_HS */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration requires either external config support or a
	 * driver-provided set_bdaddr callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
862 
/* Build the bitmask of MGMT settings currently active on @hdev, mostly
 * a straight translation of the HCI_* device flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
933 
/* Find a pending mgmt command for @opcode on @hdev on the control
 * channel, or NULL if none is outstanding.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
938 
939 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
940 {
941 	struct mgmt_pending_cmd *cmd;
942 
943 	/* If there's a pending mgmt command the flags will not yet have
944 	 * their final values, so check for this first.
945 	 */
946 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
947 	if (cmd) {
948 		struct mgmt_mode *cp = cmd->param;
949 		if (cp->val == 0x01)
950 			return LE_AD_GENERAL;
951 		else if (cp->val == 0x02)
952 			return LE_AD_LIMITED;
953 	} else {
954 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
955 			return LE_AD_LIMITED;
956 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
957 			return LE_AD_GENERAL;
958 	}
959 
960 	return 0;
961 }
962 
963 bool mgmt_get_connectable(struct hci_dev *hdev)
964 {
965 	struct mgmt_pending_cmd *cmd;
966 
967 	/* If there's a pending mgmt command the flag will not yet have
968 	 * it's final value, so check for this first.
969 	 */
970 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
971 	if (cmd) {
972 		struct mgmt_mode *cp = cmd->param;
973 
974 		return cp->val;
975 	}
976 
977 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
978 }
979 
/* hci_cmd_sync callback: flush the cached service changes by refreshing
 * the EIR data and the class of device. Always reports success.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
987 
/* Delayed work: when the service-cache period expires, queue the sync
 * that pushes the accumulated EIR/class updates to the controller.
 * Does nothing unless HCI_SERVICE_CACHE was still set.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
998 
999 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1000 {
1001 	/* The generation of a new RPA and programming it into the
1002 	 * controller happens in the hci_req_enable_advertising()
1003 	 * function.
1004 	 */
1005 	if (ext_adv_capable(hdev))
1006 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1007 	else
1008 		return hci_enable_advertising_sync(hdev);
1009 }
1010 
/* Delayed work: mark the resolvable private address as expired and, if
 * advertising is active, queue a sync to restart it (which regenerates
 * the RPA).
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1025 
/* Delayed work handler for hdev->discov_off: the discoverable timeout
 * expired, so drop both discoverable flags, refresh the controller
 * state and notify mgmt listeners of the new settings.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_update_discoverable(hdev);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1050 
/* One-time per-device mgmt initialization, performed the first time a
 * mgmt command targets @hdev. Sets HCI_MGMT and wires up the delayed
 * work items used by the management interface.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* test_and_set guarantees the body runs at most once per device */
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1069 
/* Handler for MGMT_OP_READ_INFO: snapshot the controller's address,
 * version, settings, class and names under hdev->lock and return them
 * in a Read Controller Information response.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* Settings are computed while locked so the snapshot is coherent */
	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1099 
1100 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1101 {
1102 	u16 eir_len = 0;
1103 	size_t name_len;
1104 
1105 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1106 		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1107 					  hdev->dev_class, 3);
1108 
1109 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1110 		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1111 					  hdev->appearance);
1112 
1113 	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1114 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1115 				  hdev->dev_name, name_len);
1116 
1117 	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1118 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1119 				  hdev->short_name, name_len);
1120 
1121 	return eir_len;
1122 }
1123 
/* Handler for MGMT_OP_READ_EXT_INFO: like read_controller_info() but
 * returns class/appearance/names as a variable-length EIR blob. Also
 * switches the requesting socket over to extended info events.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* rp aliases buf; the EIR data is appended after the fixed header */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1163 
/* Broadcast an Extended Controller Information Changed event carrying
 * the current EIR blob to all sockets that opted in via READ_EXT_INFO,
 * except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	/* ev aliases buf; EIR data follows the fixed-size event header */
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1179 
/* Complete a settings-changing mgmt command by replying with the
 * device's current settings bitmask (little-endian on the wire).
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1187 
1188 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1189 {
1190 	struct mgmt_ev_advertising_added ev;
1191 
1192 	ev.instance = instance;
1193 
1194 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1195 }
1196 
1197 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1198 			      u8 instance)
1199 {
1200 	struct mgmt_ev_advertising_removed ev;
1201 
1202 	ev.instance = instance;
1203 
1204 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1205 }
1206 
1207 static void cancel_adv_timeout(struct hci_dev *hdev)
1208 {
1209 	if (hdev->adv_instance_timeout) {
1210 		hdev->adv_instance_timeout = 0;
1211 		cancel_delayed_work(&hdev->adv_instance_expire);
1212 	}
1213 }
1214 
1215 /* This function requires the caller holds hdev->lock */
1216 static void restart_le_actions(struct hci_dev *hdev)
1217 {
1218 	struct hci_conn_params *p;
1219 
1220 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1221 		/* Needed for AUTO_OFF case where might not "really"
1222 		 * have been powered off.
1223 		 */
1224 		list_del_init(&p->action);
1225 
1226 		switch (p->auto_connect) {
1227 		case HCI_AUTO_CONN_DIRECT:
1228 		case HCI_AUTO_CONN_ALWAYS:
1229 			list_add(&p->action, &hdev->pend_le_conns);
1230 			break;
1231 		case HCI_AUTO_CONN_REPORT:
1232 			list_add(&p->action, &hdev->pend_le_reports);
1233 			break;
1234 		default:
1235 			break;
1236 		}
1237 	}
1238 }
1239 
/* Broadcast a New Settings event with the current settings bitmask to
 * all sockets registered for setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1247 
/* Completion callback for Set Powered: on success, restart LE actions
 * and passive scanning (power-on only) and reply with the new settings;
 * on failure, reply with a status error. Always removes the pending
 * command.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1283 
/* hci_cmd_sync callback for Set Powered: apply the requested power
 * state from the pending command's parameter.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1293 
/* Handler for MGMT_OP_SET_POWERED: validate the request, short-circuit
 * when the state already matches, otherwise queue the power change as
 * sync work completed by mgmt_set_powered_complete().
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already in effect: just echo current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1336 
/* Public wrapper: broadcast New Settings to all listeners (no skip) */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1341 
/* Shared context for mgmt_pending_foreach() callbacks that answer a
 * batch of pending commands and remember one socket to notify.
 */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket (ref held) */
	struct hci_dev *hdev;	/* device the commands target */
	u8 mgmt_status;		/* status to report, where applicable */
};
1347 
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings, detach and free it, and stash (with a reference) the first
 * responder's socket in the cmd_lookup so the caller can skip it when
 * broadcasting.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Reference dropped by the caller via sock_put() */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1363 
/* mgmt_pending_foreach() callback: fail @cmd with the status pointed to
 * by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1371 
1372 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1373 {
1374 	if (cmd->cmd_complete) {
1375 		u8 *status = data;
1376 
1377 		cmd->cmd_complete(cmd, *status);
1378 		mgmt_pending_remove(cmd);
1379 
1380 		return;
1381 	}
1382 
1383 	cmd_status_rsp(cmd, data);
1384 }
1385 
/* cmd_complete handler that echoes the command's own parameters back
 * as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1391 
/* cmd_complete handler for commands whose parameters begin with a
 * mgmt_addr_info: echo only that address header back.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1397 
1398 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1399 {
1400 	if (!lmp_bredr_capable(hdev))
1401 		return MGMT_STATUS_NOT_SUPPORTED;
1402 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1403 		return MGMT_STATUS_REJECTED;
1404 	else
1405 		return MGMT_STATUS_SUCCESS;
1406 }
1407 
1408 static u8 mgmt_le_support(struct hci_dev *hdev)
1409 {
1410 	if (!lmp_le_capable(hdev))
1411 		return MGMT_STATUS_NOT_SUPPORTED;
1412 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1413 		return MGMT_STATUS_REJECTED;
1414 	else
1415 		return MGMT_STATUS_SUCCESS;
1416 }
1417 
/* Completion callback for Set Discoverable: on success, (re)arm the
 * discoverable timeout and report the new settings; on failure, report
 * the error and roll back the limited-discoverable flag.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* The timeout was stored by set_discoverable(); arming it is
	 * deliberately deferred to here so it only starts once the
	 * controller actually entered discoverable mode.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1451 
/* hci_cmd_sync callback for Set Discoverable: push the already-updated
 * discoverable flags to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1458 
/* Handler for MGMT_OP_SET_DISCOVERABLE. val: 0x00 off, 0x01 general,
 * 0x02 limited. Validates the timeout/mode combination, handles the
 * powered-off and timeout-only cases directly, and otherwise updates
 * the flags and queues the controller update as sync work.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes share controller state,
	 * so neither may be pending while this one runs.
	 */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1591 
1592 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1593 					  int err)
1594 {
1595 	struct mgmt_pending_cmd *cmd = data;
1596 
1597 	bt_dev_dbg(hdev, "err %d", err);
1598 
1599 	/* Make sure cmd still outstanding. */
1600 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1601 		return;
1602 
1603 	hci_dev_lock(hdev);
1604 
1605 	if (err) {
1606 		u8 mgmt_err = mgmt_status(err);
1607 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1608 		goto done;
1609 	}
1610 
1611 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1612 	new_settings(hdev, cmd->sk);
1613 
1614 done:
1615 	if (cmd)
1616 		mgmt_pending_remove(cmd);
1617 
1618 	hci_dev_unlock(hdev);
1619 }
1620 
1621 static int set_connectable_update_settings(struct hci_dev *hdev,
1622 					   struct sock *sk, u8 val)
1623 {
1624 	bool changed = false;
1625 	int err;
1626 
1627 	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1628 		changed = true;
1629 
1630 	if (val) {
1631 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1632 	} else {
1633 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1634 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1635 	}
1636 
1637 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1638 	if (err < 0)
1639 		return err;
1640 
1641 	if (changed) {
1642 		hci_update_scan(hdev);
1643 		hci_update_passive_scan(hdev);
1644 		return new_settings(hdev, sk);
1645 	}
1646 
1647 	return 0;
1648 }
1649 
/* hci_cmd_sync callback for Set Connectable: push the already-updated
 * connectable flags to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1656 
/* Handler for MGMT_OP_SET_CONNECTABLE: validate, handle the powered-off
 * case via set_connectable_update_settings(), otherwise update the
 * flags and queue the controller update as sync work.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Discoverable and connectable changes share controller state,
	 * so neither may be pending while this one runs.
	 */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverable mode,
		 * so stop its timeout and clear both flags.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1716 
/* Handler for MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag and,
 * if it actually changed, refresh discoverable state (the bondable mode
 * can affect the advertising address in limited privacy) and broadcast
 * New Settings.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1754 
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link-level
 * authentication. While powered off only the flag is flipped; while
 * powered on the change is sent to the controller via
 * HCI_OP_WRITE_AUTH_ENABLE and completed asynchronously.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just reply */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Completion is driven by the HCI command-complete event for
	 * WRITE_AUTH_ENABLE; the pending cmd is answered there.
	 */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1823 
/* Completion callback for Set SSP: reconcile the HCI_SSP_ENABLED /
 * HCI_HS_ENABLED flags with the outcome, answer all pending SET_SSP
 * commands, broadcast New Settings when anything changed, and refresh
 * the EIR data.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the optimistic enable done in set_ssp_sync();
		 * HS depends on SSP so it gets cleared too.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* Disabling SSP also disables HS; either transition counts
		 * as a settings change.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1872 
/* hci_cmd_sync callback for Set SSP: optimistically set the flag when
 * enabling, write the mode to the controller, and undo the flag change
 * if the write failed.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	/* changed records whether this call set the flag, so only our
	 * own change is rolled back on error below.
	 */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1890 
/* Handler for MGMT_OP_SET_SSP: validate support, handle the powered-off
 * case by flipping flags directly, otherwise queue the mode write as
 * sync work completed by set_ssp_complete().
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also disables HS; either transition
			 * counts as a settings change.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already in effect: just echo current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1970 
/* Handler for MGMT_OP_SET_HS (High Speed / AMP): purely a flag toggle,
 * no controller interaction. Requires CONFIG_BT_HS, BR/EDR, SSP
 * capability and SSP enabled; disabling is rejected while powered on.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending SET_SSP could flip the SSP precondition under us */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS while powered on is not allowed */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2031 
/* Completion callback for Set LE: fail all pending SET_LE commands on
 * error, otherwise answer them with the new settings and broadcast
 * New Settings (skipping the first responder's socket).
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first responder */
	if (match.sk)
		sock_put(match.sk);
}
2052 
/* hci_cmd_sync callback for Set LE: tear down advertising when
 * disabling, write the LE host support setting, and when LE ends up
 * enabled refresh the default advertising/scan response data and
 * passive scanning.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Advertising must be stopped before LE support is removed */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2094 
/* Handler for MGMT_OP_SET_LE: validate the request (LE-only controllers
 * cannot switch LE off), handle the powered-off / no-op cases by flag
 * updates alone, otherwise queue the host-support write as sync work
 * completed by set_le_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE drops all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* SET_ADVERTISING also manipulates LE state, so serialize */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2186 
2187 /* This is a helper function to test for pending mgmt commands that can
2188  * cause CoD or EIR HCI commands. We can only allow one such pending
2189  * mgmt command at a time since otherwise we cannot easily track what
2190  * the current values are, will be, and based on that calculate if a new
2191  * HCI command needs to be sent and if yes with what value.
2192  */
2193 static bool pending_eir_or_class(struct hci_dev *hdev)
2194 {
2195 	struct mgmt_pending_cmd *cmd;
2196 
2197 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2198 		switch (cmd->opcode) {
2199 		case MGMT_OP_ADD_UUID:
2200 		case MGMT_OP_REMOVE_UUID:
2201 		case MGMT_OP_SET_DEV_CLASS:
2202 		case MGMT_OP_SET_POWERED:
2203 			return true;
2204 		}
2205 	}
2206 
2207 	return false;
2208 }
2209 
/* Bluetooth Base UUID in little-endian byte order. get_uuid_size()
 * compares the first 12 bytes against this template and reads the
 * candidate's remaining 4 bytes as the short-form value.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2214 
2215 static u8 get_uuid_size(const u8 *uuid)
2216 {
2217 	u32 val;
2218 
2219 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2220 		return 128;
2221 
2222 	val = get_unaligned_le32(&uuid[12]);
2223 	if (val > 0xffff)
2224 		return 32;
2225 
2226 	return 16;
2227 }
2228 
2229 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2230 {
2231 	struct mgmt_pending_cmd *cmd = data;
2232 
2233 	bt_dev_dbg(hdev, "err %d", err);
2234 
2235 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2236 			  mgmt_status(err), hdev->dev_class, 3);
2237 
2238 	mgmt_pending_free(cmd);
2239 }
2240 
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	/* Write the Class of Device first; the EIR data is only
	 * refreshed when that succeeded.
	 */
	err = hci_update_class_sync(hdev);
	if (!err)
		err = hci_update_eir_sync(hdev);

	return err;
}
2251 
/* Handle MGMT_OP_ADD_UUID: record a service UUID and queue a refresh of
 * the Class of Device and EIR data. The reply (carrying dev_class) is
 * sent from mgmt_class_complete once add_uuid_sync has run.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	/* Note: the success path falls through to the label below as well;
	 * "failed" only names the common unlock-and-return exit.
	 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
2297 
2298 static bool enable_service_cache(struct hci_dev *hdev)
2299 {
2300 	if (!hdev_is_powered(hdev))
2301 		return false;
2302 
2303 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2304 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2305 				   CACHE_TIMEOUT);
2306 		return true;
2307 	}
2308 
2309 	return false;
2310 }
2311 
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	/* Same sequence as add_uuid_sync: class first, then EIR */
	err = hci_update_class_sync(hdev);
	if (!err)
		err = hci_update_eir_sync(hdev);

	return err;
}
2322 
/* Handle MGMT_OP_REMOVE_UUID: remove one UUID (or all, when the
 * all-zero wildcard address is given) and queue a class/EIR refresh.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer could be armed, the HCI
		 * update is deferred and we can reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2390 
2391 static int set_class_sync(struct hci_dev *hdev, void *data)
2392 {
2393 	int err = 0;
2394 
2395 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2396 		cancel_delayed_work_sync(&hdev->service_cache);
2397 		err = hci_update_eir_sync(hdev);
2398 	}
2399 
2400 	if (err)
2401 		return err;
2402 
2403 	return hci_update_class_sync(hdev);
2404 }
2405 
/* Handle MGMT_OP_SET_DEV_CLASS: store the major/minor device class and,
 * when powered, queue the HCI update; the reply is sent from
 * mgmt_class_complete.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device only exists on BR/EDR capable controllers */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two bits of minor and high three bits of major are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* While powered off just store the values; they are applied in
	 * the power-on sequence.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2457 
/* Handle MGMT_OP_LOAD_LINK_KEYS: validate and replace the entire set of
 * stored BR/EDR link keys with the ones supplied by userspace.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps the expected_len computation below from
	 * overflowing u16.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate all entries up front so the store is never replaced
	 * with a partially valid list.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* This is a full replacement: drop all previously stored keys */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys that were explicitly blocked by userspace */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2546 
2547 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2548 			   u8 addr_type, struct sock *skip_sk)
2549 {
2550 	struct mgmt_ev_device_unpaired ev;
2551 
2552 	bacpy(&ev.addr.bdaddr, bdaddr);
2553 	ev.addr.type = addr_type;
2554 
2555 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2556 			  skip_sk);
2557 }
2558 
2559 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2560 {
2561 	struct mgmt_pending_cmd *cmd = data;
2562 	struct mgmt_cp_unpair_device *cp = cmd->param;
2563 
2564 	if (!err)
2565 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2566 
2567 	cmd->cmd_complete(cmd, err);
2568 	mgmt_pending_free(cmd);
2569 }
2570 
2571 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2572 {
2573 	struct mgmt_pending_cmd *cmd = data;
2574 	struct mgmt_cp_unpair_device *cp = cmd->param;
2575 	struct hci_conn *conn;
2576 
2577 	if (cp->addr.type == BDADDR_BREDR)
2578 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2579 					       &cp->addr.bdaddr);
2580 	else
2581 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2582 					       le_addr_type(cp->addr.type));
2583 
2584 	if (!conn)
2585 		return 0;
2586 
2587 	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2588 }
2589 
/* Handle MGMT_OP_UNPAIR_DEVICE: remove the stored keys for a device
 * and optionally (cp->disconnect) terminate an existing connection.
 * Throughout the function the local "conn" variable doubles as the
 * "link termination requested" marker: non-NULL at the "done" label
 * means the link must be torn down asynchronously.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* With no active connection the parameters can be dropped now */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Reply and Device Unpaired event are deferred until the link
	 * teardown finishes (unpair_device_complete).
	 */
	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2718 
/* Handle MGMT_OP_DISCONNECT: terminate an existing connection. The
 * reply is deferred until the disconnect completes (generic_cmd_complete
 * via the pending command).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED states mean no established link to drop */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2784 
2785 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2786 {
2787 	switch (link_type) {
2788 	case LE_LINK:
2789 		switch (addr_type) {
2790 		case ADDR_LE_DEV_PUBLIC:
2791 			return BDADDR_LE_PUBLIC;
2792 
2793 		default:
2794 			/* Fallback to LE Random address type */
2795 			return BDADDR_LE_RANDOM;
2796 		}
2797 
2798 	default:
2799 		/* Fallback to BR/EDR type */
2800 		return BDADDR_BREDR;
2801 	}
2802 }
2803 
/* Handle MGMT_OP_GET_CONNECTIONS: report the addresses of all
 * mgmt-visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count entries to size the response allocation */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. SCO/eSCO links are
	 * filtered out by not advancing i (the bacpy above the check
	 * is then overwritten by the next entry or dropped by the
	 * final length).
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2857 
2858 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2859 				   struct mgmt_cp_pin_code_neg_reply *cp)
2860 {
2861 	struct mgmt_pending_cmd *cmd;
2862 	int err;
2863 
2864 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2865 			       sizeof(*cp));
2866 	if (!cmd)
2867 		return -ENOMEM;
2868 
2869 	cmd->cmd_complete = addr_cmd_complete;
2870 
2871 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2872 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2873 	if (err < 0)
2874 		mgmt_pending_remove(cmd);
2875 
2876 	return err;
2877 }
2878 
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; anything shorter is
	 * rejected towards the remote with a negative reply.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2940 
2941 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2942 			     u16 len)
2943 {
2944 	struct mgmt_cp_set_io_capability *cp = data;
2945 
2946 	bt_dev_dbg(hdev, "sock %p", sk);
2947 
2948 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2949 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2950 				       MGMT_STATUS_INVALID_PARAMS);
2951 
2952 	hci_dev_lock(hdev);
2953 
2954 	hdev->io_capability = cp->io_capability;
2955 
2956 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2957 
2958 	hci_dev_unlock(hdev);
2959 
2960 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2961 				 NULL, 0);
2962 }
2963 
2964 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2965 {
2966 	struct hci_dev *hdev = conn->hdev;
2967 	struct mgmt_pending_cmd *cmd;
2968 
2969 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2970 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2971 			continue;
2972 
2973 		if (cmd->user_data != conn)
2974 			continue;
2975 
2976 		return cmd;
2977 	}
2978 
2979 	return NULL;
2980 }
2981 
/* Finish a Pair Device command: send the response, detach the pairing
 * callbacks from the connection and drop the references taken when the
 * command was set up.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Balances the hold taken when the connection was initiated */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Balances the hci_conn_get() stored in cmd->user_data */
	hci_conn_put(conn);

	return err;
}
3010 
3011 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3012 {
3013 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3014 	struct mgmt_pending_cmd *cmd;
3015 
3016 	cmd = find_pairing(conn);
3017 	if (cmd) {
3018 		cmd->cmd_complete(cmd, status);
3019 		mgmt_pending_remove(cmd);
3020 	}
3021 }
3022 
3023 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3024 {
3025 	struct mgmt_pending_cmd *cmd;
3026 
3027 	BT_DBG("status %u", status);
3028 
3029 	cmd = find_pairing(conn);
3030 	if (!cmd) {
3031 		BT_DBG("Unable to find a pending command");
3032 		return;
3033 	}
3034 
3035 	cmd->cmd_complete(cmd, mgmt_status(status));
3036 	mgmt_pending_remove(cmd);
3037 }
3038 
3039 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3040 {
3041 	struct mgmt_pending_cmd *cmd;
3042 
3043 	BT_DBG("status %u", status);
3044 
3045 	if (!status)
3046 		return;
3047 
3048 	cmd = find_pairing(conn);
3049 	if (!cmd) {
3050 		BT_DBG("Unable to find a pending command");
3051 		return;
3052 	}
3053 
3054 	cmd->cmd_complete(cmd, mgmt_status(status));
3055 	mgmt_pending_remove(cmd);
3056 }
3057 
3058 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3059 		       u16 len)
3060 {
3061 	struct mgmt_cp_pair_device *cp = data;
3062 	struct mgmt_rp_pair_device rp;
3063 	struct mgmt_pending_cmd *cmd;
3064 	u8 sec_level, auth_type;
3065 	struct hci_conn *conn;
3066 	int err;
3067 
3068 	bt_dev_dbg(hdev, "sock %p", sk);
3069 
3070 	memset(&rp, 0, sizeof(rp));
3071 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3072 	rp.addr.type = cp->addr.type;
3073 
3074 	if (!bdaddr_type_is_valid(cp->addr.type))
3075 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3076 					 MGMT_STATUS_INVALID_PARAMS,
3077 					 &rp, sizeof(rp));
3078 
3079 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3080 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3081 					 MGMT_STATUS_INVALID_PARAMS,
3082 					 &rp, sizeof(rp));
3083 
3084 	hci_dev_lock(hdev);
3085 
3086 	if (!hdev_is_powered(hdev)) {
3087 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3088 					MGMT_STATUS_NOT_POWERED, &rp,
3089 					sizeof(rp));
3090 		goto unlock;
3091 	}
3092 
3093 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3094 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3095 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3096 					sizeof(rp));
3097 		goto unlock;
3098 	}
3099 
3100 	sec_level = BT_SECURITY_MEDIUM;
3101 	auth_type = HCI_AT_DEDICATED_BONDING;
3102 
3103 	if (cp->addr.type == BDADDR_BREDR) {
3104 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3105 				       auth_type, CONN_REASON_PAIR_DEVICE);
3106 	} else {
3107 		u8 addr_type = le_addr_type(cp->addr.type);
3108 		struct hci_conn_params *p;
3109 
3110 		/* When pairing a new device, it is expected to remember
3111 		 * this device for future connections. Adding the connection
3112 		 * parameter information ahead of time allows tracking
3113 		 * of the peripheral preferred values and will speed up any
3114 		 * further connection establishment.
3115 		 *
3116 		 * If connection parameters already exist, then they
3117 		 * will be kept and this function does nothing.
3118 		 */
3119 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3120 
3121 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3122 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3123 
3124 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3125 					   sec_level, HCI_LE_CONN_TIMEOUT,
3126 					   CONN_REASON_PAIR_DEVICE);
3127 	}
3128 
3129 	if (IS_ERR(conn)) {
3130 		int status;
3131 
3132 		if (PTR_ERR(conn) == -EBUSY)
3133 			status = MGMT_STATUS_BUSY;
3134 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3135 			status = MGMT_STATUS_NOT_SUPPORTED;
3136 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3137 			status = MGMT_STATUS_REJECTED;
3138 		else
3139 			status = MGMT_STATUS_CONNECT_FAILED;
3140 
3141 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3142 					status, &rp, sizeof(rp));
3143 		goto unlock;
3144 	}
3145 
3146 	if (conn->connect_cfm_cb) {
3147 		hci_conn_drop(conn);
3148 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3149 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3150 		goto unlock;
3151 	}
3152 
3153 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3154 	if (!cmd) {
3155 		err = -ENOMEM;
3156 		hci_conn_drop(conn);
3157 		goto unlock;
3158 	}
3159 
3160 	cmd->cmd_complete = pairing_complete;
3161 
3162 	/* For LE, just connecting isn't a proof that the pairing finished */
3163 	if (cp->addr.type == BDADDR_BREDR) {
3164 		conn->connect_cfm_cb = pairing_complete_cb;
3165 		conn->security_cfm_cb = pairing_complete_cb;
3166 		conn->disconn_cfm_cb = pairing_complete_cb;
3167 	} else {
3168 		conn->connect_cfm_cb = le_pairing_complete_cb;
3169 		conn->security_cfm_cb = le_pairing_complete_cb;
3170 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3171 	}
3172 
3173 	conn->io_capability = cp->io_cap;
3174 	cmd->user_data = hci_conn_get(conn);
3175 
3176 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3177 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3178 		cmd->cmd_complete(cmd, 0);
3179 		mgmt_pending_remove(cmd);
3180 	}
3181 
3182 	err = 0;
3183 
3184 unlock:
3185 	hci_dev_unlock(hdev);
3186 	return err;
3187 }
3188 
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the in-progress Pair Device
 * command for the given address, remove any keys created so far and
 * tear down a link that was created purely for pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be an outstanding Pair Device command to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the same device as the pending pairing */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3245 
/* Shared backend for all user pairing response commands (PIN code,
 * user confirmation and passkey replies, positive and negative).
 *
 * For LE addresses the reply is handed to SMP and answered immediately;
 * for BR/EDR it is forwarded to the controller as the HCI command
 * @hci_op while a pending mgmt command tracks completion. Returns 0 on
 * success or a negative errno.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Address type selects which connection hash to search. */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing is handled entirely by SMP, so reply to userspace
	 * right away without creating a pending command.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	/* If the HCI command could not be sent, drop the pending command
	 * so it does not linger waiting for a completion that never comes.
	 */
	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3316 
3317 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3318 			      void *data, u16 len)
3319 {
3320 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3321 
3322 	bt_dev_dbg(hdev, "sock %p", sk);
3323 
3324 	return user_pairing_resp(sk, hdev, &cp->addr,
3325 				MGMT_OP_PIN_CODE_NEG_REPLY,
3326 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3327 }
3328 
3329 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3330 			      u16 len)
3331 {
3332 	struct mgmt_cp_user_confirm_reply *cp = data;
3333 
3334 	bt_dev_dbg(hdev, "sock %p", sk);
3335 
3336 	if (len != sizeof(*cp))
3337 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3338 				       MGMT_STATUS_INVALID_PARAMS);
3339 
3340 	return user_pairing_resp(sk, hdev, &cp->addr,
3341 				 MGMT_OP_USER_CONFIRM_REPLY,
3342 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3343 }
3344 
3345 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3346 				  void *data, u16 len)
3347 {
3348 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3349 
3350 	bt_dev_dbg(hdev, "sock %p", sk);
3351 
3352 	return user_pairing_resp(sk, hdev, &cp->addr,
3353 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3354 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3355 }
3356 
3357 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3358 			      u16 len)
3359 {
3360 	struct mgmt_cp_user_passkey_reply *cp = data;
3361 
3362 	bt_dev_dbg(hdev, "sock %p", sk);
3363 
3364 	return user_pairing_resp(sk, hdev, &cp->addr,
3365 				 MGMT_OP_USER_PASSKEY_REPLY,
3366 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3367 }
3368 
3369 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3370 				  void *data, u16 len)
3371 {
3372 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3373 
3374 	bt_dev_dbg(hdev, "sock %p", sk);
3375 
3376 	return user_pairing_resp(sk, hdev, &cp->addr,
3377 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3378 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3379 }
3380 
3381 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3382 {
3383 	struct adv_info *adv_instance;
3384 
3385 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3386 	if (!adv_instance)
3387 		return 0;
3388 
3389 	/* stop if current instance doesn't need to be changed */
3390 	if (!(adv_instance->flags & flags))
3391 		return 0;
3392 
3393 	cancel_adv_timeout(hdev);
3394 
3395 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3396 	if (!adv_instance)
3397 		return 0;
3398 
3399 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3400 
3401 	return 0;
3402 }
3403 
/* hci_cmd_sync callback: expire the current advertising instance when
 * it embeds the (just changed) local name. @data is unused.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3408 
/* Completion callback for the Set Local Name work queued via
 * hci_cmd_sync_queue(). Replies to the originating socket and, on
 * success while LE advertising is active, queues an advertising
 * instance refresh so the new name is picked up.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the pending command has already been replaced or
	 * removed (e.g. the command was cancelled in the meantime).
	 */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3433 
/* hci_cmd_sync callback performing the actual name update on the
 * controller. @data is the pending command (unused here). Always
 * returns 0; individual update failures are not propagated.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	/* BR/EDR controllers carry the name both in the controller and in
	 * the EIR data, so refresh both.
	 */
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3449 
/* Handle the Set Local Name mgmt command.
 *
 * If the names are unchanged or the controller is powered off the
 * update is handled purely in memory; otherwise the controller update
 * is queued and answered from set_name_complete(). Note the "failed"
 * label is reached on success paths as well -- it only unlocks.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is never sent to the controller, so it can be
	 * stored unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Powered off: store the name and notify listeners without
		 * touching the controller.
		 */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Only update dev_name once the work is queued successfully. */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3512 
/* hci_cmd_sync callback: expire the current advertising instance when
 * it embeds the (just changed) appearance value. @data is unused.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3517 
3518 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3519 			  u16 len)
3520 {
3521 	struct mgmt_cp_set_appearance *cp = data;
3522 	u16 appearance;
3523 	int err;
3524 
3525 	bt_dev_dbg(hdev, "sock %p", sk);
3526 
3527 	if (!lmp_le_capable(hdev))
3528 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3529 				       MGMT_STATUS_NOT_SUPPORTED);
3530 
3531 	appearance = le16_to_cpu(cp->appearance);
3532 
3533 	hci_dev_lock(hdev);
3534 
3535 	if (hdev->appearance != appearance) {
3536 		hdev->appearance = appearance;
3537 
3538 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3539 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3540 					   NULL);
3541 
3542 		ext_info_changed(hdev, sk);
3543 	}
3544 
3545 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3546 				0);
3547 
3548 	hci_dev_unlock(hdev);
3549 
3550 	return err;
3551 }
3552 
3553 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3554 				 void *data, u16 len)
3555 {
3556 	struct mgmt_rp_get_phy_configuration rp;
3557 
3558 	bt_dev_dbg(hdev, "sock %p", sk);
3559 
3560 	hci_dev_lock(hdev);
3561 
3562 	memset(&rp, 0, sizeof(rp));
3563 
3564 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3565 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3566 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3567 
3568 	hci_dev_unlock(hdev);
3569 
3570 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3571 				 &rp, sizeof(rp));
3572 }
3573 
3574 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3575 {
3576 	struct mgmt_ev_phy_configuration_changed ev;
3577 
3578 	memset(&ev, 0, sizeof(ev));
3579 
3580 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3581 
3582 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3583 			  sizeof(ev), skip);
3584 }
3585 
/* Completion callback for the Set PHY Configuration work. Derives the
 * final status from both the sync-queue error and the HCI command
 * response skb stored by set_default_phy_sync(), then replies to the
 * originating socket and broadcasts the change on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if the pending command has already been replaced or
	 * removed in the meantime.
	 */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* No sync-queue error: inspect the HCI response. skb may be
	 * NULL (no response), an ERR_PTR (send failure) or a real skb
	 * whose first byte is the HCI status.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3622 
3623 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3624 {
3625 	struct mgmt_pending_cmd *cmd = data;
3626 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3627 	struct hci_cp_le_set_default_phy cp_phy;
3628 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3629 
3630 	memset(&cp_phy, 0, sizeof(cp_phy));
3631 
3632 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3633 		cp_phy.all_phys |= 0x01;
3634 
3635 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3636 		cp_phy.all_phys |= 0x02;
3637 
3638 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3639 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3640 
3641 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3642 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3643 
3644 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3645 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3646 
3647 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3648 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3649 
3650 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3651 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3652 
3653 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3654 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3655 
3656 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3657 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3658 
3659 	return 0;
3660 }
3661 
/* Handle the Set PHY Configuration mgmt command.
 *
 * BR/EDR PHY selections are applied immediately by recomputing the ACL
 * packet type mask; LE PHY changes are queued as an HCI LE Set Default
 * PHY command and answered from set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is invalid. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Non-configurable PHYs must always remain selected. */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Rebuild the BR/EDR ACL packet type mask from the selected PHYs.
	 * Note basic-rate 3/5-slot bits are positive ("may use") while
	 * the EDR (2M/3M) bits are "shall not use", hence the inverted
	 * handling below.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only BR/EDR PHYs changed, no HCI command is needed; reply
	 * directly (and broadcast the change if anything changed).
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3790 
/* Handle the Set Blocked Keys mgmt command: replace the device's list
 * of blocked keys with the ones supplied by userspace.
 *
 * Note: err holds an MGMT_STATUS_* code (not an errno); it is passed
 * as the status of the command-complete reply. On allocation failure
 * mid-loop, keys added so far remain on the list.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Upper bound that keeps struct_size() below from overflowing u16. */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared key count exactly. */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* This is a full replace: drop the old list first. */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
3839 
/* Handle the Set Wideband Speech mgmt command.
 *
 * The setting can only be toggled while the controller is powered off;
 * when powered on, only a no-op request (same value) is accepted.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only offered when the driver declares wideband speech support. */
	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Reject actual changes while powered on. */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Broadcast new settings to other listeners only on change. */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3888 
/* Handle the Read Controller Capabilities mgmt command.
 *
 * Builds a TLV-style capability list (security flags, max encryption
 * key sizes and optionally the LE TX power range) in a fixed on-stack
 * buffer. The buffer is sized so that all capability entries appended
 * below are guaranteed to fit.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3955 
/* Experimental feature UUIDs. Each array stores the 16 UUID bytes in
 * reversed (little-endian) order relative to the canonical string form
 * given in the comment above it.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};
3993 
/* Handle the Read Experimental Features Information mgmt command.
 *
 * Builds the list of experimental features applicable to @hdev (or to
 * the non-controller index when hdev is NULL) along with their current
 * enabled state. Each entry is 20 bytes (16-byte UUID + 4-byte flags);
 * buf must stay large enough for the maximum number of entries
 * appended below (currently 6).
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[122];   /* Enough space for 6 features: 2 + 20 * 6 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* The debug feature is exposed only on the non-controller index. */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) signals that enabling changes supported settings. */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
4079 
/* Emit an Experimental Feature Changed event for the LL privacy (RPA
 * resolution) feature. BIT(1) in the flags signals that the supported
 * settings changed as well.
 *
 * Note this also has a side effect: it updates hdev->conn_flags to
 * advertise (or drop) device-privacy support accordingly.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* NOTE(review): open question from the original author -- do we
	 * need to be atomic with the conn_flags update here?
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);

}
4100 
4101 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4102 			       bool enabled, struct sock *skip)
4103 {
4104 	struct mgmt_ev_exp_feature_changed ev;
4105 
4106 	memset(&ev, 0, sizeof(ev));
4107 	memcpy(ev.uuid, uuid, 16);
4108 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4109 
4110 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4111 				  &ev, sizeof(ev),
4112 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4113 }
4114 
/* Initializer pairing an experimental feature UUID with its set
 * handler; presumably used to build the exp feature dispatch table
 * (table itself is outside this chunk).
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4120 
/* The zero key uuid is special. Multiple exp features are set through it.
 *
 * Setting the all-zero UUID disables every experimental feature that is
 * currently enabled on the given index (debug on the non-controller
 * index, LL privacy on a powered-off controller), emitting a changed
 * event for each one actually toggled.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be disabled while powered off. */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4157 
4158 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Set-handler for the debug experimental feature: toggles the global
 * Bluetooth debug logging state. Only valid on the non-controller
 * index and with a single boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other listeners only if the state actually flipped. */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
4203 #endif
4204 
/* Set-handler for the LL privacy (RPA resolution) experimental
 * feature. Requires a controller index, a powered-off controller and a
 * single boolean parameter octet. BIT(1) in the reply flags indicates
 * the supported settings changed along with the feature.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* Advertising is not compatible with LL privacy here, so
		 * drop the flag when enabling.
		 */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4269 
/* Set-handler for the quality report experimental feature. Prefers the
 * driver's set_quality_report callback and falls back to the AOSP
 * vendor extension. Runs under the request-sync lock since it may talk
 * to the controller.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* Neither a driver callback nor the AOSP extension available. */
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only track the new state after the controller accepted it. */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4343 
4344 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4345 				  struct mgmt_cp_set_exp_feature *cp,
4346 				  u16 data_len)
4347 {
4348 	bool val, changed;
4349 	int err;
4350 	struct mgmt_rp_set_exp_feature rp;
4351 
4352 	/* Command requires to use a valid controller index */
4353 	if (!hdev)
4354 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4355 				       MGMT_OP_SET_EXP_FEATURE,
4356 				       MGMT_STATUS_INVALID_INDEX);
4357 
4358 	/* Parameters are limited to a single octet */
4359 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4360 		return mgmt_cmd_status(sk, hdev->id,
4361 				       MGMT_OP_SET_EXP_FEATURE,
4362 				       MGMT_STATUS_INVALID_PARAMS);
4363 
4364 	/* Only boolean on/off is supported */
4365 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4366 		return mgmt_cmd_status(sk, hdev->id,
4367 				       MGMT_OP_SET_EXP_FEATURE,
4368 				       MGMT_STATUS_INVALID_PARAMS);
4369 
4370 	val = !!cp->param[0];
4371 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4372 
4373 	if (!hdev->get_data_path_id) {
4374 		return mgmt_cmd_status(sk, hdev->id,
4375 				       MGMT_OP_SET_EXP_FEATURE,
4376 				       MGMT_STATUS_NOT_SUPPORTED);
4377 	}
4378 
4379 	if (changed) {
4380 		if (val)
4381 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4382 		else
4383 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4384 	}
4385 
4386 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4387 		    val, changed);
4388 
4389 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4390 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4391 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4392 	err = mgmt_cmd_complete(sk, hdev->id,
4393 				MGMT_OP_SET_EXP_FEATURE, 0,
4394 				&rp, sizeof(rp));
4395 
4396 	if (changed)
4397 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4398 
4399 	return err;
4400 }
4401 
4402 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4403 					  struct mgmt_cp_set_exp_feature *cp,
4404 					  u16 data_len)
4405 {
4406 	bool val, changed;
4407 	int err;
4408 	struct mgmt_rp_set_exp_feature rp;
4409 
4410 	/* Command requires to use a valid controller index */
4411 	if (!hdev)
4412 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4413 				       MGMT_OP_SET_EXP_FEATURE,
4414 				       MGMT_STATUS_INVALID_INDEX);
4415 
4416 	/* Parameters are limited to a single octet */
4417 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4418 		return mgmt_cmd_status(sk, hdev->id,
4419 				       MGMT_OP_SET_EXP_FEATURE,
4420 				       MGMT_STATUS_INVALID_PARAMS);
4421 
4422 	/* Only boolean on/off is supported */
4423 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4424 		return mgmt_cmd_status(sk, hdev->id,
4425 				       MGMT_OP_SET_EXP_FEATURE,
4426 				       MGMT_STATUS_INVALID_PARAMS);
4427 
4428 	val = !!cp->param[0];
4429 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4430 
4431 	if (!hci_dev_le_state_simultaneous(hdev)) {
4432 		return mgmt_cmd_status(sk, hdev->id,
4433 				       MGMT_OP_SET_EXP_FEATURE,
4434 				       MGMT_STATUS_NOT_SUPPORTED);
4435 	}
4436 
4437 	if (changed) {
4438 		if (val)
4439 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4440 		else
4441 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4442 	}
4443 
4444 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4445 		    val, changed);
4446 
4447 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4448 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4449 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4450 	err = mgmt_cmd_complete(sk, hdev->id,
4451 				MGMT_OP_SET_EXP_FEATURE, 0,
4452 				&rp, sizeof(rp));
4453 
4454 	if (changed)
4455 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4456 
4457 	return err;
4458 }
4459 
4460 #ifdef CONFIG_BT_LE
4461 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4462 			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4463 {
4464 	struct mgmt_rp_set_exp_feature rp;
4465 	bool val, changed = false;
4466 	int err;
4467 
4468 	/* Command requires to use the non-controller index */
4469 	if (hdev)
4470 		return mgmt_cmd_status(sk, hdev->id,
4471 				       MGMT_OP_SET_EXP_FEATURE,
4472 				       MGMT_STATUS_INVALID_INDEX);
4473 
4474 	/* Parameters are limited to a single octet */
4475 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4476 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4477 				       MGMT_OP_SET_EXP_FEATURE,
4478 				       MGMT_STATUS_INVALID_PARAMS);
4479 
4480 	/* Only boolean on/off is supported */
4481 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4482 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4483 				       MGMT_OP_SET_EXP_FEATURE,
4484 				       MGMT_STATUS_INVALID_PARAMS);
4485 
4486 	val = cp->param[0] ? true : false;
4487 	if (val)
4488 		err = iso_init();
4489 	else
4490 		err = iso_exit();
4491 
4492 	if (!err)
4493 		changed = true;
4494 
4495 	memcpy(rp.uuid, iso_socket_uuid, 16);
4496 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4497 
4498 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4499 
4500 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4501 				MGMT_OP_SET_EXP_FEATURE, 0,
4502 				&rp, sizeof(rp));
4503 
4504 	if (changed)
4505 		exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4506 
4507 	return err;
4508 }
4509 #endif
4510 
/* Dispatch table for MGMT_OP_SET_EXP_FEATURE: each entry maps a
 * 128-bit experimental-feature UUID to the handler that toggles it.
 * The table is terminated by an all-NULL sentinel entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	/* All-zeroes UUID: used to clear/reset experimental features */
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4531 
4532 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4533 			   void *data, u16 data_len)
4534 {
4535 	struct mgmt_cp_set_exp_feature *cp = data;
4536 	size_t i = 0;
4537 
4538 	bt_dev_dbg(hdev, "sock %p", sk);
4539 
4540 	for (i = 0; exp_features[i].uuid; i++) {
4541 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4542 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4543 	}
4544 
4545 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4546 			       MGMT_OP_SET_EXP_FEATURE,
4547 			       MGMT_STATUS_NOT_SUPPORTED);
4548 }
4549 
/* MGMT_OP_GET_DEVICE_FLAGS handler: report the supported and current
 * connection flags for a device on the BR/EDR accept list or in the
 * LE connection parameters. An unknown address yields
 * MGMT_STATUS_INVALID_PARAMS with a zeroed reply body.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		/* BR/EDR flags live on the accept-list entry */
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		/* LE flags live on the stored connection parameters */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		if (!params)
			goto done;

		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
4601 
4602 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4603 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4604 				 u32 supported_flags, u32 current_flags)
4605 {
4606 	struct mgmt_ev_device_flags_changed ev;
4607 
4608 	bacpy(&ev.addr.bdaddr, bdaddr);
4609 	ev.addr.type = bdaddr_type;
4610 	ev.supported_flags = cpu_to_le32(supported_flags);
4611 	ev.current_flags = cpu_to_le32(current_flags);
4612 
4613 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4614 }
4615 
4616 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4617 			    u16 len)
4618 {
4619 	struct mgmt_cp_set_device_flags *cp = data;
4620 	struct bdaddr_list_with_flags *br_params;
4621 	struct hci_conn_params *params;
4622 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4623 	u32 supported_flags;
4624 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4625 
4626 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4627 		   &cp->addr.bdaddr, cp->addr.type, current_flags);
4628 
4629 	// We should take hci_dev_lock() early, I think.. conn_flags can change
4630 	supported_flags = hdev->conn_flags;
4631 
4632 	if ((supported_flags | current_flags) != supported_flags) {
4633 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4634 			    current_flags, supported_flags);
4635 		goto done;
4636 	}
4637 
4638 	hci_dev_lock(hdev);
4639 
4640 	if (cp->addr.type == BDADDR_BREDR) {
4641 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4642 							      &cp->addr.bdaddr,
4643 							      cp->addr.type);
4644 
4645 		if (br_params) {
4646 			br_params->flags = current_flags;
4647 			status = MGMT_STATUS_SUCCESS;
4648 		} else {
4649 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4650 				    &cp->addr.bdaddr, cp->addr.type);
4651 		}
4652 	} else {
4653 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4654 						le_addr_type(cp->addr.type));
4655 		if (params) {
4656 			/* Devices using RPAs can only be programmed in the
4657 			 * acceptlist LL Privacy has been enable otherwise they
4658 			 * cannot mark HCI_CONN_FLAG_REMOTE_WAKEUP.
4659 			 */
4660 			if ((current_flags & HCI_CONN_FLAG_REMOTE_WAKEUP) &&
4661 			    !use_ll_privacy(hdev) &&
4662 			    hci_find_irk_by_addr(hdev, &params->addr,
4663 						 params->addr_type)) {
4664 				bt_dev_warn(hdev,
4665 					    "Cannot set wakeable for RPA");
4666 				goto unlock;
4667 			}
4668 
4669 			params->flags = current_flags;
4670 			status = MGMT_STATUS_SUCCESS;
4671 
4672 			/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4673 			 * has been set.
4674 			 */
4675 			if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
4676 				hci_update_passive_scan(hdev);
4677 		} else {
4678 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4679 				    &cp->addr.bdaddr,
4680 				    le_addr_type(cp->addr.type));
4681 		}
4682 	}
4683 
4684 unlock:
4685 	hci_dev_unlock(hdev);
4686 
4687 done:
4688 	if (status == MGMT_STATUS_SUCCESS)
4689 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4690 				     supported_flags, current_flags);
4691 
4692 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4693 				 &cp->addr, sizeof(cp->addr));
4694 }
4695 
4696 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4697 				   u16 handle)
4698 {
4699 	struct mgmt_ev_adv_monitor_added ev;
4700 
4701 	ev.monitor_handle = cpu_to_le16(handle);
4702 
4703 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4704 }
4705 
4706 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4707 {
4708 	struct mgmt_ev_adv_monitor_removed ev;
4709 	struct mgmt_pending_cmd *cmd;
4710 	struct sock *sk_skip = NULL;
4711 	struct mgmt_cp_remove_adv_monitor *cp;
4712 
4713 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4714 	if (cmd) {
4715 		cp = cmd->param;
4716 
4717 		if (cp->monitor_handle)
4718 			sk_skip = cmd->sk;
4719 	}
4720 
4721 	ev.monitor_handle = cpu_to_le16(handle);
4722 
4723 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4724 }
4725 
4726 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4727 				 void *data, u16 len)
4728 {
4729 	struct adv_monitor *monitor = NULL;
4730 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4731 	int handle, err;
4732 	size_t rp_size = 0;
4733 	__u32 supported = 0;
4734 	__u32 enabled = 0;
4735 	__u16 num_handles = 0;
4736 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4737 
4738 	BT_DBG("request for %s", hdev->name);
4739 
4740 	hci_dev_lock(hdev);
4741 
4742 	if (msft_monitor_supported(hdev))
4743 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4744 
4745 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4746 		handles[num_handles++] = monitor->handle;
4747 
4748 	hci_dev_unlock(hdev);
4749 
4750 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4751 	rp = kmalloc(rp_size, GFP_KERNEL);
4752 	if (!rp)
4753 		return -ENOMEM;
4754 
4755 	/* All supported features are currently enabled */
4756 	enabled = supported;
4757 
4758 	rp->supported_features = cpu_to_le32(supported);
4759 	rp->enabled_features = cpu_to_le32(enabled);
4760 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4761 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4762 	rp->num_handles = cpu_to_le16(num_handles);
4763 	if (num_handles)
4764 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4765 
4766 	err = mgmt_cmd_complete(sk, hdev->id,
4767 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
4768 				MGMT_STATUS_SUCCESS, rp, rp_size);
4769 
4770 	kfree(rp);
4771 
4772 	return err;
4773 }
4774 
/* Completion callback for the Add Adv Patterns Monitor hci_cmd_sync
 * work: reply to the issuer, and on success announce the new monitor,
 * bump the monitor count, mark the monitor registered, and refresh
 * passive scanning. The pending command is consumed here.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		/* Announce to other sockets before replying to the issuer */
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
4802 
4803 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
4804 {
4805 	struct mgmt_pending_cmd *cmd = data;
4806 	struct adv_monitor *monitor = cmd->user_data;
4807 
4808 	return hci_add_adv_monitor(hdev, monitor);
4809 }
4810 
4811 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4812 				      struct adv_monitor *m, u8 status,
4813 				      void *data, u16 len, u16 op)
4814 {
4815 	struct mgmt_pending_cmd *cmd;
4816 	int err;
4817 
4818 	hci_dev_lock(hdev);
4819 
4820 	if (status)
4821 		goto unlock;
4822 
4823 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
4824 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4825 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4826 	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4827 		status = MGMT_STATUS_BUSY;
4828 		goto unlock;
4829 	}
4830 
4831 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
4832 	if (!cmd) {
4833 		status = MGMT_STATUS_NO_RESOURCES;
4834 		goto unlock;
4835 	}
4836 
4837 	cmd->user_data = m;
4838 	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
4839 				 mgmt_add_adv_patterns_monitor_complete);
4840 	if (err) {
4841 		if (err == -ENOMEM)
4842 			status = MGMT_STATUS_NO_RESOURCES;
4843 		else
4844 			status = MGMT_STATUS_FAILED;
4845 
4846 		goto unlock;
4847 	}
4848 
4849 	hci_dev_unlock(hdev);
4850 
4851 	return 0;
4852 
4853 unlock:
4854 	hci_free_adv_monitor(hdev, m);
4855 	hci_dev_unlock(hdev);
4856 	return mgmt_cmd_status(sk, hdev->id, op, status);
4857 }
4858 
4859 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4860 				   struct mgmt_adv_rssi_thresholds *rssi)
4861 {
4862 	if (rssi) {
4863 		m->rssi.low_threshold = rssi->low_threshold;
4864 		m->rssi.low_threshold_timeout =
4865 		    __le16_to_cpu(rssi->low_threshold_timeout);
4866 		m->rssi.high_threshold = rssi->high_threshold;
4867 		m->rssi.high_threshold_timeout =
4868 		    __le16_to_cpu(rssi->high_threshold_timeout);
4869 		m->rssi.sampling_period = rssi->sampling_period;
4870 	} else {
4871 		/* Default values. These numbers are the least constricting
4872 		 * parameters for MSFT API to work, so it behaves as if there
4873 		 * are no rssi parameter to consider. May need to be changed
4874 		 * if other API are to be supported.
4875 		 */
4876 		m->rssi.low_threshold = -127;
4877 		m->rssi.low_threshold_timeout = 60;
4878 		m->rssi.high_threshold = -127;
4879 		m->rssi.high_threshold_timeout = 0;
4880 		m->rssi.sampling_period = 0;
4881 	}
4882 }
4883 
4884 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4885 				    struct mgmt_adv_pattern *patterns)
4886 {
4887 	u8 offset = 0, length = 0;
4888 	struct adv_pattern *p = NULL;
4889 	int i;
4890 
4891 	for (i = 0; i < pattern_count; i++) {
4892 		offset = patterns[i].offset;
4893 		length = patterns[i].length;
4894 		if (offset >= HCI_MAX_AD_LENGTH ||
4895 		    length > HCI_MAX_AD_LENGTH ||
4896 		    (offset + length) > HCI_MAX_AD_LENGTH)
4897 			return MGMT_STATUS_INVALID_PARAMS;
4898 
4899 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4900 		if (!p)
4901 			return MGMT_STATUS_NO_RESOURCES;
4902 
4903 		p->ad_type = patterns[i].ad_type;
4904 		p->offset = patterns[i].offset;
4905 		p->length = patterns[i].length;
4906 		memcpy(p->value, patterns[i].value, p->length);
4907 
4908 		INIT_LIST_HEAD(&p->list);
4909 		list_add(&p->list, &m->patterns);
4910 	}
4911 
4912 	return MGMT_STATUS_SUCCESS;
4913 }
4914 
4915 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4916 				    void *data, u16 len)
4917 {
4918 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4919 	struct adv_monitor *m = NULL;
4920 	u8 status = MGMT_STATUS_SUCCESS;
4921 	size_t expected_size = sizeof(*cp);
4922 
4923 	BT_DBG("request for %s", hdev->name);
4924 
4925 	if (len <= sizeof(*cp)) {
4926 		status = MGMT_STATUS_INVALID_PARAMS;
4927 		goto done;
4928 	}
4929 
4930 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4931 	if (len != expected_size) {
4932 		status = MGMT_STATUS_INVALID_PARAMS;
4933 		goto done;
4934 	}
4935 
4936 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4937 	if (!m) {
4938 		status = MGMT_STATUS_NO_RESOURCES;
4939 		goto done;
4940 	}
4941 
4942 	INIT_LIST_HEAD(&m->patterns);
4943 
4944 	parse_adv_monitor_rssi(m, NULL);
4945 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4946 
4947 done:
4948 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4949 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4950 }
4951 
4952 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4953 					 void *data, u16 len)
4954 {
4955 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4956 	struct adv_monitor *m = NULL;
4957 	u8 status = MGMT_STATUS_SUCCESS;
4958 	size_t expected_size = sizeof(*cp);
4959 
4960 	BT_DBG("request for %s", hdev->name);
4961 
4962 	if (len <= sizeof(*cp)) {
4963 		status = MGMT_STATUS_INVALID_PARAMS;
4964 		goto done;
4965 	}
4966 
4967 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4968 	if (len != expected_size) {
4969 		status = MGMT_STATUS_INVALID_PARAMS;
4970 		goto done;
4971 	}
4972 
4973 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4974 	if (!m) {
4975 		status = MGMT_STATUS_NO_RESOURCES;
4976 		goto done;
4977 	}
4978 
4979 	INIT_LIST_HEAD(&m->patterns);
4980 
4981 	parse_adv_monitor_rssi(m, &cp->rssi);
4982 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4983 
4984 done:
4985 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4986 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4987 }
4988 
/* Completion callback for the Remove Adv Monitor hci_cmd_sync work:
 * reply to the issuer with the handle from the original request and,
 * on success, refresh passive scanning. The pending command is
 * consumed here.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Echo the handle back verbatim (still little-endian) */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5011 
5012 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5013 {
5014 	struct mgmt_pending_cmd *cmd = data;
5015 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5016 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5017 
5018 	if (!handle)
5019 		return hci_remove_all_adv_monitor(hdev);
5020 
5021 	return hci_remove_single_adv_monitor(hdev, handle);
5022 }
5023 
5024 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5025 			      void *data, u16 len)
5026 {
5027 	struct mgmt_pending_cmd *cmd;
5028 	int err, status;
5029 
5030 	hci_dev_lock(hdev);
5031 
5032 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
5033 	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5034 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5035 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5036 		status = MGMT_STATUS_BUSY;
5037 		goto unlock;
5038 	}
5039 
5040 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5041 	if (!cmd) {
5042 		status = MGMT_STATUS_NO_RESOURCES;
5043 		goto unlock;
5044 	}
5045 
5046 	err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5047 				 mgmt_remove_adv_monitor_complete);
5048 
5049 	if (err) {
5050 		mgmt_pending_remove(cmd);
5051 
5052 		if (err == -ENOMEM)
5053 			status = MGMT_STATUS_NO_RESOURCES;
5054 		else
5055 			status = MGMT_STATUS_FAILED;
5056 
5057 		mgmt_pending_remove(cmd);
5058 		goto unlock;
5059 	}
5060 
5061 	hci_dev_unlock(hdev);
5062 
5063 	return 0;
5064 
5065 unlock:
5066 	hci_dev_unlock(hdev);
5067 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5068 			       status);
5069 }
5070 
/* Completion callback for the Read Local OOB Data hci_cmd_sync work:
 * translate the HCI response skb into a mgmt reply. When BR/EDR
 * Secure Connections is disabled only the P-192 hash/rand are
 * returned (the reply is truncated accordingly); otherwise both the
 * P-192 and P-256 values from the extended response are copied.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* cmd->skb may be missing, an ERR_PTR, or carry an HCI status
	 * in its first byte; fold all of those into one status value.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy response: P-192 hash/rand only */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended response: both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5137 
5138 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5139 {
5140 	struct mgmt_pending_cmd *cmd = data;
5141 
5142 	if (bredr_sc_enabled(hdev))
5143 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5144 	else
5145 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5146 
5147 	if (IS_ERR(cmd->skb))
5148 		return PTR_ERR(cmd->skb);
5149 	else
5150 		return 0;
5151 }
5152 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: require a powered, SSP-capable
 * controller, then queue the OOB-data read as hci_cmd_sync work; the
 * reply is sent from read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	/* Covers both allocation failure and a queueing failure; the
	 * completion callback will not run, so free the cmd here.
	 */
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5194 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store remote OOB pairing data.
 * Two command sizes are accepted: the legacy form carrying only P-192
 * hash/rand (BR/EDR only), and the extended form carrying both P-192
 * and P-256 values. Zero-valued hash/rand pairs disable the
 * corresponding OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 values only, BR/EDR addresses only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5302 
5303 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5304 				  void *data, u16 len)
5305 {
5306 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5307 	u8 status;
5308 	int err;
5309 
5310 	bt_dev_dbg(hdev, "sock %p", sk);
5311 
5312 	if (cp->addr.type != BDADDR_BREDR)
5313 		return mgmt_cmd_complete(sk, hdev->id,
5314 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5315 					 MGMT_STATUS_INVALID_PARAMS,
5316 					 &cp->addr, sizeof(cp->addr));
5317 
5318 	hci_dev_lock(hdev);
5319 
5320 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5321 		hci_remote_oob_data_clear(hdev);
5322 		status = MGMT_STATUS_SUCCESS;
5323 		goto done;
5324 	}
5325 
5326 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5327 	if (err < 0)
5328 		status = MGMT_STATUS_INVALID_PARAMS;
5329 	else
5330 		status = MGMT_STATUS_SUCCESS;
5331 
5332 done:
5333 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5334 				status, &cp->addr, sizeof(cp->addr));
5335 
5336 	hci_dev_unlock(hdev);
5337 	return err;
5338 }
5339 
5340 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5341 {
5342 	struct mgmt_pending_cmd *cmd;
5343 
5344 	bt_dev_dbg(hdev, "status %u", status);
5345 
5346 	hci_dev_lock(hdev);
5347 
5348 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5349 	if (!cmd)
5350 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5351 
5352 	if (!cmd)
5353 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5354 
5355 	if (cmd) {
5356 		cmd->cmd_complete(cmd, mgmt_status(status));
5357 		mgmt_pending_remove(cmd);
5358 	}
5359 
5360 	hci_dev_unlock(hdev);
5361 }
5362 
5363 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5364 				    uint8_t *mgmt_status)
5365 {
5366 	switch (type) {
5367 	case DISCOV_TYPE_LE:
5368 		*mgmt_status = mgmt_le_support(hdev);
5369 		if (*mgmt_status)
5370 			return false;
5371 		break;
5372 	case DISCOV_TYPE_INTERLEAVED:
5373 		*mgmt_status = mgmt_le_support(hdev);
5374 		if (*mgmt_status)
5375 			return false;
5376 		fallthrough;
5377 	case DISCOV_TYPE_BREDR:
5378 		*mgmt_status = mgmt_bredr_support(hdev);
5379 		if (*mgmt_status)
5380 			return false;
5381 		break;
5382 	default:
5383 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5384 		return false;
5385 	}
5386 
5387 	return true;
5388 }
5389 
5390 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5391 {
5392 	struct mgmt_pending_cmd *cmd = data;
5393 
5394 	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5395 	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5396 	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5397 		return;
5398 
5399 	bt_dev_dbg(hdev, "err %d", err);
5400 
5401 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5402 			  cmd->param, 1);
5403 	mgmt_pending_remove(cmd);
5404 
5405 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5406 				DISCOVERY_FINDING);
5407 }
5408 
/* hci_cmd_sync work callback: kick off discovery on the controller.
 * @data carries the pending mgmt command but is not needed here.
 */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5413 
/* Shared implementation behind Start Discovery, Start Limited Discovery
 * and Start Service Discovery. @op identifies the actual mgmt command so
 * that replies carry the correct opcode. Returns 0 or a negative errno;
 * mgmt-level failures are reported through the command status in the
 * reply sent to @sk.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and it conflicts with
	 * periodic inquiry.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* The HCI work runs asynchronously; the reply to @sk is sent
	 * from start_discovery_complete().
	 */
	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5484 
/* Handler for the Start Discovery mgmt command */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5491 
/* Handler for the Start Limited Discovery mgmt command */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5499 
/* Handle the Start Service Discovery mgmt command. In addition to the
 * usual start-discovery checks this validates the variable-length UUID
 * filter that follows the fixed parameters and copies it into
 * hdev->discovery for result filtering.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest UUID count for which sizeof(*cp) + count * 16 fits in u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and it conflicts with
	 * periodic inquiry.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Reject counts that would overflow the u16 length check below */
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Each UUID in the trailing list occupies 16 bytes */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep a private copy of the UUID filter; the request
		 * buffer is not valid beyond this call.
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5611 
5612 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5613 {
5614 	struct mgmt_pending_cmd *cmd;
5615 
5616 	bt_dev_dbg(hdev, "status %u", status);
5617 
5618 	hci_dev_lock(hdev);
5619 
5620 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5621 	if (cmd) {
5622 		cmd->cmd_complete(cmd, mgmt_status(status));
5623 		mgmt_pending_remove(cmd);
5624 	}
5625 
5626 	hci_dev_unlock(hdev);
5627 }
5628 
5629 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
5630 {
5631 	struct mgmt_pending_cmd *cmd = data;
5632 
5633 	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
5634 		return;
5635 
5636 	bt_dev_dbg(hdev, "err %d", err);
5637 
5638 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5639 			  cmd->param, 1);
5640 	mgmt_pending_remove(cmd);
5641 
5642 	if (!err)
5643 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5644 }
5645 
/* hci_cmd_sync work callback: stop the running discovery. @data carries
 * the pending mgmt command but is not needed here.
 */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
5650 
5651 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5652 			  u16 len)
5653 {
5654 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
5655 	struct mgmt_pending_cmd *cmd;
5656 	int err;
5657 
5658 	bt_dev_dbg(hdev, "sock %p", sk);
5659 
5660 	hci_dev_lock(hdev);
5661 
5662 	if (!hci_discovery_active(hdev)) {
5663 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5664 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
5665 					sizeof(mgmt_cp->type));
5666 		goto unlock;
5667 	}
5668 
5669 	if (hdev->discovery.type != mgmt_cp->type) {
5670 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5671 					MGMT_STATUS_INVALID_PARAMS,
5672 					&mgmt_cp->type, sizeof(mgmt_cp->type));
5673 		goto unlock;
5674 	}
5675 
5676 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5677 	if (!cmd) {
5678 		err = -ENOMEM;
5679 		goto unlock;
5680 	}
5681 
5682 	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
5683 				 stop_discovery_complete);
5684 	if (err < 0) {
5685 		mgmt_pending_remove(cmd);
5686 		goto unlock;
5687 	}
5688 
5689 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5690 
5691 unlock:
5692 	hci_dev_unlock(hdev);
5693 	return err;
5694 }
5695 
5696 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5697 			u16 len)
5698 {
5699 	struct mgmt_cp_confirm_name *cp = data;
5700 	struct inquiry_entry *e;
5701 	int err;
5702 
5703 	bt_dev_dbg(hdev, "sock %p", sk);
5704 
5705 	hci_dev_lock(hdev);
5706 
5707 	if (!hci_discovery_active(hdev)) {
5708 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5709 					MGMT_STATUS_FAILED, &cp->addr,
5710 					sizeof(cp->addr));
5711 		goto failed;
5712 	}
5713 
5714 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5715 	if (!e) {
5716 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5717 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5718 					sizeof(cp->addr));
5719 		goto failed;
5720 	}
5721 
5722 	if (cp->name_known) {
5723 		e->name_state = NAME_KNOWN;
5724 		list_del(&e->list);
5725 	} else {
5726 		e->name_state = NAME_NEEDED;
5727 		hci_inquiry_cache_update_resolve(hdev, e);
5728 	}
5729 
5730 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5731 				&cp->addr, sizeof(cp->addr));
5732 
5733 failed:
5734 	hci_dev_unlock(hdev);
5735 	return err;
5736 }
5737 
5738 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5739 			u16 len)
5740 {
5741 	struct mgmt_cp_block_device *cp = data;
5742 	u8 status;
5743 	int err;
5744 
5745 	bt_dev_dbg(hdev, "sock %p", sk);
5746 
5747 	if (!bdaddr_type_is_valid(cp->addr.type))
5748 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5749 					 MGMT_STATUS_INVALID_PARAMS,
5750 					 &cp->addr, sizeof(cp->addr));
5751 
5752 	hci_dev_lock(hdev);
5753 
5754 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5755 				  cp->addr.type);
5756 	if (err < 0) {
5757 		status = MGMT_STATUS_FAILED;
5758 		goto done;
5759 	}
5760 
5761 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5762 		   sk);
5763 	status = MGMT_STATUS_SUCCESS;
5764 
5765 done:
5766 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5767 				&cp->addr, sizeof(cp->addr));
5768 
5769 	hci_dev_unlock(hdev);
5770 
5771 	return err;
5772 }
5773 
5774 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5775 			  u16 len)
5776 {
5777 	struct mgmt_cp_unblock_device *cp = data;
5778 	u8 status;
5779 	int err;
5780 
5781 	bt_dev_dbg(hdev, "sock %p", sk);
5782 
5783 	if (!bdaddr_type_is_valid(cp->addr.type))
5784 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5785 					 MGMT_STATUS_INVALID_PARAMS,
5786 					 &cp->addr, sizeof(cp->addr));
5787 
5788 	hci_dev_lock(hdev);
5789 
5790 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5791 				  cp->addr.type);
5792 	if (err < 0) {
5793 		status = MGMT_STATUS_INVALID_PARAMS;
5794 		goto done;
5795 	}
5796 
5797 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5798 		   sk);
5799 	status = MGMT_STATUS_SUCCESS;
5800 
5801 done:
5802 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5803 				&cp->addr, sizeof(cp->addr));
5804 
5805 	hci_dev_unlock(hdev);
5806 
5807 	return err;
5808 }
5809 
/* hci_cmd_sync work callback: refresh the EIR data after the Device ID
 * record changed. @data is unused.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
5814 
5815 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5816 			 u16 len)
5817 {
5818 	struct mgmt_cp_set_device_id *cp = data;
5819 	int err;
5820 	__u16 source;
5821 
5822 	bt_dev_dbg(hdev, "sock %p", sk);
5823 
5824 	source = __le16_to_cpu(cp->source);
5825 
5826 	if (source > 0x0002)
5827 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5828 				       MGMT_STATUS_INVALID_PARAMS);
5829 
5830 	hci_dev_lock(hdev);
5831 
5832 	hdev->devid_source = source;
5833 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5834 	hdev->devid_product = __le16_to_cpu(cp->product);
5835 	hdev->devid_version = __le16_to_cpu(cp->version);
5836 
5837 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5838 				NULL, 0);
5839 
5840 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
5841 
5842 	hci_dev_unlock(hdev);
5843 
5844 	return err;
5845 }
5846 
/* Log the outcome of re-enabling an advertising instance */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
5854 
/* Completion callback for the Set Advertising command: sync the
 * HCI_ADVERTISING flag with the controller state, answer every pending
 * Set Advertising command and, when the global setting was just turned
 * off, re-enable any configured advertising instances.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's LE advertising state in the mgmt flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	/* No current instance: fall back to the first configured one */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
5902 
/* hci_cmd_sync work callback for Set Advertising: apply the requested
 * mode (cp->val: 0x00 = off, 0x01 = on, 0x02 = on and connectable).
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Value 0x02 additionally requests connectable advertising */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
5936 
/* Handle the Set Advertising mgmt command. Valid values are 0x00 (off),
 * 0x01 (on) and 0x02 (on and connectable). Depending on device state the
 * change is either applied purely as flag updates with an immediate
 * reply, or queued as asynchronous HCI work.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Changes are rejected while advertising is temporarily paused */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only when a flag actually flipped */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject when a conflicting operation is already in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6020 
6021 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6022 			      void *data, u16 len)
6023 {
6024 	struct mgmt_cp_set_static_address *cp = data;
6025 	int err;
6026 
6027 	bt_dev_dbg(hdev, "sock %p", sk);
6028 
6029 	if (!lmp_le_capable(hdev))
6030 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6031 				       MGMT_STATUS_NOT_SUPPORTED);
6032 
6033 	if (hdev_is_powered(hdev))
6034 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6035 				       MGMT_STATUS_REJECTED);
6036 
6037 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6038 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6039 			return mgmt_cmd_status(sk, hdev->id,
6040 					       MGMT_OP_SET_STATIC_ADDRESS,
6041 					       MGMT_STATUS_INVALID_PARAMS);
6042 
6043 		/* Two most significant bits shall be set */
6044 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6045 			return mgmt_cmd_status(sk, hdev->id,
6046 					       MGMT_OP_SET_STATIC_ADDRESS,
6047 					       MGMT_STATUS_INVALID_PARAMS);
6048 	}
6049 
6050 	hci_dev_lock(hdev);
6051 
6052 	bacpy(&hdev->static_addr, &cp->bdaddr);
6053 
6054 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6055 	if (err < 0)
6056 		goto unlock;
6057 
6058 	err = new_settings(hdev, sk);
6059 
6060 unlock:
6061 	hci_dev_unlock(hdev);
6062 	return err;
6063 }
6064 
6065 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6066 			   void *data, u16 len)
6067 {
6068 	struct mgmt_cp_set_scan_params *cp = data;
6069 	__u16 interval, window;
6070 	int err;
6071 
6072 	bt_dev_dbg(hdev, "sock %p", sk);
6073 
6074 	if (!lmp_le_capable(hdev))
6075 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6076 				       MGMT_STATUS_NOT_SUPPORTED);
6077 
6078 	interval = __le16_to_cpu(cp->interval);
6079 
6080 	if (interval < 0x0004 || interval > 0x4000)
6081 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6082 				       MGMT_STATUS_INVALID_PARAMS);
6083 
6084 	window = __le16_to_cpu(cp->window);
6085 
6086 	if (window < 0x0004 || window > 0x4000)
6087 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6088 				       MGMT_STATUS_INVALID_PARAMS);
6089 
6090 	if (window > interval)
6091 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6092 				       MGMT_STATUS_INVALID_PARAMS);
6093 
6094 	hci_dev_lock(hdev);
6095 
6096 	hdev->le_scan_interval = interval;
6097 	hdev->le_scan_window = window;
6098 
6099 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6100 				NULL, 0);
6101 
6102 	/* If background scan is running, restart it so new parameters are
6103 	 * loaded.
6104 	 */
6105 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6106 	    hdev->discovery.state == DISCOVERY_STOPPED)
6107 		hci_update_passive_scan(hdev);
6108 
6109 	hci_dev_unlock(hdev);
6110 
6111 	return err;
6112 }
6113 
6114 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6115 {
6116 	struct mgmt_pending_cmd *cmd = data;
6117 
6118 	bt_dev_dbg(hdev, "err %d", err);
6119 
6120 	if (err) {
6121 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6122 				mgmt_status(err));
6123 	} else {
6124 		struct mgmt_mode *cp = cmd->param;
6125 
6126 		if (cp->val)
6127 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6128 		else
6129 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6130 
6131 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6132 		new_settings(hdev, cmd->sk);
6133 	}
6134 
6135 	mgmt_pending_free(cmd);
6136 }
6137 
/* hci_cmd_sync work callback: write the requested fast-connectable
 * setting (taken from the pending command's parameters) to the
 * controller.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
6145 
/* Handle the Set Fast Connectable mgmt command. Requires BR/EDR and at
 * least Bluetooth 1.2. When powered off only the flag is toggled;
 * otherwise the change is queued as asynchronous HCI work and the reply
 * is sent from fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just echo the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag is updated; the controller is
	 * configured on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6201 
6202 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6203 {
6204 	struct mgmt_pending_cmd *cmd = data;
6205 
6206 	bt_dev_dbg(hdev, "err %d", err);
6207 
6208 	if (err) {
6209 		u8 mgmt_err = mgmt_status(err);
6210 
6211 		/* We need to restore the flag if related HCI commands
6212 		 * failed.
6213 		 */
6214 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6215 
6216 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6217 	} else {
6218 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6219 		new_settings(hdev, cmd->sk);
6220 	}
6221 
6222 	mgmt_pending_free(cmd);
6223 }
6224 
6225 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6226 {
6227 	int status;
6228 
6229 	status = hci_write_fast_connectable_sync(hdev, false);
6230 
6231 	if (!status)
6232 		status = hci_update_scan_sync(hdev);
6233 
6234 	/* Since only the advertising data flags will change, there
6235 	 * is no need to update the scan response data.
6236 	 */
6237 	if (!status)
6238 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6239 
6240 	return status;
6241 }
6242 
/* Handle the Set BR/EDR mgmt command on a dual-mode controller. While
 * powered off the flag (and dependent flags) are toggled directly; while
 * powered on only enabling is allowed and the change is queued as
 * asynchronous HCI work.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just echo the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all settings that only
		 * make sense with BR/EDR enabled.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6343 
6344 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6345 {
6346 	struct mgmt_pending_cmd *cmd = data;
6347 	struct mgmt_mode *cp;
6348 
6349 	bt_dev_dbg(hdev, "err %d", err);
6350 
6351 	if (err) {
6352 		u8 mgmt_err = mgmt_status(err);
6353 
6354 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6355 		goto done;
6356 	}
6357 
6358 	cp = cmd->param;
6359 
6360 	switch (cp->val) {
6361 	case 0x00:
6362 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6363 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6364 		break;
6365 	case 0x01:
6366 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6367 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6368 		break;
6369 	case 0x02:
6370 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6371 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
6372 		break;
6373 	}
6374 
6375 	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6376 	new_settings(hdev, cmd->sk);
6377 
6378 done:
6379 	mgmt_pending_free(cmd);
6380 }
6381 
/* hci_cmd_sync work callback for Set Secure Connections: write the SC
 * support setting taken from the pending command's parameters.
 */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6393 
/* Handler for MGMT_OP_SET_SECURE_CONN.
 *
 * cp->val: 0x00 = disable, 0x01 = enable SC, 0x02 = SC Only mode.
 * If the adapter is unpowered, not SC capable or BR/EDR is disabled the
 * flags are updated directly; otherwise the change is queued so
 * set_secure_conn_sync() writes it to the controller and
 * set_secure_conn_complete() finishes the command.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC needs either controller support or an LE-enabled setup */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR enabled on an SC capable controller, SC is only
	 * valid on top of SSP.
	 */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No controller interaction required: just flip the flags and
	 * report the (possibly changed) settings.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both flags already match the request */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6474 
/* Handler for MGMT_OP_SET_DEBUG_KEYS.
 *
 * cp->val: 0x00 = discard debug keys, 0x01 = keep stored debug keys,
 * 0x02 = additionally enable SSP debug mode on the controller.
 * HCI_KEEP_DEBUG_KEYS tracks whether keys are retained;
 * HCI_USE_DEBUG_KEYS tracks whether controller debug mode is active.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Only touch the controller when powered, SSP is on and the
	 * debug mode state actually changed.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6521 
/* Handler for MGMT_OP_SET_PRIVACY.
 *
 * cp->privacy: 0x00 = off, 0x01 = privacy (RPA) on, 0x02 = limited
 * privacy. Only allowed while the adapter is powered off; the new IRK
 * and flags then take effect on the next power on.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		/* Take over the caller-supplied IRK and force a fresh
		 * RPA to be generated.
		 */
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the IRK when privacy is switched off */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6578 
6579 static bool irk_is_valid(struct mgmt_irk_info *irk)
6580 {
6581 	switch (irk->addr.type) {
6582 	case BDADDR_LE_PUBLIC:
6583 		return true;
6584 
6585 	case BDADDR_LE_RANDOM:
6586 		/* Two most significant bits shall be set */
6587 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6588 			return false;
6589 		return true;
6590 	}
6591 
6592 	return false;
6593 }
6594 
/* Handler for MGMT_OP_LOAD_IRKS.
 *
 * Replaces the whole IRK store: after validating the count, length and
 * each entry, all existing IRKs are cleared and the supplied ones added
 * (blocked keys are skipped with a warning).
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on irk_count so struct_size() below cannot
	 * exceed the u16 message length.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before touching the existing store */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space handling IRKs implies RPA resolving support */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6665 
6666 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6667 {
6668 	if (key->initiator != 0x00 && key->initiator != 0x01)
6669 		return false;
6670 
6671 	switch (key->addr.type) {
6672 	case BDADDR_LE_PUBLIC:
6673 		return true;
6674 
6675 	case BDADDR_LE_RANDOM:
6676 		/* Two most significant bits shall be set */
6677 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6678 			return false;
6679 		return true;
6680 	}
6681 
6682 	return false;
6683 }
6684 
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.
 *
 * Replaces the whole LTK store: after validating count, length and each
 * entry, all existing LTKs are cleared and the supplied ones added.
 * Blocked keys and P256 debug keys are skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on key_count so struct_size() below cannot
	 * exceed the u16 message length.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before touching the existing store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Falling through to the default "continue" means
			 * P256 debug keys are never stored; the two
			 * assignments above are effectively dead.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6780 
/* Completion callback for MGMT_OP_GET_CONN_INFO: reply with the RSSI
 * and TX power values cached on the connection, or invalid markers on
 * failure.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* NOTE(review): copies sizeof(rp.addr) bytes starting at the
	 * bdaddr member, so this presumably picks up the address type
	 * too (bdaddr appears to be the first member of the addr info)
	 * -- confirm against struct mgmt_addr_info.
	 */
	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
6809 
/* cmd_sync work for MGMT_OP_GET_CONN_INFO: refresh the connection's
 * RSSI and, when needed, TX power values from the controller.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_conn_info_complete() */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
6847 
/* Handler for MGMT_OP_GET_CONN_INFO.
 *
 * Replies from the values cached on the hci_conn while they are still
 * fresh; otherwise queues get_conn_info_sync() to re-read them from the
 * controller and replies from its completion callback.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6939 
/* Completion callback for MGMT_OP_GET_CLOCK_INFO: reply with the local
 * clock and, when a connection was involved, the piconet clock and
 * accuracy; on error only the (zeroed) address info is returned.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	/* conn is only set when get_clock_info_sync() found a live
	 * connection for the requested address.
	 */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
6970 
/* cmd_sync work for MGMT_OP_GET_CLOCK_INFO: read the local clock and,
 * if the requested connection still exists, the piconet clock.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* Zeroed hci_cp means handle 0x0000 / which 0x00, i.e. read the
	 * local clock; the result is ignored here (best effort) and
	 * picked up from hdev->clock by the completion callback.
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_clock_info_complete() */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
6992 
/* Handler for MGMT_OP_GET_CLOCK_INFO.
 *
 * Only valid for BR/EDR addresses. BDADDR_ANY requests just the local
 * clock; a specific address additionally requires a live ACL connection
 * so the piconet clock can be read. The actual reads happen in
 * get_clock_info_sync() and the reply is sent from its completion
 * callback.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7056 
7057 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7058 {
7059 	struct hci_conn *conn;
7060 
7061 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7062 	if (!conn)
7063 		return false;
7064 
7065 	if (conn->dst_type != type)
7066 		return false;
7067 
7068 	if (conn->state != BT_CONNECTED)
7069 		return false;
7070 
7071 	return true;
7072 }
7073 
/* This function requires the caller holds hdev->lock.
 *
 * Set the auto-connect policy for the connection parameters of the
 * given address, creating them if needed, and move the entry onto the
 * matching pend_le_conns/pend_le_reports action list.
 * Returns 0 on success, -EIO if the parameters cannot be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* hci_conn_params_add() returns an existing entry if present */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on
	 * before re-filing it below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7118 
7119 static void device_added(struct sock *sk, struct hci_dev *hdev,
7120 			 bdaddr_t *bdaddr, u8 type, u8 action)
7121 {
7122 	struct mgmt_ev_device_added ev;
7123 
7124 	bacpy(&ev.addr.bdaddr, bdaddr);
7125 	ev.addr.type = type;
7126 	ev.action = action;
7127 
7128 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7129 }
7130 
/* cmd_sync work for MGMT_OP_ADD_DEVICE: re-evaluate passive scanning
 * so the newly added device takes effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7135 
/* Handler for MGMT_OP_ADD_DEVICE.
 *
 * cp->action: 0x00 = background scan (report only), 0x01 = allow
 * incoming connection / direct connect, 0x02 = auto-connect.
 * BR/EDR addresses go on the accept list (incoming only); LE identity
 * addresses get conn_params with the matching auto-connect policy and
 * trigger a passive scan update.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Accept list changed: re-evaluate page scanning */
		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	/* NULL skip_sk: the adding socket also gets the flags event */
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7237 
7238 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7239 			   bdaddr_t *bdaddr, u8 type)
7240 {
7241 	struct mgmt_ev_device_removed ev;
7242 
7243 	bacpy(&ev.addr.bdaddr, bdaddr);
7244 	ev.addr.type = type;
7245 
7246 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7247 }
7248 
/* cmd_sync work for MGMT_OP_REMOVE_DEVICE: re-evaluate passive
 * scanning after the device(s) have been removed.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7253 
/* Handler for MGMT_OP_REMOVE_DEVICE.
 *
 * A specific address removes that entry from the accept list (BR/EDR)
 * or drops its LE conn_params. BDADDR_ANY with type 0 acts as a
 * wildcard and removes all accept list entries plus all non-disabled LE
 * connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Accept list changed: re-evaluate page scanning */
			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries never added via Add Device cannot be removed
		 * through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* The wildcard address is only valid with type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a pending explicit connect,
			 * just downgrade them.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7381 
/* Handler for MGMT_OP_LOAD_CONN_PARAM.
 *
 * Clears all disabled connection parameter entries and loads the
 * supplied LE connection parameters. Invalid entries are skipped with
 * an error log rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound on param_count so struct_size() below cannot
	 * exceed the u16 message length.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Returns the existing entry if one is already present */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7466 
/* Set External Configuration (mgmt command): toggle whether the
 * controller's configuration is managed externally. Only valid while
 * powered off and only for controllers declaring the external-config
 * quirk. A successful change may move the controller between the
 * configured and unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Switching the configuration source requires the device off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	/* config is a boolean on the wire: 0x00 or 0x01 only */
	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed is true only when the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the unconfigured flag no longer matches the actual state,
	 * re-register the index: toggling to configured schedules a
	 * power on, toggling to unconfigured re-adds it in raw mode.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7522 
/* Set Public Address (mgmt command): record a public bdaddr that the
 * driver's set_bdaddr hook will program into the controller. Only
 * permitted while powered off and when the driver provides set_bdaddr.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY (all zeroes) is not a usable public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	/* Nothing more to do if the stored address did not change */
	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If setting the address completed configuration, re-register the
	 * controller as configured and schedule a power on to apply it.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7574 
/* Completion handler for the HCI Read Local OOB (Extended) Data request
 * queued by read_local_ssp_oob_req(). Builds the EIR-formatted OOB blob
 * (Class of Device plus the available P-192/P-256 hash and randomizer
 * pairs), sends it as the command response and additionally as a Local
 * OOB Data Updated event to subscribed sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if this command is no longer the tracked pending one */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* When the request itself succeeded, derive the status from the
	 * HCI reply: missing skb, error pointer, or the controller's
	 * status byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status was already produced by
		 * mgmt_status() above, so this second mapping looks
		 * redundant — confirm before changing.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy (non-Secure Connections) reply: P-192 only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class TLV (5) + hash TLV (18) + rand TLV (18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections reply: P-256 always, P-192 only
		 * when not in SC-only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	/* Assemble the EIR payload: Class of Device first, then each
	 * available hash/randomizer pair.
	 */
	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly subscribes to OOB data updates */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7697 
7698 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7699 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7700 {
7701 	struct mgmt_pending_cmd *cmd;
7702 	int err;
7703 
7704 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7705 			       cp, sizeof(*cp));
7706 	if (!cmd)
7707 		return -ENOMEM;
7708 
7709 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7710 				 read_local_oob_ext_data_complete);
7711 
7712 	if (err < 0) {
7713 		mgmt_pending_remove(cmd);
7714 		return err;
7715 	}
7716 
7717 	return 0;
7718 }
7719 
/* Read Local OOB Extended Data (mgmt command). For BR/EDR with SSP
 * enabled the data must come from the controller, so the response is
 * deferred to read_local_ssp_oob_req(); for LE the response is built
 * synchronously from local state (address, role, SC confirm/random
 * values, advertising flags).
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Precompute the worst-case EIR length for the requested address
	 * types so the response buffer can be sized up front.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Response is sent from the async completion */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick static random vs public address; addr[6] carries
		 * the address type (0x01 random, 0x00 public).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* LE Role AD value differs when advertising is active */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester implicitly subscribes to OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7880 
7881 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7882 {
7883 	u32 flags = 0;
7884 
7885 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7886 	flags |= MGMT_ADV_FLAG_DISCOV;
7887 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7888 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7889 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7890 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7891 	flags |= MGMT_ADV_PARAM_DURATION;
7892 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7893 	flags |= MGMT_ADV_PARAM_INTERVALS;
7894 	flags |= MGMT_ADV_PARAM_TX_POWER;
7895 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7896 
7897 	/* In extended adv TX_POWER returned from Set Adv Param
7898 	 * will be always valid.
7899 	 */
7900 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7901 	    ext_adv_capable(hdev))
7902 		flags |= MGMT_ADV_FLAG_TX_POWER;
7903 
7904 	if (ext_adv_capable(hdev)) {
7905 		flags |= MGMT_ADV_FLAG_SEC_1M;
7906 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7907 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7908 
7909 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7910 			flags |= MGMT_ADV_FLAG_SEC_2M;
7911 
7912 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7913 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7914 	}
7915 
7916 	return flags;
7917 }
7918 
/* Read Advertising Features (mgmt command): report the supported
 * advertising flags, data-length limits, and the identifiers of all
 * currently registered advertising instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance id */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Append each registered instance identifier */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7967 
7968 static u8 calculate_name_len(struct hci_dev *hdev)
7969 {
7970 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7971 
7972 	return eir_append_local_name(hdev, buf, 0);
7973 }
7974 
7975 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7976 			   bool is_adv_data)
7977 {
7978 	u8 max_len = HCI_MAX_AD_LENGTH;
7979 
7980 	if (is_adv_data) {
7981 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7982 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7983 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7984 			max_len -= 3;
7985 
7986 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7987 			max_len -= 3;
7988 	} else {
7989 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7990 			max_len -= calculate_name_len(hdev);
7991 
7992 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7993 			max_len -= 4;
7994 	}
7995 
7996 	return max_len;
7997 }
7998 
7999 static bool flags_managed(u32 adv_flags)
8000 {
8001 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8002 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8003 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8004 }
8005 
8006 static bool tx_power_managed(u32 adv_flags)
8007 {
8008 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8009 }
8010 
8011 static bool name_managed(u32 adv_flags)
8012 {
8013 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8014 }
8015 
8016 static bool appearance_managed(u32 adv_flags)
8017 {
8018 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8019 }
8020 
8021 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8022 			      u8 len, bool is_adv_data)
8023 {
8024 	int i, cur_len;
8025 	u8 max_len;
8026 
8027 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8028 
8029 	if (len > max_len)
8030 		return false;
8031 
8032 	/* Make sure that the data is correctly formatted. */
8033 	for (i = 0; i < len; i += (cur_len + 1)) {
8034 		cur_len = data[i];
8035 
8036 		if (!cur_len)
8037 			continue;
8038 
8039 		if (data[i + 1] == EIR_FLAGS &&
8040 		    (!is_adv_data || flags_managed(adv_flags)))
8041 			return false;
8042 
8043 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8044 			return false;
8045 
8046 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8047 			return false;
8048 
8049 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8050 			return false;
8051 
8052 		if (data[i + 1] == EIR_APPEARANCE &&
8053 		    appearance_managed(adv_flags))
8054 			return false;
8055 
8056 		/* If the current field length would exceed the total data
8057 		 * length, then it's invalid.
8058 		 */
8059 		if (i + cur_len >= len)
8060 			return false;
8061 	}
8062 
8063 	return true;
8064 }
8065 
8066 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8067 {
8068 	u32 supported_flags, phy_flags;
8069 
8070 	/* The current implementation only supports a subset of the specified
8071 	 * flags. Also need to check mutual exclusiveness of sec flags.
8072 	 */
8073 	supported_flags = get_supported_adv_flags(hdev);
8074 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8075 	if (adv_flags & ~supported_flags ||
8076 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8077 		return false;
8078 
8079 	return true;
8080 }
8081 
8082 static bool adv_busy(struct hci_dev *hdev)
8083 {
8084 	return pending_find(MGMT_OP_SET_LE, hdev);
8085 }
8086 
/* Finish an Add Advertising operation. On success, commit every pending
 * instance; on failure, remove each still-pending instance and emit an
 * Advertising Removed event for it.
 *
 * NOTE(review): the 'instance' parameter is shadowed by the loop-local
 * variable below and is therefore unused in this function.
 */
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
			     int err)
{
	struct adv_info *adv, *n;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	/* _safe variant: entries may be removed while iterating */
	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
		u8 instance;

		if (!adv->pending)
			continue;

		if (!err) {
			adv->pending = false;
			continue;
		}

		/* Save the id before hci_remove_adv_instance() frees adv */
		instance = adv->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(sk, hdev, instance);
	}

	hci_dev_unlock(hdev);
}
8118 
8119 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8120 {
8121 	struct mgmt_pending_cmd *cmd = data;
8122 	struct mgmt_cp_add_advertising *cp = cmd->param;
8123 	struct mgmt_rp_add_advertising rp;
8124 
8125 	memset(&rp, 0, sizeof(rp));
8126 
8127 	rp.instance = cp->instance;
8128 
8129 	if (err)
8130 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8131 				mgmt_status(err));
8132 	else
8133 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8134 				  mgmt_status(err), &rp, sizeof(rp));
8135 
8136 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8137 
8138 	mgmt_pending_free(cmd);
8139 }
8140 
8141 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8142 {
8143 	struct mgmt_pending_cmd *cmd = data;
8144 	struct mgmt_cp_add_advertising *cp = cmd->param;
8145 
8146 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8147 }
8148 
/* Add Advertising (mgmt command): register or replace an advertising
 * instance carrying both advertising and scan-response data, and start
 * advertising it when the controller state allows.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance ids are 1-based and bounded by the controller's sets */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Payload must exactly cover adv data plus scan-response data */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense on a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Scan-response data follows the adv data inside cp->data */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8283 
/* Completion handler for Add Extended Advertising Parameters: respond
 * with the instance's TX power and the remaining data space implied by
 * its flags; on failure, tear the instance down instead.
 *
 * NOTE(review): when the instance is already gone (adv == NULL) no
 * response is sent at all, the pending command is only freed — confirm
 * this is intended.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8334 
8335 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8336 {
8337 	struct mgmt_pending_cmd *cmd = data;
8338 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8339 
8340 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8341 }
8342 
8343 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8344 			      void *data, u16 data_len)
8345 {
8346 	struct mgmt_cp_add_ext_adv_params *cp = data;
8347 	struct mgmt_rp_add_ext_adv_params rp;
8348 	struct mgmt_pending_cmd *cmd = NULL;
8349 	struct adv_info *adv;
8350 	u32 flags, min_interval, max_interval;
8351 	u16 timeout, duration;
8352 	u8 status;
8353 	s8 tx_power;
8354 	int err;
8355 
8356 	BT_DBG("%s", hdev->name);
8357 
8358 	status = mgmt_le_support(hdev);
8359 	if (status)
8360 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8361 				       status);
8362 
8363 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8364 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8365 				       MGMT_STATUS_INVALID_PARAMS);
8366 
8367 	/* The purpose of breaking add_advertising into two separate MGMT calls
8368 	 * for params and data is to allow more parameters to be added to this
8369 	 * structure in the future. For this reason, we verify that we have the
8370 	 * bare minimum structure we know of when the interface was defined. Any
8371 	 * extra parameters we don't know about will be ignored in this request.
8372 	 */
8373 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8374 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8375 				       MGMT_STATUS_INVALID_PARAMS);
8376 
8377 	flags = __le32_to_cpu(cp->flags);
8378 
8379 	if (!requested_adv_flags_are_valid(hdev, flags))
8380 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8381 				       MGMT_STATUS_INVALID_PARAMS);
8382 
8383 	hci_dev_lock(hdev);
8384 
8385 	/* In new interface, we require that we are powered to register */
8386 	if (!hdev_is_powered(hdev)) {
8387 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8388 				      MGMT_STATUS_REJECTED);
8389 		goto unlock;
8390 	}
8391 
8392 	if (adv_busy(hdev)) {
8393 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8394 				      MGMT_STATUS_BUSY);
8395 		goto unlock;
8396 	}
8397 
8398 	/* Parse defined parameters from request, use defaults otherwise */
8399 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8400 		  __le16_to_cpu(cp->timeout) : 0;
8401 
8402 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8403 		   __le16_to_cpu(cp->duration) :
8404 		   hdev->def_multi_adv_rotation_duration;
8405 
8406 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8407 		       __le32_to_cpu(cp->min_interval) :
8408 		       hdev->le_adv_min_interval;
8409 
8410 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8411 		       __le32_to_cpu(cp->max_interval) :
8412 		       hdev->le_adv_max_interval;
8413 
8414 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8415 		   cp->tx_power :
8416 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8417 
8418 	/* Create advertising instance with no advertising or response data */
8419 	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8420 				   timeout, duration, tx_power, min_interval,
8421 				   max_interval);
8422 
8423 	if (IS_ERR(adv)) {
8424 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8425 				      MGMT_STATUS_FAILED);
8426 		goto unlock;
8427 	}
8428 
8429 	/* Submit request for advertising params if ext adv available */
8430 	if (ext_adv_capable(hdev)) {
8431 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8432 				       data, data_len);
8433 		if (!cmd) {
8434 			err = -ENOMEM;
8435 			hci_remove_adv_instance(hdev, cp->instance);
8436 			goto unlock;
8437 		}
8438 
8439 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8440 					 add_ext_adv_params_complete);
8441 		if (err < 0)
8442 			mgmt_pending_free(cmd);
8443 	} else {
8444 		rp.instance = cp->instance;
8445 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8446 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8447 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8448 		err = mgmt_cmd_complete(sk, hdev->id,
8449 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8450 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8451 	}
8452 
8453 unlock:
8454 	hci_dev_unlock(hdev);
8455 
8456 	return err;
8457 }
8458 
8459 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8460 {
8461 	struct mgmt_pending_cmd *cmd = data;
8462 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8463 	struct mgmt_rp_add_advertising rp;
8464 
8465 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8466 
8467 	memset(&rp, 0, sizeof(rp));
8468 
8469 	rp.instance = cp->instance;
8470 
8471 	if (err)
8472 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8473 				mgmt_status(err));
8474 	else
8475 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8476 				  mgmt_status(err), &rp, sizeof(rp));
8477 
8478 	mgmt_pending_free(cmd);
8479 }
8480 
8481 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8482 {
8483 	struct mgmt_pending_cmd *cmd = data;
8484 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8485 	int err;
8486 
8487 	if (ext_adv_capable(hdev)) {
8488 		err = hci_update_adv_data_sync(hdev, cp->instance);
8489 		if (err)
8490 			return err;
8491 
8492 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8493 		if (err)
8494 			return err;
8495 
8496 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8497 	}
8498 
8499 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8500 }
8501 
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA: attach advertising and scan
 * response data to an instance previously created with
 * MGMT_OP_ADD_EXT_ADV_PARAMS, then start (or reschedule) advertising.
 *
 * Runs with hdev locked. On any failure after the instance lookup the
 * not-yet-announced instance is removed again (clear_new_instance).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must already exist (created by ADD_EXT_ADV_PARAMS) */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data: adv data is cp->data[0..adv_data_len), the
	 * scan response follows it in the same buffer.
	 */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* Defer the reply to add_ext_adv_data_complete() once the sync
	 * work has run.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8620 
8621 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8622 					int err)
8623 {
8624 	struct mgmt_pending_cmd *cmd = data;
8625 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8626 	struct mgmt_rp_remove_advertising rp;
8627 
8628 	bt_dev_dbg(hdev, "err %d", err);
8629 
8630 	memset(&rp, 0, sizeof(rp));
8631 	rp.instance = cp->instance;
8632 
8633 	if (err)
8634 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8635 				mgmt_status(err));
8636 	else
8637 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8638 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8639 
8640 	mgmt_pending_free(cmd);
8641 }
8642 
8643 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8644 {
8645 	struct mgmt_pending_cmd *cmd = data;
8646 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8647 	int err;
8648 
8649 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8650 	if (err)
8651 		return err;
8652 
8653 	if (list_empty(&hdev->adv_instances))
8654 		err = hci_disable_advertising_sync(hdev);
8655 
8656 	return err;
8657 }
8658 
/* Handler for MGMT_OP_REMOVE_ADVERTISING: remove one advertising
 * instance (cp->instance != 0) or all instances (cp->instance == 0).
 *
 * Runs with hdev locked; the actual HCI work is deferred to
 * remove_advertising_sync() and the reply to
 * remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must actually exist */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Don't race with an in-flight Set LE operation */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8706 
8707 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8708 			     void *data, u16 data_len)
8709 {
8710 	struct mgmt_cp_get_adv_size_info *cp = data;
8711 	struct mgmt_rp_get_adv_size_info rp;
8712 	u32 flags, supported_flags;
8713 
8714 	bt_dev_dbg(hdev, "sock %p", sk);
8715 
8716 	if (!lmp_le_capable(hdev))
8717 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8718 				       MGMT_STATUS_REJECTED);
8719 
8720 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8721 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8722 				       MGMT_STATUS_INVALID_PARAMS);
8723 
8724 	flags = __le32_to_cpu(cp->flags);
8725 
8726 	/* The current implementation only supports a subset of the specified
8727 	 * flags.
8728 	 */
8729 	supported_flags = get_supported_adv_flags(hdev);
8730 	if (flags & ~supported_flags)
8731 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8732 				       MGMT_STATUS_INVALID_PARAMS);
8733 
8734 	rp.instance = cp->instance;
8735 	rp.flags = cp->flags;
8736 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8737 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8738 
8739 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8740 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8741 }
8742 
/* Dispatch table for mgmt commands, indexed by opcode: entry N handles
 * opcode N, so the order here must match the MGMT_OP_* values. Each
 * entry gives the handler, the (minimum) parameter size, and optional
 * flags: HCI_MGMT_VAR_LEN (size is a minimum, not exact),
 * HCI_MGMT_NO_HDEV / HCI_MGMT_HDEV_OPTIONAL (index handling),
 * HCI_MGMT_UNTRUSTED (allowed on untrusted sockets) and
 * HCI_MGMT_UNCONFIGURED (allowed on unconfigured controllers).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8870 
/* Announce a newly registered controller index to mgmt listeners.
 *
 * Legacy listeners get INDEX_ADDED or UNCONF_INDEX_ADDED; extended
 * listeners additionally get EXT_INDEX_ADDED carrying a type/bus pair.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw-only devices are not exposed through the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8902 
/* Announce removal of a controller index to mgmt listeners and flush
 * state tied to the index: pending commands are completed with
 * INVALID_INDEX first (HCI_PRIMARY only), then the removal events are
 * sent, and finally any mgmt-armed delayed work is cancelled.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw-only devices are not exposed through the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Fail all commands still pending on this index */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
8944 
/* Called when the power-on sequence finishes; err is 0 on success.
 * On success restores LE auto-connect actions and passive scanning,
 * then answers all pending Set Powered commands and emits New Settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* settings_rsp() records one requesting socket in match.sk so the
	 * New Settings event below can be skipped for that socket.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
8967 
/* Finalize mgmt state on power-off: answer pending Set Powered
 * commands, fail all other pending commands with an appropriate
 * status, reset the advertised class of device and emit New Settings.
 * Caller handles hdev locking.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Advertise an all-zero class of device if one was set */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9001 
9002 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9003 {
9004 	struct mgmt_pending_cmd *cmd;
9005 	u8 status;
9006 
9007 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9008 	if (!cmd)
9009 		return;
9010 
9011 	if (err == -ERFKILL)
9012 		status = MGMT_STATUS_RFKILLED;
9013 	else
9014 		status = MGMT_STATUS_FAILED;
9015 
9016 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9017 
9018 	mgmt_pending_remove(cmd);
9019 }
9020 
9021 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9022 		       bool persistent)
9023 {
9024 	struct mgmt_ev_new_link_key ev;
9025 
9026 	memset(&ev, 0, sizeof(ev));
9027 
9028 	ev.store_hint = persistent;
9029 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9030 	ev.key.addr.type = BDADDR_BREDR;
9031 	ev.key.type = key->type;
9032 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9033 	ev.key.pin_len = key->pin_len;
9034 
9035 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9036 }
9037 
9038 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9039 {
9040 	switch (ltk->type) {
9041 	case SMP_LTK:
9042 	case SMP_LTK_RESPONDER:
9043 		if (ltk->authenticated)
9044 			return MGMT_LTK_AUTHENTICATED;
9045 		return MGMT_LTK_UNAUTHENTICATED;
9046 	case SMP_LTK_P256:
9047 		if (ltk->authenticated)
9048 			return MGMT_LTK_P256_AUTH;
9049 		return MGMT_LTK_P256_UNAUTH;
9050 	case SMP_LTK_P256_DEBUG:
9051 		return MGMT_LTK_P256_DEBUG;
9052 	}
9053 
9054 	return MGMT_LTK_UNAUTHENTICATED;
9055 }
9056 
/* Emit a New Long Term Key event. The store hint is suppressed for
 * non-identity random addresses, and only enc_size bytes of key
 * material are exposed (the rest zeroed).
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key distributed by the initiator */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9099 
9100 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9101 {
9102 	struct mgmt_ev_new_irk ev;
9103 
9104 	memset(&ev, 0, sizeof(ev));
9105 
9106 	ev.store_hint = persistent;
9107 
9108 	bacpy(&ev.rpa, &irk->rpa);
9109 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9110 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9111 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9112 
9113 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9114 }
9115 
9116 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9117 		   bool persistent)
9118 {
9119 	struct mgmt_ev_new_csrk ev;
9120 
9121 	memset(&ev, 0, sizeof(ev));
9122 
9123 	/* Devices using resolvable or non-resolvable random addresses
9124 	 * without providing an identity resolving key don't require
9125 	 * to store signature resolving keys. Their addresses will change
9126 	 * the next time around.
9127 	 *
9128 	 * Only when a remote device provides an identity address
9129 	 * make sure the signature resolving key is stored. So allow
9130 	 * static random and public addresses here.
9131 	 */
9132 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9133 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9134 		ev.store_hint = 0x00;
9135 	else
9136 		ev.store_hint = persistent;
9137 
9138 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9139 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9140 	ev.key.type = csrk->type;
9141 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9142 
9143 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9144 }
9145 
9146 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9147 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9148 			 u16 max_interval, u16 latency, u16 timeout)
9149 {
9150 	struct mgmt_ev_new_conn_param ev;
9151 
9152 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9153 		return;
9154 
9155 	memset(&ev, 0, sizeof(ev));
9156 	bacpy(&ev.addr.bdaddr, bdaddr);
9157 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9158 	ev.store_hint = store_hint;
9159 	ev.min_interval = cpu_to_le16(min_interval);
9160 	ev.max_interval = cpu_to_le16(max_interval);
9161 	ev.latency = cpu_to_le16(latency);
9162 	ev.timeout = cpu_to_le16(timeout);
9163 
9164 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9165 }
9166 
9167 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9168 			   u8 *name, u8 name_len)
9169 {
9170 	struct sk_buff *skb;
9171 	struct mgmt_ev_device_connected *ev;
9172 	u16 eir_len = 0;
9173 	u32 flags = 0;
9174 
9175 	/* allocate buff for LE or BR/EDR adv */
9176 	if (conn->le_adv_data_len > 0)
9177 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9178 				     sizeof(*ev) + conn->le_adv_data_len);
9179 	else
9180 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9181 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9182 				     eir_precalc_len(sizeof(conn->dev_class)));
9183 
9184 	ev = skb_put(skb, sizeof(*ev));
9185 	bacpy(&ev->addr.bdaddr, &conn->dst);
9186 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9187 
9188 	if (conn->out)
9189 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9190 
9191 	ev->flags = __cpu_to_le32(flags);
9192 
9193 	/* We must ensure that the EIR Data fields are ordered and
9194 	 * unique. Keep it simple for now and avoid the problem by not
9195 	 * adding any BR/EDR data to the LE adv.
9196 	 */
9197 	if (conn->le_adv_data_len > 0) {
9198 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9199 		eir_len = conn->le_adv_data_len;
9200 	} else {
9201 		if (name)
9202 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9203 
9204 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9205 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9206 						    conn->dev_class, sizeof(conn->dev_class));
9207 	}
9208 
9209 	ev->eir_len = cpu_to_le16(eir_len);
9210 
9211 	mgmt_event_skb(skb, NULL);
9212 }
9213 
9214 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9215 {
9216 	struct sock **sk = data;
9217 
9218 	cmd->cmd_complete(cmd, 0);
9219 
9220 	*sk = cmd->sk;
9221 	sock_hold(*sk);
9222 
9223 	mgmt_pending_remove(cmd);
9224 }
9225 
/* mgmt_pending_foreach() callback: finish a pending Unpair Device
 * command. Device Unpaired is emitted before completing/removing the
 * command since cp points into cmd->param.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9236 
9237 bool mgmt_powering_down(struct hci_dev *hdev)
9238 {
9239 	struct mgmt_pending_cmd *cmd;
9240 	struct mgmt_mode *cp;
9241 
9242 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9243 	if (!cmd)
9244 		return false;
9245 
9246 	cp = cmd->param;
9247 	if (!cp->val)
9248 		return true;
9249 
9250 	return false;
9251 }
9252 
/* Emit a Device Disconnected event and finish any pending Disconnect
 * and Unpair Device commands for this link. Also kicks the final
 * power-off work when this was the last open connection during a
 * power-down.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections that mgmt announced in the first place */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() hands back the requesting socket (held) so the
	 * event below can be skipped for it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9292 
/* Complete a pending Disconnect command with the (mapped) HCI failure
 * status, but only when the pending command targets this exact address
 * and address type. Pending Unpair Device commands are flushed
 * unconditionally first.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Leave the command pending if it is for a different device */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9318 
9319 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9320 			 u8 addr_type, u8 status)
9321 {
9322 	struct mgmt_ev_connect_failed ev;
9323 
9324 	/* The connection is still in hci_conn_hash so test for 1
9325 	 * instead of 0 to know if this is the last one.
9326 	 */
9327 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9328 		cancel_delayed_work(&hdev->power_off);
9329 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
9330 	}
9331 
9332 	bacpy(&ev.addr.bdaddr, bdaddr);
9333 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9334 	ev.status = mgmt_status(status);
9335 
9336 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9337 }
9338 
9339 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9340 {
9341 	struct mgmt_ev_pin_code_request ev;
9342 
9343 	bacpy(&ev.addr.bdaddr, bdaddr);
9344 	ev.addr.type = BDADDR_BREDR;
9345 	ev.secure = secure;
9346 
9347 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9348 }
9349 
9350 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9351 				  u8 status)
9352 {
9353 	struct mgmt_pending_cmd *cmd;
9354 
9355 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9356 	if (!cmd)
9357 		return;
9358 
9359 	cmd->cmd_complete(cmd, mgmt_status(status));
9360 	mgmt_pending_remove(cmd);
9361 }
9362 
9363 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9364 				      u8 status)
9365 {
9366 	struct mgmt_pending_cmd *cmd;
9367 
9368 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9369 	if (!cmd)
9370 		return;
9371 
9372 	cmd->cmd_complete(cmd, mgmt_status(status));
9373 	mgmt_pending_remove(cmd);
9374 }
9375 
9376 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9377 			      u8 link_type, u8 addr_type, u32 value,
9378 			      u8 confirm_hint)
9379 {
9380 	struct mgmt_ev_user_confirm_request ev;
9381 
9382 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9383 
9384 	bacpy(&ev.addr.bdaddr, bdaddr);
9385 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9386 	ev.confirm_hint = confirm_hint;
9387 	ev.value = cpu_to_le32(value);
9388 
9389 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9390 			  NULL);
9391 }
9392 
9393 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9394 			      u8 link_type, u8 addr_type)
9395 {
9396 	struct mgmt_ev_user_passkey_request ev;
9397 
9398 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9399 
9400 	bacpy(&ev.addr.bdaddr, bdaddr);
9401 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9402 
9403 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9404 			  NULL);
9405 }
9406 
9407 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9408 				      u8 link_type, u8 addr_type, u8 status,
9409 				      u8 opcode)
9410 {
9411 	struct mgmt_pending_cmd *cmd;
9412 
9413 	cmd = pending_find(opcode, hdev);
9414 	if (!cmd)
9415 		return -ENOENT;
9416 
9417 	cmd->cmd_complete(cmd, mgmt_status(status));
9418 	mgmt_pending_remove(cmd);
9419 
9420 	return 0;
9421 }
9422 
9423 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9424 				     u8 link_type, u8 addr_type, u8 status)
9425 {
9426 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9427 					  status, MGMT_OP_USER_CONFIRM_REPLY);
9428 }
9429 
9430 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9431 					 u8 link_type, u8 addr_type, u8 status)
9432 {
9433 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9434 					  status,
9435 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
9436 }
9437 
/* Complete a pending User Passkey Reply command with @status. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9444 
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9452 
9453 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9454 			     u8 link_type, u8 addr_type, u32 passkey,
9455 			     u8 entered)
9456 {
9457 	struct mgmt_ev_passkey_notify ev;
9458 
9459 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9460 
9461 	bacpy(&ev.addr.bdaddr, bdaddr);
9462 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9463 	ev.passkey = __cpu_to_le32(passkey);
9464 	ev.entered = entered;
9465 
9466 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9467 }
9468 
/* Report an authentication failure for @conn to userspace.
 *
 * Emits MGMT_EV_AUTH_FAILED (skipping the socket that initiated the
 * pairing, if any) and then completes and removes the pending pairing
 * command. The event is sent before completing the command so that the
 * initiator receives its command response rather than the event.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9489 
/* Handle completion of an HCI authentication-enable change triggered by
 * MGMT_OP_SET_LINK_SECURITY.
 *
 * On failure, all pending Set Link Security commands are answered with
 * the translated error status. On success, HCI_LINK_SECURITY is synced
 * to the controller's HCI_AUTH flag, pending commands get a settings
 * response, and New Settings is broadcast if the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state; "changed" is true only when the
	 * mgmt-visible flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp takes a reference on the first command's socket */
	if (match.sk)
		sock_put(match.sk);
}
9516 
9517 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9518 {
9519 	struct cmd_lookup *match = data;
9520 
9521 	if (match->sk == NULL) {
9522 		match->sk = cmd->sk;
9523 		sock_hold(match->sk);
9524 	}
9525 }
9526 
/* Handle completion of a Class of Device update triggered by Set Dev
 * Class, Add UUID or Remove UUID.
 *
 * Collects the socket of the first pending command (to skip it when
 * broadcasting) and, on success, emits Class Of Device Changed followed
 * by an Extended Info Changed notification.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* Class of Device is always 3 bytes on the wire */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	/* Drop the reference taken by sk_lookup() */
	if (match.sk)
		sock_put(match.sk);
}
9545 
/* Handle completion of a local name update.
 *
 * On success, broadcasts Local Name Changed (and Extended Info Changed)
 * with the new complete and short names. If the change was not caused
 * by a pending Set Local Name command, the stored dev_name is updated
 * here; in that case events are suppressed while a Set Powered command
 * is pending, since power-on sequences set the name internally.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the initiating socket (if any) when broadcasting */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9573 
9574 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9575 {
9576 	int i;
9577 
9578 	for (i = 0; i < uuid_count; i++) {
9579 		if (!memcmp(uuid, uuids[i], 16))
9580 			return true;
9581 	}
9582 
9583 	return false;
9584 }
9585 
/* Scan EIR/advertising data for any UUID from the @uuids filter list.
 *
 * Each EIR field is laid out as: eir[0] = length of (type + data),
 * eir[1] = field type, eir[2..] = data. 16- and 32-bit UUIDs are
 * expanded into full 128-bit UUIDs using the Bluetooth base UUID
 * before comparison (wire format is little-endian, so the low bytes
 * land in uuid[12..15]).
 *
 * Returns true on the first match, false if no UUID matches or the
 * data is exhausted/malformed.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* Zero length terminates the significant part of the data */
		if (field_len == 0)
			break;

		/* Stop on truncated fields rather than reading past the end */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + field_len bytes) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9640 
/* Schedule a delayed restart of the ongoing LE scan so that duplicate
 * filtering is reset and updated RSSI values get reported.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart if it would only fire after the current scan
	 * window (scan_start + scan_duration) has already elapsed.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
9655 
/* Apply the service discovery filter (RSSI threshold and UUID list) to
 * a found device. Returns true if the result should be reported to
 * userspace, false if it must be dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
9700 
/* Emit MGMT_EV_ADV_MONITOR_DEVICE_LOST when a device previously matched
 * by the Advertisement Monitor with @handle is no longer being tracked.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
9713 
/* Build and send MGMT_EV_ADV_MONITOR_DEVICE_FOUND from an existing
 * DEVICE_FOUND event skb, prefixing the matched monitor @handle.
 *
 * @skb is only read here; the caller retains ownership of it. A fresh
 * skb is allocated for the monitor event; on allocation failure the
 * event is silently dropped.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Size difference between the two event formats is exactly the
	 * extra monitor_handle field.
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
9743 
/* Route a found-device skb to DEVICE_FOUND and/or per-monitor
 * ADV_MONITOR_DEVICE_FOUND events.
 *
 * Consumes @skb: it is either handed off via mgmt_event_skb() or freed
 * with kfree_skb() before returning.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: stays false only if every monitored device
	 * has already been notified.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Either hand the skb to the event layer or release it */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
9807 
/* Report a discovered device to userspace via MGMT_EV_DEVICE_FOUND
 * (and/or MGMT_EV_ADV_MONITOR_DEVICE_FOUND for matched monitors).
 *
 * Results may be suppressed entirely when no kernel-initiated
 * discovery, passive-scan report or advertisement monitor applies, or
 * when service-discovery filtering / limited-discovery checks fail.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a Class of Device EIR field if the controller gave us
	 * one but it is not already present in the EIR data.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Hands off ownership of skb (sent or freed there) */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
9893 
9894 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9895 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9896 {
9897 	struct sk_buff *skb;
9898 	struct mgmt_ev_device_found *ev;
9899 	u16 eir_len = 0;
9900 	u32 flags = 0;
9901 
9902 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
9903 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
9904 
9905 	ev = skb_put(skb, sizeof(*ev));
9906 	bacpy(&ev->addr.bdaddr, bdaddr);
9907 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9908 	ev->rssi = rssi;
9909 
9910 	if (name)
9911 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9912 	else
9913 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9914 
9915 	ev->eir_len = cpu_to_le16(eir_len);
9916 	ev->flags = cpu_to_le32(flags);
9917 
9918 	mgmt_event_skb(skb, NULL);
9919 }
9920 
9921 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9922 {
9923 	struct mgmt_ev_discovering ev;
9924 
9925 	bt_dev_dbg(hdev, "discovering %u", discovering);
9926 
9927 	memset(&ev, 0, sizeof(ev));
9928 	ev.type = hdev->discovery.type;
9929 	ev.discovering = discovering;
9930 
9931 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9932 }
9933 
/* Broadcast MGMT_EV_CONTROLLER_SUSPEND with the given suspend @state. */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
9941 
9942 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9943 		   u8 addr_type)
9944 {
9945 	struct mgmt_ev_controller_resume ev;
9946 
9947 	ev.wake_reason = reason;
9948 	if (bdaddr) {
9949 		bacpy(&ev.addr.bdaddr, bdaddr);
9950 		ev.addr.type = addr_type;
9951 	} else {
9952 		memset(&ev.addr, 0, sizeof(ev.addr));
9953 	}
9954 
9955 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9956 }
9957 
/* Control channel descriptor registered with the HCI socket layer;
 * routes HCI_CHANNEL_CONTROL messages to the mgmt_handlers table and
 * initializes per-hdev mgmt state via mgmt_init_hdev.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9964 
/* Register the mgmt control channel. Returns 0 or a negative errno
 * from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9969 
/* Unregister the mgmt control channel on module teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9974