xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 6ea9a2b8)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 
42 #define MGMT_VERSION	1
43 #define MGMT_REVISION	20
44 
/* Management opcodes that trusted (privileged) control sockets may send.
 * This list is reported back verbatim by MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
131 
/* Management events delivered to trusted control sockets. Also reported
 * back by MGMT_OP_READ_COMMANDS after the command list.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};
176 
/* Subset of management opcodes that untrusted (non-privileged) sockets
 * may send - read-only information commands exclusively.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
189 
/* Subset of management events that untrusted (non-privileged) sockets
 * receive.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
204 
205 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
206 
207 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
208 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
209 
/* HCI to MGMT error code conversion table
 *
 * Indexed directly by the HCI status code; HCI codes beyond the end of
 * the table are mapped to MGMT_STATUS_FAILED by mgmt_status().
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
274 
275 static u8 mgmt_status(u8 hci_status)
276 {
277 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
278 		return mgmt_status_table[hci_status];
279 
280 	return MGMT_STATUS_FAILED;
281 }
282 
/* Broadcast an index-related event on the control channel, restricted to
 * sockets that have @flag set; no socket is excluded (skip_sk is NULL).
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
289 
/* Broadcast an event on the control channel to sockets that have @flag
 * set, excluding @skip_sk (if non-NULL).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
296 
/* Broadcast an event on the control channel to all trusted sockets,
 * excluding @skip_sk (if non-NULL).
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
303 
304 static u8 le_addr_type(u8 mgmt_addr_type)
305 {
306 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
307 		return ADDR_LE_DEV_PUBLIC;
308 	else
309 		return ADDR_LE_DEV_RANDOM;
310 }
311 
/* Fill a struct mgmt_rp_read_version (passed as a void pointer) with the
 * mgmt interface version and revision implemented by this file.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
319 
/* Handle MGMT_OP_READ_VERSION: reply with the mgmt interface version and
 * revision. Operates without a controller index (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
332 
333 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
334 			 u16 data_len)
335 {
336 	struct mgmt_rp_read_commands *rp;
337 	u16 num_commands, num_events;
338 	size_t rp_size;
339 	int i, err;
340 
341 	bt_dev_dbg(hdev, "sock %p", sk);
342 
343 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
344 		num_commands = ARRAY_SIZE(mgmt_commands);
345 		num_events = ARRAY_SIZE(mgmt_events);
346 	} else {
347 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
348 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
349 	}
350 
351 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
352 
353 	rp = kmalloc(rp_size, GFP_KERNEL);
354 	if (!rp)
355 		return -ENOMEM;
356 
357 	rp->num_commands = cpu_to_le16(num_commands);
358 	rp->num_events = cpu_to_le16(num_events);
359 
360 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
361 		__le16 *opcode = rp->opcodes;
362 
363 		for (i = 0; i < num_commands; i++, opcode++)
364 			put_unaligned_le16(mgmt_commands[i], opcode);
365 
366 		for (i = 0; i < num_events; i++, opcode++)
367 			put_unaligned_le16(mgmt_events[i], opcode);
368 	} else {
369 		__le16 *opcode = rp->opcodes;
370 
371 		for (i = 0; i < num_commands; i++, opcode++)
372 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
373 
374 		for (i = 0; i < num_events; i++, opcode++)
375 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
376 	}
377 
378 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
379 				rp, rp_size);
380 	kfree(rp);
381 
382 	return err;
383 }
384 
/* Handle MGMT_OP_READ_INDEX_LIST: reply with the ids of all configured
 * HCI_PRIMARY controllers. Controllers in setup/config, bound to a user
 * channel, raw-only, or unconfigured are left out of the reply.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper-bound count of matching controllers so the
	 * reply can be sized. The second pass applies stricter filters,
	 * so the final count may be smaller.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: hci_dev_list_lock is held across the allocation */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the actual index list */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* Recompute the reply length from the filtered count */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
444 
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list() but
 * reports only HCI_PRIMARY controllers that are still unconfigured.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper-bound count used only to size the reply */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: hci_dev_list_lock is held across the allocation */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, applying the stricter filters */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* Recompute the reply length from the filtered count */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
504 
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: reply with an entry per primary or
 * AMP controller, tagging each with a type (0x00 configured primary,
 * 0x01 unconfigured primary, 0x02 AMP) and its bus. As a side effect the
 * calling socket is switched to extended index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper-bound count used to size the allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* struct_size() computes the flex-array allocation overflow-safely;
	 * GFP_ATOMIC because hci_dev_list_lock is held.
	 */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill entries, applying the stricter filters */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
578 
579 static bool is_configured(struct hci_dev *hdev)
580 {
581 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
582 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
583 		return false;
584 
585 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
586 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
587 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
588 		return false;
589 
590 	return true;
591 }
592 
/* Return (little-endian) the configuration options still missing before
 * the controller counts as configured. The conditions mirror
 * is_configured(); keep the two in sync.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
608 
/* Emit MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options mask
 * to sockets listening for option events, excluding @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
616 
/* Complete @opcode successfully with the current missing-options mask as
 * the reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
624 
/* Handle MGMT_OP_READ_CONFIG_INFO: report the manufacturer id plus which
 * configuration options the controller supports (external config if the
 * quirk is set, public address if the driver provides set_bdaddr) and
 * which of them are still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can only be configured if the driver exposes a
	 * set_bdaddr hook.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
652 
/* Build the MGMT_PHY_* mask of PHYs the controller supports, derived
 * from its BR/EDR LMP feature bits and LE feature bits. Multi-slot and
 * 3M EDR variants are only offered when the prerequisite rate/slot
 * capabilities are present.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			/* 3M EDR implies 2M capability, hence the nesting */
			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for LE */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
704 
/* Build the MGMT_PHY_* mask of currently selected PHYs. For BR the
 * DM/DH pkt_type bits enable the multi-slot packet types, while the EDR
 * 2DHx/3DHx bits have inverted sense (a set bit means that packet type
 * is *excluded*), hence the negated tests below. LE selection comes from
 * the default TX/RX PHY preferences.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits set in pkt_type disable the packet type */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
767 
768 static u32 get_configurable_phys(struct hci_dev *hdev)
769 {
770 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
771 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
772 }
773 
/* Build the MGMT_SETTING_* mask of settings this controller can support,
 * based on its BR/EDR and LE capabilities, quirks, and driver hooks.
 * Compare with get_current_settings() which reports what is enabled now.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always-available settings */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable requires at least Bluetooth 1.2 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration is possible if either external config is required
	 * or the driver can set a public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
825 
/* Build the MGMT_SETTING_* mask of settings currently in effect, derived
 * from the power state and the device flags. Counterpart of
 * get_supported_settings().
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
896 
/* Look up a pending mgmt command for @hdev by opcode on the control
 * channel.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
901 
/* Like pending_find() but additionally matches on the command's
 * associated @data pointer.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
908 
/* Return the LE advertising discoverability flag to use: LE_AD_GENERAL,
 * LE_AD_LIMITED, or 0 for non-discoverable. A pending Set Discoverable
 * command takes precedence over the current device flags.
 */
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}
932 
/* Return whether the controller should be treated as connectable. A
 * pending Set Connectable command takes precedence over the current
 * HCI_CONNECTABLE flag.
 */
bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
949 
/* Delayed-work handler (hdev->service_cache): when the service cache
 * period ends, clear HCI_SERVICE_CACHE and push the up-to-date EIR data
 * and class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* test-and-clear makes this a one-shot per caching period */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
970 
/* Delayed-work handler (hdev->rpa_expired): mark the resolvable private
 * address as expired and, if advertising is enabled, restart advertising
 * so that a fresh RPA gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
995 
/* One-time per-controller mgmt initialization, guarded by the HCI_MGMT
 * flag (test-and-set makes repeated calls no-ops): set up the delayed
 * works and clear the implicit bondable state.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1011 
/* Handle MGMT_OP_READ_INFO: reply with the controller's address, HCI
 * version, manufacturer, supported/current settings, class of device,
 * and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1041 
/* Append the EIR data used by the extended-info reply/event to @eir:
 * class of device (only when BR/EDR is enabled), appearance (only when
 * LE is enabled), plus the complete and short local names. Returns the
 * total number of bytes written.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1065 
/* Read Extended Controller Information command handler. The reply is
 * built in a 512-byte stack buffer: a fixed header followed by a
 * variable-length EIR blob (see append_eir_data_to_buf()).
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Zero the whole buffer so unused bytes never leak to userspace */
	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1105 
/* Send the Extended Info Changed event (header + current EIR blob) to
 * all sockets that opted in via HCI_MGMT_EXT_INFO_EVENTS, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	/* Zero-fill so unused buffer bytes never leak to userspace */
	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1121 
1122 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1123 {
1124 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1125 
1126 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1127 				 sizeof(settings));
1128 }
1129 
/* Request-complete callback for clean_up_hci_state(): once all
 * connections are gone, run the pending power-off work immediately
 * instead of waiting for its delayed timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1139 
1140 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1141 {
1142 	struct mgmt_ev_advertising_added ev;
1143 
1144 	ev.instance = instance;
1145 
1146 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1147 }
1148 
1149 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1150 			      u8 instance)
1151 {
1152 	struct mgmt_ev_advertising_removed ev;
1153 
1154 	ev.instance = instance;
1155 
1156 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1157 }
1158 
1159 static void cancel_adv_timeout(struct hci_dev *hdev)
1160 {
1161 	if (hdev->adv_instance_timeout) {
1162 		hdev->adv_instance_timeout = 0;
1163 		cancel_delayed_work(&hdev->adv_instance_expire);
1164 	}
1165 }
1166 
/* Build and run one HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, stop advertising, stop any
 * discovery and abort every existing connection. Returns the
 * hci_req_run() result (-ENODATA if nothing needed to be sent).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Drop all advertising instances (no re-advertise afterwards) */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1200 
/* Set Powered command handler. cp->val: 0x00 = power off, 0x01 = power
 * on. Powering on is handed to the power_on work; powering off first
 * cleans up HCI state and then schedules the power_off work.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings back */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1255 
1256 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1257 {
1258 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1259 
1260 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1261 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1262 }
1263 
/* Broadcast the current settings to every subscribed mgmt socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1268 
/* Shared context for the mgmt_pending_foreach() callbacks below. */
struct cmd_lookup {
	struct sock *sk;	/* first responded socket; held via sock_hold() */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1274 
/* mgmt_pending_foreach() callback: answer a pending settings command
 * with the current settings, unlink and free the pending entry. The
 * first command's socket is stashed (with a held reference) in
 * match->sk so the caller can skip it when broadcasting New Settings.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1290 
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status byte passed via @data and remove the pending entry.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1298 
1299 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1300 {
1301 	if (cmd->cmd_complete) {
1302 		u8 *status = data;
1303 
1304 		cmd->cmd_complete(cmd, *status);
1305 		mgmt_pending_remove(cmd);
1306 
1307 		return;
1308 	}
1309 
1310 	cmd_status_rsp(cmd, data);
1311 }
1312 
/* Generic cmd_complete handler: echo the original command parameters
 * back as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1318 
/* cmd_complete handler for address-based commands: reply with only the
 * leading mgmt_addr_info portion of the original parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1324 
1325 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1326 {
1327 	if (!lmp_bredr_capable(hdev))
1328 		return MGMT_STATUS_NOT_SUPPORTED;
1329 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1330 		return MGMT_STATUS_REJECTED;
1331 	else
1332 		return MGMT_STATUS_SUCCESS;
1333 }
1334 
1335 static u8 mgmt_le_support(struct hci_dev *hdev)
1336 {
1337 	if (!lmp_le_capable(hdev))
1338 		return MGMT_STATUS_NOT_SUPPORTED;
1339 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1340 		return MGMT_STATUS_REJECTED;
1341 	else
1342 		return MGMT_STATUS_SUCCESS;
1343 }
1344 
/* HCI completion handler for the discoverable update: resolve the
 * pending Set Discoverable command, arm the discoverable timeout on
 * success and broadcast the new settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited-discoverable flag on failure */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* The timeout is armed here, not in set_discoverable(), so it
	 * only starts counting once the mode change actually took effect.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1379 
/* Set Discoverable command handler. cp->val: 0x00 = off, 0x01 =
 * general discoverable, 0x02 = limited discoverable (which mandates a
 * timeout). Flags are updated here; the HCI work is deferred to the
 * discoverable_update work item, and the timeout is armed in
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a powered controller to count it down */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1509 
/* HCI completion handler for the connectable update: resolve the
 * pending Set Connectable command and broadcast the new settings on
 * success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1537 
/* Apply a connectable change purely at the flag level (used from
 * set_connectable() when the controller is powered off). Turning
 * connectable off also drops discoverable. Responds on @sk and, if
 * anything changed, updates scanning and broadcasts New Settings.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1566 
/* Set Connectable command handler. cp->val: 0x00 = off, 0x01 = on.
 * Flags are updated here and the HCI work is deferred to the
 * connectable_update work item; when powered off only the settings
 * are touched (set_connectable_update_settings()).
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also ends any discoverable mode */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1623 
/* Set Bondable command handler. cp->val: 0x00 = off, 0x01 = on. Pure
 * flag change; no HCI commands are needed, but a discoverable update
 * may be queued when limited privacy is in use.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1666 
/* Set Link Security command handler. cp->val: 0x00 = off, 0x01 = on.
 * When powered, sends HCI Write Authentication Enable; when powered
 * off only the HCI_LINK_SECURITY flag is toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Link security is a BR/EDR-only feature */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1735 
/* Set Secure Simple Pairing command handler. cp->val: 0x00 = off,
 * 0x01 = on. Requires BR/EDR and SSP-capable hardware; when powered
 * this sends HCI Write Simple Pairing Mode, otherwise only the flags
 * are toggled. Disabling SSP also drops High Speed, which depends on it.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* HS requires SSP, so clear it as well; "changed"
			 * must end up true if either flag was cleared.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings back */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off SSP debug mode if it was in use */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1816 
/* Set High Speed command handler. cp->val: 0x00 = off, 0x01 = on.
 * Requires CONFIG_BT_HS, BR/EDR, SSP-capable hardware and SSP enabled.
 * Pure flag change; disabling is rejected while the device is powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS builds on top of SSP */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An in-flight Set SSP could invalidate the checks above */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1877 
/* HCI completion handler for Set LE: resolve all pending SET_LE
 * commands (failing them on error), broadcast the new settings and,
 * when LE ended up enabled, refresh the default advertising data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	/* settings_rsp() stashes the first responded socket in match.sk
	 * (with a held reference) so new_settings() can skip it below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1924 
/* Set Low Energy command handler. cp->val: 0x00 = off, 0x01 = on.
 * When powered and the host LE state actually changes, this sends HCI
 * Write LE Host Supported (disabling advertising first when turning
 * LE off); otherwise only the flags are toggled.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off, or controller already matches: flag change only */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Advertising has to be stopped before LE is disabled */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2031 
2032 /* This is a helper function to test for pending mgmt commands that can
2033  * cause CoD or EIR HCI commands. We can only allow one such pending
2034  * mgmt command at a time since otherwise we cannot easily track what
2035  * the current values are, will be, and based on that calculate if a new
2036  * HCI command needs to be sent and if yes with what value.
2037  */
2038 static bool pending_eir_or_class(struct hci_dev *hdev)
2039 {
2040 	struct mgmt_pending_cmd *cmd;
2041 
2042 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2043 		switch (cmd->opcode) {
2044 		case MGMT_OP_ADD_UUID:
2045 		case MGMT_OP_REMOVE_UUID:
2046 		case MGMT_OP_SET_DEV_CLASS:
2047 		case MGMT_OP_SET_POWERED:
2048 			return true;
2049 		}
2050 	}
2051 
2052 	return false;
2053 }
2054 
/* Bluetooth Base UUID in little-endian byte order. For shortened
 * (16/32-bit) UUIDs derived from it, bytes 0-11 match this array and
 * bytes 12-15 carry the shortened value (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2059 
2060 static u8 get_uuid_size(const u8 *uuid)
2061 {
2062 	u32 val;
2063 
2064 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2065 		return 128;
2066 
2067 	val = get_unaligned_le32(&uuid[12]);
2068 	if (val > 0xffff)
2069 		return 32;
2070 
2071 	return 16;
2072 }
2073 
/* Finish a pending class-affecting command (@mgmt_op) by replying with
 * the current class of device and removing the pending entry.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2092 
/* HCI request callback for add_uuid(): complete the pending command. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2099 
/* Add UUID command handler: append the UUID to hdev->uuids and refresh
 * class of device and EIR data. If no HCI commands turn out to be
 * needed (-ENODATA), complete immediately; otherwise completion comes
 * via add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA: nothing to send, reply with current class now */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2157 
2158 static bool enable_service_cache(struct hci_dev *hdev)
2159 {
2160 	if (!hdev_is_powered(hdev))
2161 		return false;
2162 
2163 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2164 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2165 				   CACHE_TIMEOUT);
2166 		return true;
2167 	}
2168 
2169 	return false;
2170 }
2171 
/* HCI request completion callback for remove_uuid(): forward the HCI
 * status to the pending MGMT_OP_REMOVE_UUID command.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2178 
/* MGMT_OP_REMOVE_UUID handler: remove one service UUID (or all of them
 * when an all-zero UUID is given) and refresh the device class and EIR
 * data.  Completes asynchronously via remove_uuid_complete() unless the
 * HCI request is empty or the service cache absorbs the update.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything". */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one class/EIR changing operation may be in flight. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache was armed, the actual class/EIR
		 * update is deferred - complete the command right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request - class/EIR already current, succeed now. */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2257 
/* HCI request completion callback for set_dev_class(): forward the HCI
 * status to the pending MGMT_OP_SET_DEV_CLASS command.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2264 
/* MGMT_OP_SET_DEV_CLASS handler: set the major/minor device class and
 * push the combined class (including service-class bits) to the
 * controller.  BR/EDR only.  Completes asynchronously via
 * set_class_complete() unless the device is powered off or the HCI
 * request is empty.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The two low minor bits and three high major bits are reserved
	 * (format type / unused) and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off only store the values; the controller gets
	 * them on next power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock while waiting for the cache-flush work to
		 * cancel; NOTE(review): presumably the work item takes the
		 * hdev lock itself, so holding it here could deadlock -
		 * confirm against hci_request.c.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request - class already current, succeed now. */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2335 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the entire set of stored
 * BR/EDR link keys with the ones supplied by userspace and update the
 * keep-debug-keys policy.  Validates key count against the message
 * length before touching any state.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound so that struct_size() below cannot wrap a u16. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must be exactly header + key_count entries. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry up front so the existing key store is not
	 * cleared for a partially invalid request.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Blocked keys are skipped rather than rejected so the
		 * rest of the list still loads.
		 */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2424 
/* Broadcast a MGMT_EV_DEVICE_UNPAIRED event for the given address to
 * all mgmt sockets except @skip_sk (typically the one that issued the
 * unpair command and already got a command-complete).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2436 
/* MGMT_OP_UNPAIR_DEVICE handler: delete stored pairing material for a
 * device (link key for BR/EDR; SMP LTK/IRK and ongoing pairing for LE)
 * and optionally terminate an existing connection.  When a disconnect
 * is requested and a link exists, the command completes from the
 * disconnect path; otherwise it completes here.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: drop the stored connection parameters
		 * immediately since there is no close event to defer to.
		 */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2564 
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE connection to
 * the given address.  The command completes asynchronously when the
 * disconnect event arrives; only one disconnect may be pending at a
 * time per controller.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED links carry no active connection to drop. */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2630 
2631 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2632 {
2633 	switch (link_type) {
2634 	case LE_LINK:
2635 		switch (addr_type) {
2636 		case ADDR_LE_DEV_PUBLIC:
2637 			return BDADDR_LE_PUBLIC;
2638 
2639 		default:
2640 			/* Fallback to LE Random address type */
2641 			return BDADDR_LE_RANDOM;
2642 		}
2643 
2644 	default:
2645 		/* Fallback to BR/EDR type */
2646 		return BDADDR_BREDR;
2647 	}
2648 }
2649 
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections, excluding SCO/eSCO links (which always have
 * a parent ACL that is reported instead).
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the reply (may slightly
	 * over-allocate since SCO links are filtered out below).
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		/* SCO/eSCO entries are written but then excluded: i is
		 * not advanced, so the slot is overwritten by the next
		 * entry or dropped by the final length below.
		 */
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2703 
/* Queue a HCI PIN Code Negative Reply and track it as a pending
 * MGMT_OP_PIN_CODE_NEG_REPLY command; the mgmt response is sent when
 * the HCI command completes.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2724 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a userspace-supplied PIN code
 * to the controller for an ongoing BR/EDR pairing.  If the link needs
 * high security a full 16-byte PIN is mandatory; shorter PINs are
 * auto-converted into a negative reply.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16-digit PIN; reject the pairing on
	 * the HCI level and tell userspace its parameters were invalid.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2786 
2787 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2788 			     u16 len)
2789 {
2790 	struct mgmt_cp_set_io_capability *cp = data;
2791 
2792 	bt_dev_dbg(hdev, "sock %p", sk);
2793 
2794 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2795 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2796 				       MGMT_STATUS_INVALID_PARAMS);
2797 
2798 	hci_dev_lock(hdev);
2799 
2800 	hdev->io_capability = cp->io_capability;
2801 
2802 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2803 
2804 	hci_dev_unlock(hdev);
2805 
2806 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2807 				 NULL, 0);
2808 }
2809 
2810 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2811 {
2812 	struct hci_dev *hdev = conn->hdev;
2813 	struct mgmt_pending_cmd *cmd;
2814 
2815 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2816 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2817 			continue;
2818 
2819 		if (cmd->user_data != conn)
2820 			continue;
2821 
2822 		return cmd;
2823 	}
2824 
2825 	return NULL;
2826 }
2827 
/* Finish a PAIR_DEVICE command: send the response, detach all pairing
 * callbacks from the connection and release the references taken when
 * the pairing was started (hci_conn_drop for the usage count,
 * hci_conn_put for the cmd->user_data reference).
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
2856 
2857 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2858 {
2859 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2860 	struct mgmt_pending_cmd *cmd;
2861 
2862 	cmd = find_pairing(conn);
2863 	if (cmd) {
2864 		cmd->cmd_complete(cmd, status);
2865 		mgmt_pending_remove(cmd);
2866 	}
2867 }
2868 
/* BR/EDR connection callback (connect/security/disconnect) used while
 * a PAIR_DEVICE command is pending: any invocation ends the pairing
 * with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2884 
/* LE connection callback used while a PAIR_DEVICE command is pending.
 * Unlike the BR/EDR variant, success here is ignored: for LE, merely
 * connecting does not mean pairing finished (SMP reports that via
 * mgmt_smp_complete()), so only failures end the command.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2903 
2904 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2905 		       u16 len)
2906 {
2907 	struct mgmt_cp_pair_device *cp = data;
2908 	struct mgmt_rp_pair_device rp;
2909 	struct mgmt_pending_cmd *cmd;
2910 	u8 sec_level, auth_type;
2911 	struct hci_conn *conn;
2912 	int err;
2913 
2914 	bt_dev_dbg(hdev, "sock %p", sk);
2915 
2916 	memset(&rp, 0, sizeof(rp));
2917 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2918 	rp.addr.type = cp->addr.type;
2919 
2920 	if (!bdaddr_type_is_valid(cp->addr.type))
2921 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2922 					 MGMT_STATUS_INVALID_PARAMS,
2923 					 &rp, sizeof(rp));
2924 
2925 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2926 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2927 					 MGMT_STATUS_INVALID_PARAMS,
2928 					 &rp, sizeof(rp));
2929 
2930 	hci_dev_lock(hdev);
2931 
2932 	if (!hdev_is_powered(hdev)) {
2933 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2934 					MGMT_STATUS_NOT_POWERED, &rp,
2935 					sizeof(rp));
2936 		goto unlock;
2937 	}
2938 
2939 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2940 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2941 					MGMT_STATUS_ALREADY_PAIRED, &rp,
2942 					sizeof(rp));
2943 		goto unlock;
2944 	}
2945 
2946 	sec_level = BT_SECURITY_MEDIUM;
2947 	auth_type = HCI_AT_DEDICATED_BONDING;
2948 
2949 	if (cp->addr.type == BDADDR_BREDR) {
2950 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2951 				       auth_type, CONN_REASON_PAIR_DEVICE);
2952 	} else {
2953 		u8 addr_type = le_addr_type(cp->addr.type);
2954 		struct hci_conn_params *p;
2955 
2956 		/* When pairing a new device, it is expected to remember
2957 		 * this device for future connections. Adding the connection
2958 		 * parameter information ahead of time allows tracking
2959 		 * of the slave preferred values and will speed up any
2960 		 * further connection establishment.
2961 		 *
2962 		 * If connection parameters already exist, then they
2963 		 * will be kept and this function does nothing.
2964 		 */
2965 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2966 
2967 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2968 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2969 
2970 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2971 					   sec_level, HCI_LE_CONN_TIMEOUT,
2972 					   CONN_REASON_PAIR_DEVICE);
2973 	}
2974 
2975 	if (IS_ERR(conn)) {
2976 		int status;
2977 
2978 		if (PTR_ERR(conn) == -EBUSY)
2979 			status = MGMT_STATUS_BUSY;
2980 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
2981 			status = MGMT_STATUS_NOT_SUPPORTED;
2982 		else if (PTR_ERR(conn) == -ECONNREFUSED)
2983 			status = MGMT_STATUS_REJECTED;
2984 		else
2985 			status = MGMT_STATUS_CONNECT_FAILED;
2986 
2987 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2988 					status, &rp, sizeof(rp));
2989 		goto unlock;
2990 	}
2991 
2992 	if (conn->connect_cfm_cb) {
2993 		hci_conn_drop(conn);
2994 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2995 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2996 		goto unlock;
2997 	}
2998 
2999 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3000 	if (!cmd) {
3001 		err = -ENOMEM;
3002 		hci_conn_drop(conn);
3003 		goto unlock;
3004 	}
3005 
3006 	cmd->cmd_complete = pairing_complete;
3007 
3008 	/* For LE, just connecting isn't a proof that the pairing finished */
3009 	if (cp->addr.type == BDADDR_BREDR) {
3010 		conn->connect_cfm_cb = pairing_complete_cb;
3011 		conn->security_cfm_cb = pairing_complete_cb;
3012 		conn->disconn_cfm_cb = pairing_complete_cb;
3013 	} else {
3014 		conn->connect_cfm_cb = le_pairing_complete_cb;
3015 		conn->security_cfm_cb = le_pairing_complete_cb;
3016 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3017 	}
3018 
3019 	conn->io_capability = cp->io_cap;
3020 	cmd->user_data = hci_conn_get(conn);
3021 
3022 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3023 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3024 		cmd->cmd_complete(cmd, 0);
3025 		mgmt_pending_remove(cmd);
3026 	}
3027 
3028 	err = 0;
3029 
3030 unlock:
3031 	hci_dev_unlock(hdev);
3032 	return err;
3033 }
3034 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending PAIR_DEVICE
 * command for the given address, drop any key material already created
 * and tear down the link if it was only set up for pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the pairing actually in progress. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3091 
/* Shared backend for all user pairing responses (PIN neg reply, user
 * confirm/passkey pos and neg replies).  LE responses are handled fully
 * in the SMP layer and complete synchronously; BR/EDR responses are
 * forwarded as the HCI command @hci_op and complete when the controller
 * acknowledges them.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing: hand the reply straight to SMP, no HCI round trip. */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3162 
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp() with the matching HCI opcode.
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3174 
/* MGMT_OP_USER_CONFIRM_REPLY handler: validate the fixed-size payload
 * and delegate to user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3190 
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp() with the matching HCI opcode.
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3202 
/* MGMT_OP_USER_PASSKEY_REPLY handler: thin wrapper around
 * user_pairing_resp() forwarding the user-entered passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3214 
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp() with the matching HCI opcode.
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3226 
/* Expire the current advertising instance if its contents depend on any
 * of the given MGMT_ADV_FLAG_* bits (e.g. local name or appearance just
 * changed), and schedule the next instance so the stale data stops
 * being advertised. Caller must hold hdev lock.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	/* Pick the instance to schedule next; nothing to do if none. */
	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3255 
/* HCI request completion callback for Set Local Name: report the result
 * to the pending management command's socket and, on success, expire
 * any advertising instance that embeds the local name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. removed on an
	 * earlier error path) - nothing to report then.
	 */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		/* Echo the accepted name back in the command complete */
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3287 
/* Handle MGMT_OP_SET_LOCAL_NAME: update the controller's device name
 * and short name. When powered, the name update is pushed to the
 * controller via an HCI request and completion is deferred to
 * set_name_complete(); otherwise the change is applied locally and
 * completed immediately. Note "failed" is a shared exit label used by
 * success paths too.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name never goes to the controller, so it can be
	 * stored unconditionally.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3357 
3358 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3359 			  u16 len)
3360 {
3361 	struct mgmt_cp_set_appearance *cp = data;
3362 	u16 appearance;
3363 	int err;
3364 
3365 	bt_dev_dbg(hdev, "sock %p", sk);
3366 
3367 	if (!lmp_le_capable(hdev))
3368 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3369 				       MGMT_STATUS_NOT_SUPPORTED);
3370 
3371 	appearance = le16_to_cpu(cp->appearance);
3372 
3373 	hci_dev_lock(hdev);
3374 
3375 	if (hdev->appearance != appearance) {
3376 		hdev->appearance = appearance;
3377 
3378 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3379 			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3380 
3381 		ext_info_changed(hdev, sk);
3382 	}
3383 
3384 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3385 				0);
3386 
3387 	hci_dev_unlock(hdev);
3388 
3389 	return err;
3390 }
3391 
3392 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3393 				 void *data, u16 len)
3394 {
3395 	struct mgmt_rp_get_phy_configuration rp;
3396 
3397 	bt_dev_dbg(hdev, "sock %p", sk);
3398 
3399 	hci_dev_lock(hdev);
3400 
3401 	memset(&rp, 0, sizeof(rp));
3402 
3403 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3404 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3405 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3406 
3407 	hci_dev_unlock(hdev);
3408 
3409 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3410 				 &rp, sizeof(rp));
3411 }
3412 
3413 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3414 {
3415 	struct mgmt_ev_phy_configuration_changed ev;
3416 
3417 	memset(&ev, 0, sizeof(ev));
3418 
3419 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3420 
3421 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3422 			  sizeof(ev), skip);
3423 }
3424 
/* HCI request completion callback for LE Set Default PHY: resolve the
 * pending Set PHY Configuration command and, on success, notify other
 * management sockets of the new configuration.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Skip the issuing socket; it already got the reply */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3455 
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: apply the selected PHY bitmask.
 *
 * BR/EDR PHY selections are translated into the ACL packet type mask
 * stored in hdev->pkt_type (no HCI command needed); LE PHY selections
 * are sent to the controller via HCI LE Set Default PHY, with the reply
 * handled asynchronously in set_default_phy_complete(). Note that the
 * EDR packet type bits (HCI_2DH*/HCI_3DH*) have inverted polarity: a
 * set bit *disables* that packet type, hence the inverted set/clear
 * logic below compared to the basic rate bits.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject selections outside what the controller supports */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* PHYs that cannot be configured must all remain selected */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No change requested - complete immediately */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Basic rate multi-slot packets: set bit enables the type */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	/* EDR packet types: set bit *disables* the type, so the logic
	 * is inverted relative to the basic rate bits above.
	 */
	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only BR/EDR PHYs changed there is no HCI command to send;
	 * notify listeners (if anything changed) and complete now.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller "no preference" for TX/RX */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3610 
3611 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3612 			    u16 len)
3613 {
3614 	int err = MGMT_STATUS_SUCCESS;
3615 	struct mgmt_cp_set_blocked_keys *keys = data;
3616 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3617 				   sizeof(struct mgmt_blocked_key_info));
3618 	u16 key_count, expected_len;
3619 	int i;
3620 
3621 	bt_dev_dbg(hdev, "sock %p", sk);
3622 
3623 	key_count = __le16_to_cpu(keys->key_count);
3624 	if (key_count > max_key_count) {
3625 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3626 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3627 				       MGMT_STATUS_INVALID_PARAMS);
3628 	}
3629 
3630 	expected_len = struct_size(keys, keys, key_count);
3631 	if (expected_len != len) {
3632 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3633 			   expected_len, len);
3634 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3635 				       MGMT_STATUS_INVALID_PARAMS);
3636 	}
3637 
3638 	hci_dev_lock(hdev);
3639 
3640 	hci_blocked_keys_clear(hdev);
3641 
3642 	for (i = 0; i < keys->key_count; ++i) {
3643 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3644 
3645 		if (!b) {
3646 			err = MGMT_STATUS_NO_RESOURCES;
3647 			break;
3648 		}
3649 
3650 		b->type = keys->keys[i].type;
3651 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3652 		list_add_rcu(&b->list, &hdev->blocked_keys);
3653 	}
3654 	hci_dev_unlock(hdev);
3655 
3656 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3657 				err, NULL, 0);
3658 }
3659 
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband speech
 * setting. Only allowed when the driver advertises the quirk, and only
 * effective while the controller is powered off - attempts to change
 * the live value on a powered controller are rejected.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off is valid */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* While powered, the setting cannot be flipped */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only announce a New Settings event if the flag really flipped */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3715 
/* Handle MGMT_OP_READ_CONTROLLER_CAP: build an EIR-encoded list of
 * security capability entries (security flags, maximum encryption key
 * sizes, LE TX power range) and return it. buf must be large enough
 * for the response header plus all appended entries.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3782 
/* UUIDs identifying experimental features in the MGMT Experimental
 * Feature commands/events. The byte arrays below store each UUID in
 * reversed (little-endian) byte order relative to the textual form in
 * the comments.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3802 
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental features
 * applicable to this index. @hdev is NULL when the command was sent to
 * the non-controller index, in which case only the debug feature (if
 * compiled in) is reported. Each entry is a 16-byte UUID plus a 32-bit
 * flags word (20 bytes), hence the sizeof(*rp) + 20 * idx reply length.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[62];	/* Enough space for 3 features */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		/* BIT(0) = feature currently enabled */
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && use_ll_privacy(hdev)) {
		/* BIT(1) = toggling changes supported settings */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3861 
3862 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3863 					  struct sock *skip)
3864 {
3865 	struct mgmt_ev_exp_feature_changed ev;
3866 
3867 	memset(&ev, 0, sizeof(ev));
3868 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
3869 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3870 
3871 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3872 				  &ev, sizeof(ev),
3873 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3874 
3875 }
3876 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Emit an Experimental Feature Changed event for the debug feature on
 * the non-controller index, skipping @skip. BIT(0) reflects the new
 * enabled state.
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = 0;

	if (enabled)
		flags |= BIT(0);

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
3891 
/* Handle MGMT_OP_SET_EXP_FEATURE: dispatch on the feature UUID in the
 * command. An all-zero UUID disables every experimental feature; the
 * debug UUID toggles debug output (non-controller index only); the RPA
 * resolution UUID toggles LL privacy (controller index, powered off
 * only). Unknown UUIDs get MGMT_STATUS_NOT_SUPPORTED.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	struct mgmt_rp_set_exp_feature rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* All-zero UUID: disable all experimental features */
	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
		memset(rp.uuid, 0, 16);
		rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
		if (!hdev) {
			bool changed = bt_dbg_get();

			bt_dbg_set(false);

			if (changed)
				exp_debug_feature_changed(false, sk);
		}
#endif

		if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
			bool changed = hci_dev_test_flag(hdev,
							 HCI_ENABLE_LL_PRIVACY);

			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			if (changed)
				exp_ll_privacy_feature_changed(false, hdev, sk);
		}

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
					 MGMT_OP_SET_EXP_FEATURE, 0,
					 &rp, sizeof(rp));
	}

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!memcmp(cp->uuid, debug_uuid, 16)) {
		bool val, changed;
		int err;

		/* Command requires to use the non-controller index */
		if (hdev)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];
		changed = val ? !bt_dbg_get() : bt_dbg_get();
		bt_dbg_set(val);

		memcpy(rp.uuid, debug_uuid, 16);
		rp.flags = cpu_to_le32(val ? BIT(0) : 0);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_debug_feature_changed(val, sk);

		return err;
	}
#endif

	if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
		bool val, changed;
		int err;
		u32 flags;

		/* Command requires to use the controller index */
		if (!hdev)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Changes can only be made when controller is powered down */
		if (hdev_is_powered(hdev))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_REJECTED);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];

		if (val) {
			changed = !hci_dev_test_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
			hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);

			/* Enable LL privacy + supported settings changed */
			flags = BIT(0) | BIT(1);
		} else {
			changed = hci_dev_test_flag(hdev,
						    HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			/* Disable LL privacy + supported settings changed */
			flags = BIT(1);
		}

		memcpy(rp.uuid, rpa_resolution_uuid, 16);
		rp.flags = cpu_to_le32(flags);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_ll_privacy_feature_changed(val, hdev, sk);

		return err;
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4042 
/* Bitmask with every defined per-device connection flag set */
#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4044 
4045 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4046 			    u16 data_len)
4047 {
4048 	struct mgmt_cp_get_device_flags *cp = data;
4049 	struct mgmt_rp_get_device_flags rp;
4050 	struct bdaddr_list_with_flags *br_params;
4051 	struct hci_conn_params *params;
4052 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4053 	u32 current_flags = 0;
4054 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4055 
4056 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4057 		   &cp->addr.bdaddr, cp->addr.type);
4058 
4059 	hci_dev_lock(hdev);
4060 
4061 	if (cp->addr.type == BDADDR_BREDR) {
4062 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4063 							      &cp->addr.bdaddr,
4064 							      cp->addr.type);
4065 		if (!br_params)
4066 			goto done;
4067 
4068 		current_flags = br_params->current_flags;
4069 	} else {
4070 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4071 						le_addr_type(cp->addr.type));
4072 
4073 		if (!params)
4074 			goto done;
4075 
4076 		current_flags = params->current_flags;
4077 	}
4078 
4079 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4080 	rp.addr.type = cp->addr.type;
4081 	rp.supported_flags = cpu_to_le32(supported_flags);
4082 	rp.current_flags = cpu_to_le32(current_flags);
4083 
4084 	status = MGMT_STATUS_SUCCESS;
4085 
4086 done:
4087 	hci_dev_unlock(hdev);
4088 
4089 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4090 				&rp, sizeof(rp));
4091 }
4092 
4093 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4094 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4095 				 u32 supported_flags, u32 current_flags)
4096 {
4097 	struct mgmt_ev_device_flags_changed ev;
4098 
4099 	bacpy(&ev.addr.bdaddr, bdaddr);
4100 	ev.addr.type = bdaddr_type;
4101 	ev.supported_flags = cpu_to_le32(supported_flags);
4102 	ev.current_flags = cpu_to_le32(current_flags);
4103 
4104 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4105 }
4106 
4107 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4108 			    u16 len)
4109 {
4110 	struct mgmt_cp_set_device_flags *cp = data;
4111 	struct bdaddr_list_with_flags *br_params;
4112 	struct hci_conn_params *params;
4113 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4114 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4115 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4116 
4117 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4118 		   &cp->addr.bdaddr, cp->addr.type,
4119 		   __le32_to_cpu(current_flags));
4120 
4121 	if ((supported_flags | current_flags) != supported_flags) {
4122 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4123 			    current_flags, supported_flags);
4124 		goto done;
4125 	}
4126 
4127 	hci_dev_lock(hdev);
4128 
4129 	if (cp->addr.type == BDADDR_BREDR) {
4130 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4131 							      &cp->addr.bdaddr,
4132 							      cp->addr.type);
4133 
4134 		if (br_params) {
4135 			br_params->current_flags = current_flags;
4136 			status = MGMT_STATUS_SUCCESS;
4137 		} else {
4138 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4139 				    &cp->addr.bdaddr, cp->addr.type);
4140 		}
4141 	} else {
4142 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4143 						le_addr_type(cp->addr.type));
4144 		if (params) {
4145 			params->current_flags = current_flags;
4146 			status = MGMT_STATUS_SUCCESS;
4147 		} else {
4148 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4149 				    &cp->addr.bdaddr,
4150 				    le_addr_type(cp->addr.type));
4151 		}
4152 	}
4153 
4154 done:
4155 	hci_dev_unlock(hdev);
4156 
4157 	if (status == MGMT_STATUS_SUCCESS)
4158 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4159 				     supported_flags, current_flags);
4160 
4161 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4162 				 &cp->addr, sizeof(cp->addr));
4163 }
4164 
4165 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4166 				   u16 handle)
4167 {
4168 	struct mgmt_ev_adv_monitor_added ev;
4169 
4170 	ev.monitor_handle = cpu_to_le16(handle);
4171 
4172 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4173 }
4174 
4175 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4176 {
4177 	struct mgmt_ev_adv_monitor_removed ev;
4178 	struct mgmt_pending_cmd *cmd;
4179 	struct sock *sk_skip = NULL;
4180 	struct mgmt_cp_remove_adv_monitor *cp;
4181 
4182 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4183 	if (cmd) {
4184 		cp = cmd->param;
4185 
4186 		if (cp->monitor_handle)
4187 			sk_skip = cmd->sk;
4188 	}
4189 
4190 	ev.monitor_handle = cpu_to_le16(handle);
4191 
4192 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4193 }
4194 
/* Handle MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled
 * monitor features, the handle/pattern limits, and the handles of all
 * currently registered monitors.
 *
 * NOTE(review): handles[] is fixed at HCI_MAX_ADV_MONITOR_NUM_HANDLES;
 * this assumes monitor registration never exceeds that count - confirm
 * the registration path enforces the limit.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot the registered handles while holding the lock */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4243 
4244 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4245 {
4246 	struct mgmt_rp_add_adv_patterns_monitor rp;
4247 	struct mgmt_pending_cmd *cmd;
4248 	struct adv_monitor *monitor;
4249 	int err = 0;
4250 
4251 	hci_dev_lock(hdev);
4252 
4253 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4254 	if (!cmd) {
4255 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4256 		if (!cmd)
4257 			goto done;
4258 	}
4259 
4260 	monitor = cmd->user_data;
4261 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4262 
4263 	if (!status) {
4264 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4265 		hdev->adv_monitors_cnt++;
4266 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4267 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4268 		hci_update_background_scan(hdev);
4269 	}
4270 
4271 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4272 				mgmt_status(status), &rp, sizeof(rp));
4273 	mgmt_pending_remove(cmd);
4274 
4275 done:
4276 	hci_dev_unlock(hdev);
4277 	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
4278 		   rp.monitor_handle, status);
4279 
4280 	return err;
4281 }
4282 
/* Shared worker for MGMT_OP_ADD_ADV_PATTERNS_MONITOR and
 * MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI (selected by @op).  Takes
 * ownership of monitor @m: on every error path it is released via
 * hci_free_adv_monitor().  A non-zero incoming @status (a parse failure
 * reported by the caller) short-circuits straight to the error reply.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	/* Caller already failed while building the monitor; just reply. */
	if (status)
		goto unlock;

	/* Reject while another command touching LE state or the monitor
	 * list is still in flight.
	 */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	/* pending == true means the request was forwarded to the
	 * controller and will complete asynchronously via
	 * mgmt_add_adv_patterns_monitor_complete().
	 */
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		/* Map the errno from the HCI core to a mgmt status code. */
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	if (!pending) {
		/* Monitor was registered synchronously; do the completion
		 * bookkeeping here and reply right away.
		 */
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4346 
4347 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4348 				   struct mgmt_adv_rssi_thresholds *rssi)
4349 {
4350 	if (rssi) {
4351 		m->rssi.low_threshold = rssi->low_threshold;
4352 		m->rssi.low_threshold_timeout =
4353 		    __le16_to_cpu(rssi->low_threshold_timeout);
4354 		m->rssi.high_threshold = rssi->high_threshold;
4355 		m->rssi.high_threshold_timeout =
4356 		    __le16_to_cpu(rssi->high_threshold_timeout);
4357 		m->rssi.sampling_period = rssi->sampling_period;
4358 	} else {
4359 		/* Default values. These numbers are the least constricting
4360 		 * parameters for MSFT API to work, so it behaves as if there
4361 		 * are no rssi parameter to consider. May need to be changed
4362 		 * if other API are to be supported.
4363 		 */
4364 		m->rssi.low_threshold = -127;
4365 		m->rssi.low_threshold_timeout = 60;
4366 		m->rssi.high_threshold = -127;
4367 		m->rssi.high_threshold_timeout = 0;
4368 		m->rssi.sampling_period = 0;
4369 	}
4370 }
4371 
4372 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4373 				    struct mgmt_adv_pattern *patterns)
4374 {
4375 	u8 offset = 0, length = 0;
4376 	struct adv_pattern *p = NULL;
4377 	int i;
4378 
4379 	for (i = 0; i < pattern_count; i++) {
4380 		offset = patterns[i].offset;
4381 		length = patterns[i].length;
4382 		if (offset >= HCI_MAX_AD_LENGTH ||
4383 		    length > HCI_MAX_AD_LENGTH ||
4384 		    (offset + length) > HCI_MAX_AD_LENGTH)
4385 			return MGMT_STATUS_INVALID_PARAMS;
4386 
4387 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4388 		if (!p)
4389 			return MGMT_STATUS_NO_RESOURCES;
4390 
4391 		p->ad_type = patterns[i].ad_type;
4392 		p->offset = patterns[i].offset;
4393 		p->length = patterns[i].length;
4394 		memcpy(p->value, patterns[i].value, p->length);
4395 
4396 		INIT_LIST_HEAD(&p->list);
4397 		list_add(&p->list, &m->patterns);
4398 	}
4399 
4400 	return MGMT_STATUS_SUCCESS;
4401 }
4402 
4403 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4404 				    void *data, u16 len)
4405 {
4406 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4407 	struct adv_monitor *m = NULL;
4408 	u8 status = MGMT_STATUS_SUCCESS;
4409 	size_t expected_size = sizeof(*cp);
4410 
4411 	BT_DBG("request for %s", hdev->name);
4412 
4413 	if (len <= sizeof(*cp)) {
4414 		status = MGMT_STATUS_INVALID_PARAMS;
4415 		goto done;
4416 	}
4417 
4418 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4419 	if (len != expected_size) {
4420 		status = MGMT_STATUS_INVALID_PARAMS;
4421 		goto done;
4422 	}
4423 
4424 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4425 	if (!m) {
4426 		status = MGMT_STATUS_NO_RESOURCES;
4427 		goto done;
4428 	}
4429 
4430 	INIT_LIST_HEAD(&m->patterns);
4431 
4432 	parse_adv_monitor_rssi(m, NULL);
4433 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4434 
4435 done:
4436 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4437 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4438 }
4439 
4440 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4441 					 void *data, u16 len)
4442 {
4443 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4444 	struct adv_monitor *m = NULL;
4445 	u8 status = MGMT_STATUS_SUCCESS;
4446 	size_t expected_size = sizeof(*cp);
4447 
4448 	BT_DBG("request for %s", hdev->name);
4449 
4450 	if (len <= sizeof(*cp)) {
4451 		status = MGMT_STATUS_INVALID_PARAMS;
4452 		goto done;
4453 	}
4454 
4455 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4456 	if (len != expected_size) {
4457 		status = MGMT_STATUS_INVALID_PARAMS;
4458 		goto done;
4459 	}
4460 
4461 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4462 	if (!m) {
4463 		status = MGMT_STATUS_NO_RESOURCES;
4464 		goto done;
4465 	}
4466 
4467 	INIT_LIST_HEAD(&m->patterns);
4468 
4469 	parse_adv_monitor_rssi(m, &cp->rssi);
4470 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4471 
4472 done:
4473 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4474 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4475 }
4476 
4477 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4478 {
4479 	struct mgmt_rp_remove_adv_monitor rp;
4480 	struct mgmt_cp_remove_adv_monitor *cp;
4481 	struct mgmt_pending_cmd *cmd;
4482 	int err = 0;
4483 
4484 	hci_dev_lock(hdev);
4485 
4486 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4487 	if (!cmd)
4488 		goto done;
4489 
4490 	cp = cmd->param;
4491 	rp.monitor_handle = cp->monitor_handle;
4492 
4493 	if (!status)
4494 		hci_update_background_scan(hdev);
4495 
4496 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4497 				mgmt_status(status), &rp, sizeof(rp));
4498 	mgmt_pending_remove(cmd);
4499 
4500 done:
4501 	hci_dev_unlock(hdev);
4502 	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
4503 		   rp.monitor_handle, status);
4504 
4505 	return err;
4506 }
4507 
/* MGMT_OP_REMOVE_ADV_MONITOR handler.  A handle of 0 removes all
 * monitors; any other handle removes just that one.  When the removal
 * has to go through the controller, the reply is deferred to
 * mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Reject while another command touching LE state or the monitor
	 * list is still in flight.
	 */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Handle 0 acts as a wildcard: remove every registered monitor. */
	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		/* -ENOENT means the given handle does not exist. */
		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4572 
/* Completion callback for the Read Local OOB Data HCI request.  Parses
 * either the legacy reply (P-192 hash/randomizer only) or the extended
 * reply (P-192 and P-256 values) and forwards the result to the pending
 * mgmt command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a short reply before reading its fields. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy reply carries no P-256 values; shrink the
		 * response so they are not sent to user space.
		 */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4631 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler.  Queues the appropriate HCI read
 * (the extended variant when BR/EDR Secure Connections is enabled) and
 * defers the reply to read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data is an SSP feature. */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one read may be outstanding at a time. */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4682 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.  Accepts either the short form
 * (P-192 hash/randomizer only, BR/EDR addresses only) or the extended
 * form (P-192 and P-256 values) and stores the data for the peer.  The
 * two variants are distinguished purely by the command length.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The short form only carries P-192 values, which are
		 * only accepted for BR/EDR addresses.
		 */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4790 
4791 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4792 				  void *data, u16 len)
4793 {
4794 	struct mgmt_cp_remove_remote_oob_data *cp = data;
4795 	u8 status;
4796 	int err;
4797 
4798 	bt_dev_dbg(hdev, "sock %p", sk);
4799 
4800 	if (cp->addr.type != BDADDR_BREDR)
4801 		return mgmt_cmd_complete(sk, hdev->id,
4802 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4803 					 MGMT_STATUS_INVALID_PARAMS,
4804 					 &cp->addr, sizeof(cp->addr));
4805 
4806 	hci_dev_lock(hdev);
4807 
4808 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4809 		hci_remote_oob_data_clear(hdev);
4810 		status = MGMT_STATUS_SUCCESS;
4811 		goto done;
4812 	}
4813 
4814 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4815 	if (err < 0)
4816 		status = MGMT_STATUS_INVALID_PARAMS;
4817 	else
4818 		status = MGMT_STATUS_SUCCESS;
4819 
4820 done:
4821 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4822 				status, &cp->addr, sizeof(cp->addr));
4823 
4824 	hci_dev_unlock(hdev);
4825 	return err;
4826 }
4827 
4828 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4829 {
4830 	struct mgmt_pending_cmd *cmd;
4831 
4832 	bt_dev_dbg(hdev, "status %d", status);
4833 
4834 	hci_dev_lock(hdev);
4835 
4836 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4837 	if (!cmd)
4838 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4839 
4840 	if (!cmd)
4841 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4842 
4843 	if (cmd) {
4844 		cmd->cmd_complete(cmd, mgmt_status(status));
4845 		mgmt_pending_remove(cmd);
4846 	}
4847 
4848 	hci_dev_unlock(hdev);
4849 
4850 	/* Handle suspend notifier */
4851 	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4852 			       hdev->suspend_tasks)) {
4853 		bt_dev_dbg(hdev, "Unpaused discovery");
4854 		wake_up(&hdev->suspend_wait_q);
4855 	}
4856 }
4857 
4858 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4859 				    uint8_t *mgmt_status)
4860 {
4861 	switch (type) {
4862 	case DISCOV_TYPE_LE:
4863 		*mgmt_status = mgmt_le_support(hdev);
4864 		if (*mgmt_status)
4865 			return false;
4866 		break;
4867 	case DISCOV_TYPE_INTERLEAVED:
4868 		*mgmt_status = mgmt_le_support(hdev);
4869 		if (*mgmt_status)
4870 			return false;
4871 		fallthrough;
4872 	case DISCOV_TYPE_BREDR:
4873 		*mgmt_status = mgmt_bredr_support(hdev);
4874 		if (*mgmt_status)
4875 			return false;
4876 		break;
4877 	default:
4878 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4879 		return false;
4880 	}
4881 
4882 	return true;
4883 }
4884 
/* Common worker for the Start Discovery command variants (@op selects
 * which).  Discovery itself is kicked off asynchronously through the
 * discov_update work item, and the command completes from
 * mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and periodic inquiry
	 * conflicts with it as well.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4952 
/* MGMT_OP_START_DISCOVERY handler: regular (non-limited) discovery. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
4959 
/* MGMT_OP_START_LIMITED_DISCOVERY handler: limited discovery variant. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
4967 
/* Reply helper for Start Service Discovery: echoes back only the first
 * byte of the saved command parameters, matching the &cp->type replies
 * used on the error paths of start_service_discovery().
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4974 
/* MGMT_OP_START_SERVICE_DISCOVERY handler.  Validates the UUID filter
 * list carried in the command, installs it as the discovery filter and
 * starts discovery asynchronously via the discov_update work item.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest uuid_count that cannot overflow the u16 length math. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and periodic inquiry
	 * conflicts with it as well.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Command length must exactly match the declared UUID count. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep a private copy of the UUID filter list. */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
5083 
5084 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5085 {
5086 	struct mgmt_pending_cmd *cmd;
5087 
5088 	bt_dev_dbg(hdev, "status %d", status);
5089 
5090 	hci_dev_lock(hdev);
5091 
5092 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5093 	if (cmd) {
5094 		cmd->cmd_complete(cmd, mgmt_status(status));
5095 		mgmt_pending_remove(cmd);
5096 	}
5097 
5098 	hci_dev_unlock(hdev);
5099 
5100 	/* Handle suspend notifier */
5101 	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5102 		bt_dev_dbg(hdev, "Paused discovery");
5103 		wake_up(&hdev->suspend_wait_q);
5104 	}
5105 }
5106 
5107 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5108 			  u16 len)
5109 {
5110 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
5111 	struct mgmt_pending_cmd *cmd;
5112 	int err;
5113 
5114 	bt_dev_dbg(hdev, "sock %p", sk);
5115 
5116 	hci_dev_lock(hdev);
5117 
5118 	if (!hci_discovery_active(hdev)) {
5119 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5120 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
5121 					sizeof(mgmt_cp->type));
5122 		goto unlock;
5123 	}
5124 
5125 	if (hdev->discovery.type != mgmt_cp->type) {
5126 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5127 					MGMT_STATUS_INVALID_PARAMS,
5128 					&mgmt_cp->type, sizeof(mgmt_cp->type));
5129 		goto unlock;
5130 	}
5131 
5132 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5133 	if (!cmd) {
5134 		err = -ENOMEM;
5135 		goto unlock;
5136 	}
5137 
5138 	cmd->cmd_complete = generic_cmd_complete;
5139 
5140 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5141 	queue_work(hdev->req_workqueue, &hdev->discov_update);
5142 	err = 0;
5143 
5144 unlock:
5145 	hci_dev_unlock(hdev);
5146 	return err;
5147 }
5148 
5149 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5150 			u16 len)
5151 {
5152 	struct mgmt_cp_confirm_name *cp = data;
5153 	struct inquiry_entry *e;
5154 	int err;
5155 
5156 	bt_dev_dbg(hdev, "sock %p", sk);
5157 
5158 	hci_dev_lock(hdev);
5159 
5160 	if (!hci_discovery_active(hdev)) {
5161 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5162 					MGMT_STATUS_FAILED, &cp->addr,
5163 					sizeof(cp->addr));
5164 		goto failed;
5165 	}
5166 
5167 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5168 	if (!e) {
5169 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5170 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5171 					sizeof(cp->addr));
5172 		goto failed;
5173 	}
5174 
5175 	if (cp->name_known) {
5176 		e->name_state = NAME_KNOWN;
5177 		list_del(&e->list);
5178 	} else {
5179 		e->name_state = NAME_NEEDED;
5180 		hci_inquiry_cache_update_resolve(hdev, e);
5181 	}
5182 
5183 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5184 				&cp->addr, sizeof(cp->addr));
5185 
5186 failed:
5187 	hci_dev_unlock(hdev);
5188 	return err;
5189 }
5190 
5191 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5192 			u16 len)
5193 {
5194 	struct mgmt_cp_block_device *cp = data;
5195 	u8 status;
5196 	int err;
5197 
5198 	bt_dev_dbg(hdev, "sock %p", sk);
5199 
5200 	if (!bdaddr_type_is_valid(cp->addr.type))
5201 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5202 					 MGMT_STATUS_INVALID_PARAMS,
5203 					 &cp->addr, sizeof(cp->addr));
5204 
5205 	hci_dev_lock(hdev);
5206 
5207 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
5208 				  cp->addr.type);
5209 	if (err < 0) {
5210 		status = MGMT_STATUS_FAILED;
5211 		goto done;
5212 	}
5213 
5214 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5215 		   sk);
5216 	status = MGMT_STATUS_SUCCESS;
5217 
5218 done:
5219 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5220 				&cp->addr, sizeof(cp->addr));
5221 
5222 	hci_dev_unlock(hdev);
5223 
5224 	return err;
5225 }
5226 
5227 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5228 			  u16 len)
5229 {
5230 	struct mgmt_cp_unblock_device *cp = data;
5231 	u8 status;
5232 	int err;
5233 
5234 	bt_dev_dbg(hdev, "sock %p", sk);
5235 
5236 	if (!bdaddr_type_is_valid(cp->addr.type))
5237 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5238 					 MGMT_STATUS_INVALID_PARAMS,
5239 					 &cp->addr, sizeof(cp->addr));
5240 
5241 	hci_dev_lock(hdev);
5242 
5243 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
5244 				  cp->addr.type);
5245 	if (err < 0) {
5246 		status = MGMT_STATUS_INVALID_PARAMS;
5247 		goto done;
5248 	}
5249 
5250 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5251 		   sk);
5252 	status = MGMT_STATUS_SUCCESS;
5253 
5254 done:
5255 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5256 				&cp->addr, sizeof(cp->addr));
5257 
5258 	hci_dev_unlock(hdev);
5259 
5260 	return err;
5261 }
5262 
5263 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5264 			 u16 len)
5265 {
5266 	struct mgmt_cp_set_device_id *cp = data;
5267 	struct hci_request req;
5268 	int err;
5269 	__u16 source;
5270 
5271 	bt_dev_dbg(hdev, "sock %p", sk);
5272 
5273 	source = __le16_to_cpu(cp->source);
5274 
5275 	if (source > 0x0002)
5276 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5277 				       MGMT_STATUS_INVALID_PARAMS);
5278 
5279 	hci_dev_lock(hdev);
5280 
5281 	hdev->devid_source = source;
5282 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5283 	hdev->devid_product = __le16_to_cpu(cp->product);
5284 	hdev->devid_version = __le16_to_cpu(cp->version);
5285 
5286 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5287 				NULL, 0);
5288 
5289 	hci_req_init(&req, hdev);
5290 	__hci_req_update_eir(&req);
5291 	hci_req_run(&req, NULL);
5292 
5293 	hci_dev_unlock(hdev);
5294 
5295 	return err;
5296 }
5297 
/* Request-complete callback used when re-enabling instance advertising
 * from set_advertising_complete(); it only logs the status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}
5303 
/* Completion handler for MGMT_OP_SET_ADVERTISING.  Syncs the
 * HCI_ADVERTISING flag with the controller state, answers all pending
 * Set Advertising commands, wakes suspend waiters and, if instance
 * advertising was configured earlier, re-schedules it.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's LE advertising state in the flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* Fall back to the first configured instance when none is current. */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5377 
/* Set Advertising (MGMT_OP_SET_ADVERTISING) handler.
 *
 * cp->val is 0x00 (off), 0x01 (on) or 0x02 (on and connectable).
 * When no HCI communication is needed (powered off, state already
 * matches, LE links exist, or active scanning is running) only the
 * setting flags are toggled and user space is answered directly;
 * otherwise an HCI request is queued and completed in
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising is paused during suspend; refuse changes meanwhile */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising / Set LE operation may be in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* 0x02 requests connectable advertising */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5496 
5497 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5498 			      void *data, u16 len)
5499 {
5500 	struct mgmt_cp_set_static_address *cp = data;
5501 	int err;
5502 
5503 	bt_dev_dbg(hdev, "sock %p", sk);
5504 
5505 	if (!lmp_le_capable(hdev))
5506 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5507 				       MGMT_STATUS_NOT_SUPPORTED);
5508 
5509 	if (hdev_is_powered(hdev))
5510 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5511 				       MGMT_STATUS_REJECTED);
5512 
5513 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5514 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5515 			return mgmt_cmd_status(sk, hdev->id,
5516 					       MGMT_OP_SET_STATIC_ADDRESS,
5517 					       MGMT_STATUS_INVALID_PARAMS);
5518 
5519 		/* Two most significant bits shall be set */
5520 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5521 			return mgmt_cmd_status(sk, hdev->id,
5522 					       MGMT_OP_SET_STATIC_ADDRESS,
5523 					       MGMT_STATUS_INVALID_PARAMS);
5524 	}
5525 
5526 	hci_dev_lock(hdev);
5527 
5528 	bacpy(&hdev->static_addr, &cp->bdaddr);
5529 
5530 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5531 	if (err < 0)
5532 		goto unlock;
5533 
5534 	err = new_settings(hdev, sk);
5535 
5536 unlock:
5537 	hci_dev_unlock(hdev);
5538 	return err;
5539 }
5540 
/* Set Scan Parameters (MGMT_OP_SET_SCAN_PARAMS) handler.
 *
 * Validates and stores the LE scan interval and window. Both must lie
 * within 0x0004-0x4000 and the window must not exceed the interval
 * (units per the Core Specification — presumably 0.625 ms slots, not
 * enforced here). If passive background scanning is running it is
 * restarted so the new parameters take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req, false);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
5597 
/* Request-completion callback for MGMT_OP_SET_FAST_CONNECTABLE: on
 * success updates the HCI_FAST_CONNECTABLE flag from the requested
 * value saved in the pending command and notifies user space; on
 * failure forwards the translated HCI status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* Requested mode was stashed in the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5631 
/* Set Fast Connectable (MGMT_OP_SET_FAST_CONNECTABLE) handler.
 *
 * Requires BR/EDR to be enabled and a controller of at least Bluetooth
 * 1.2. While powered off only the setting flag is toggled; otherwise a
 * page scan parameter update is sent to the controller and completed
 * in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: just confirm current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* Powered off: flip the flag only, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5696 
/* Request-completion callback for MGMT_OP_SET_BREDR: on failure
 * reverts the HCI_BREDR_ENABLED flag (it was set optimistically in
 * set_bredr() before the request ran), otherwise confirms the new
 * settings to user space.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5728 
5729 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5730 {
5731 	struct mgmt_mode *cp = data;
5732 	struct mgmt_pending_cmd *cmd;
5733 	struct hci_request req;
5734 	int err;
5735 
5736 	bt_dev_dbg(hdev, "sock %p", sk);
5737 
5738 	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5739 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5740 				       MGMT_STATUS_NOT_SUPPORTED);
5741 
5742 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
5743 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5744 				       MGMT_STATUS_REJECTED);
5745 
5746 	if (cp->val != 0x00 && cp->val != 0x01)
5747 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5748 				       MGMT_STATUS_INVALID_PARAMS);
5749 
5750 	hci_dev_lock(hdev);
5751 
5752 	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5753 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5754 		goto unlock;
5755 	}
5756 
5757 	if (!hdev_is_powered(hdev)) {
5758 		if (!cp->val) {
5759 			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5760 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
5761 			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
5762 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5763 			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
5764 		}
5765 
5766 		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
5767 
5768 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5769 		if (err < 0)
5770 			goto unlock;
5771 
5772 		err = new_settings(hdev, sk);
5773 		goto unlock;
5774 	}
5775 
5776 	/* Reject disabling when powered on */
5777 	if (!cp->val) {
5778 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5779 				      MGMT_STATUS_REJECTED);
5780 		goto unlock;
5781 	} else {
5782 		/* When configuring a dual-mode controller to operate
5783 		 * with LE only and using a static address, then switching
5784 		 * BR/EDR back on is not allowed.
5785 		 *
5786 		 * Dual-mode controllers shall operate with the public
5787 		 * address as its identity address for BR/EDR and LE. So
5788 		 * reject the attempt to create an invalid configuration.
5789 		 *
5790 		 * The same restrictions applies when secure connections
5791 		 * has been enabled. For BR/EDR this is a controller feature
5792 		 * while for LE it is a host stack feature. This means that
5793 		 * switching BR/EDR back on when secure connections has been
5794 		 * enabled is not a supported transaction.
5795 		 */
5796 		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
5797 		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5798 		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
5799 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5800 					      MGMT_STATUS_REJECTED);
5801 			goto unlock;
5802 		}
5803 	}
5804 
5805 	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
5806 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5807 				      MGMT_STATUS_BUSY);
5808 		goto unlock;
5809 	}
5810 
5811 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5812 	if (!cmd) {
5813 		err = -ENOMEM;
5814 		goto unlock;
5815 	}
5816 
5817 	/* We need to flip the bit already here so that
5818 	 * hci_req_update_adv_data generates the correct flags.
5819 	 */
5820 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
5821 
5822 	hci_req_init(&req, hdev);
5823 
5824 	__hci_req_write_fast_connectable(&req, false);
5825 	__hci_req_update_scan(&req);
5826 
5827 	/* Since only the advertising data flags will change, there
5828 	 * is no need to update the scan response data.
5829 	 */
5830 	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);
5831 
5832 	err = hci_req_run(&req, set_bredr_complete);
5833 	if (err < 0)
5834 		mgmt_pending_remove(cmd);
5835 
5836 unlock:
5837 	hci_dev_unlock(hdev);
5838 	return err;
5839 }
5840 
/* Request-completion callback for MGMT_OP_SET_SECURE_CONN: translates
 * the requested value saved in the pending command (0x00 off, 0x01 on,
 * 0x02 SC-only) into the HCI_SC_ENABLED/HCI_SC_ONLY flags once the
 * controller accepted the write, then notifies user space.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5885 
/* Set Secure Connections (MGMT_OP_SET_SECURE_CONN) handler.
 *
 * cp->val is 0x00 (off), 0x01 (on) or 0x02 (SC-only mode). When the
 * controller is powered off, not SC-capable, or BR/EDR is disabled,
 * only the setting flags are toggled (LE SC is a host feature);
 * otherwise HCI_OP_WRITE_SC_SUPPORT is sent and completed in
 * sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* SC on BR/EDR requires SSP to be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only update when no controller write is possible/needed */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: just confirm settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5973 
5974 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5975 			  void *data, u16 len)
5976 {
5977 	struct mgmt_mode *cp = data;
5978 	bool changed, use_changed;
5979 	int err;
5980 
5981 	bt_dev_dbg(hdev, "sock %p", sk);
5982 
5983 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5984 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5985 				       MGMT_STATUS_INVALID_PARAMS);
5986 
5987 	hci_dev_lock(hdev);
5988 
5989 	if (cp->val)
5990 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
5991 	else
5992 		changed = hci_dev_test_and_clear_flag(hdev,
5993 						      HCI_KEEP_DEBUG_KEYS);
5994 
5995 	if (cp->val == 0x02)
5996 		use_changed = !hci_dev_test_and_set_flag(hdev,
5997 							 HCI_USE_DEBUG_KEYS);
5998 	else
5999 		use_changed = hci_dev_test_and_clear_flag(hdev,
6000 							  HCI_USE_DEBUG_KEYS);
6001 
6002 	if (hdev_is_powered(hdev) && use_changed &&
6003 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6004 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6005 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6006 			     sizeof(mode), &mode);
6007 	}
6008 
6009 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6010 	if (err < 0)
6011 		goto unlock;
6012 
6013 	if (changed)
6014 		err = new_settings(hdev, sk);
6015 
6016 unlock:
6017 	hci_dev_unlock(hdev);
6018 	return err;
6019 }
6020 
/* Set Privacy (MGMT_OP_SET_PRIVACY) handler.
 *
 * cp->privacy is 0x00 (off), 0x01 (on) or 0x02 (limited privacy). May
 * only be changed while powered off. Enabling stores the supplied IRK
 * and marks the RPA as expired so a fresh one gets generated;
 * disabling wipes the IRK.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6077 
6078 static bool irk_is_valid(struct mgmt_irk_info *irk)
6079 {
6080 	switch (irk->addr.type) {
6081 	case BDADDR_LE_PUBLIC:
6082 		return true;
6083 
6084 	case BDADDR_LE_RANDOM:
6085 		/* Two most significant bits shall be set */
6086 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6087 			return false;
6088 		return true;
6089 	}
6090 
6091 	return false;
6092 }
6093 
/* Load IRKs (MGMT_OP_LOAD_IRKS) handler.
 *
 * Validates the command length against the declared key count and each
 * key's address, then replaces the whole stored IRK list with the
 * provided keys (skipping any blocked keys) and enables RPA
 * resolution.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps struct_size() within a u16 length */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Declared key count must match the actual payload length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Reject the whole request if any single key is malformed */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6164 
6165 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6166 {
6167 	if (key->master != 0x00 && key->master != 0x01)
6168 		return false;
6169 
6170 	switch (key->addr.type) {
6171 	case BDADDR_LE_PUBLIC:
6172 		return true;
6173 
6174 	case BDADDR_LE_RANDOM:
6175 		/* Two most significant bits shall be set */
6176 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6177 			return false;
6178 		return true;
6179 	}
6180 
6181 	return false;
6182 }
6183 
/* Load Long Term Keys (MGMT_OP_LOAD_LONG_TERM_KEYS) handler.
 *
 * Validates the command length against the declared key count and each
 * key entry, then replaces the whole stored LTK list with the provided
 * keys. Blocked keys are skipped with a warning; P256 debug keys fall
 * through to the default case and are deliberately not loaded.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps struct_size() within a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Declared key count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Reject the whole request if any single key is malformed */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to SMP type + authentication level */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys intentionally skipped via default */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6279 
/* Complete a pending Get Conn Info command: replies with the cached
 * RSSI/TX power values from the connection on success, or invalid
 * markers on failure, then drops the connection references taken when
 * the command was queued in get_conn_info().
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* The original request address was stashed in cmd->param */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	/* Release the hold + reference taken in get_conn_info() */
	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
6306 
/* Request-completion callback for the RSSI/TX-power refresh request
 * built in get_conn_info(): recovers the connection handle from the
 * last sent command and completes the matching pending Get Conn Info
 * command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command was the last sent one: nothing to recover */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	/* conn_info_cmd_complete() also drops the connection references */
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6360 
/* Handler for MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an
 * active connection.
 *
 * If the values cached in the hci_conn are fresh enough they are
 * returned immediately; otherwise HCI Read RSSI / Read TX Power
 * commands are queued and the reply is deferred to the request
 * completion callback (conn_info_refresh_complete), tracked via a
 * pending mgmt command keyed on the hci_conn.
 *
 * Returns 0 or a negative errno from mgmt_cmd_complete()/hci_req_run().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply echoes the requested address even on error paths */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BR/EDR addresses map to the ACL link, everything else to LE */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Conn Info may be outstanding per connection */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Hold a reference so the connection stays valid until the
		 * deferred completion runs; dropped in the cmd_complete
		 * callback.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6481 
/* Completion helper for a pending MGMT_OP_GET_CLOCK_INFO command.
 *
 * Builds the Get Clock Info reply from the hdev's local clock and, when
 * a piconet connection was involved (stashed in cmd->user_data), that
 * connection's clock and accuracy. On non-zero status only the address
 * (zero-filled clock fields) is returned. Drops the connection
 * references taken when the command was queued.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* The request parameters start with the mgmt_addr_info to echo */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Release the hold/get taken in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
6517 
/* HCI request callback for the Read Clock request issued by
 * get_clock_info(). Recovers the connection (if any) from the sent
 * HCI command parameters and completes the matching pending mgmt
 * command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection was
	 * requested; which == 0 is the local clock with no connection.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* The pending command was registered keyed on this connection */
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6549 
/* Handler for MGMT_OP_GET_CLOCK_INFO: read the local controller clock
 * and, when a peer BR/EDR address is supplied, the piconet clock of
 * that connection.
 *
 * Always queues an HCI Read Clock for the local clock; adds a second
 * Read Clock (which = 0x01) when a connected peer was specified. The
 * reply is deferred to get_clock_info_complete().
 *
 * Returns 0 or a negative errno.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the requested address in every reply, including errors */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means "local clock only"; anything else must be a
	 * currently connected peer.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command: local clock (handle 0, which = 0x00) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the deferred completion;
		 * released in clock_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6625 
6626 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6627 {
6628 	struct hci_conn *conn;
6629 
6630 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6631 	if (!conn)
6632 		return false;
6633 
6634 	if (conn->dst_type != type)
6635 		return false;
6636 
6637 	if (conn->state != BT_CONNECTED)
6638 		return false;
6639 
6640 	return true;
6641 }
6642 
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Look up or create the connection parameter entry */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the auto-connect policy is already set */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from any pend_le_conns/pend_le_reports list before
	 * re-queueing according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-flight explicit connect takes precedence over
		 * mere reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connect attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6687 
6688 static void device_added(struct sock *sk, struct hci_dev *hdev,
6689 			 bdaddr_t *bdaddr, u8 type, u8 action)
6690 {
6691 	struct mgmt_ev_device_added ev;
6692 
6693 	bacpy(&ev.addr.bdaddr, bdaddr);
6694 	ev.addr.type = type;
6695 	ev.action = action;
6696 
6697 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
6698 }
6699 
/* Handler for MGMT_OP_ADD_DEVICE: register a device for background
 * connection establishment or incoming connection acceptance.
 *
 * BR/EDR addresses (action 0x01 only) are added to the whitelist;
 * LE addresses get hci_conn_params with the auto-connect policy mapped
 * from the action value. Emits Device Added and Device Flags Changed
 * events on success.
 *
 * Returns 0 or a negative errno.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* BDADDR_ANY is not a valid device to add */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00 (background scan), 0x01 (allow incoming) and
	 * 0x02 (auto-connect) are defined.
	 */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Page scan may need to be enabled for the new entry */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map mgmt action to the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6797 
6798 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6799 			   bdaddr_t *bdaddr, u8 type)
6800 {
6801 	struct mgmt_ev_device_removed ev;
6802 
6803 	bacpy(&ev.addr.bdaddr, bdaddr);
6804 	ev.addr.type = type;
6805 
6806 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6807 }
6808 
/* Handler for MGMT_OP_REMOVE_DEVICE: drop a device from the whitelist
 * or its LE connection parameters, or — with BDADDR_ANY — clear all
 * whitelist entries and all non-disabled LE conn params at once.
 *
 * Returns 0 or a negative errno.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Single-device removal path */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Entry not found in whitelist */
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Page scan may no longer be needed */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled/explicit entries were not created via Add Device,
		 * so they cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 (BR/EDR) */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with an in-flight explicit connect,
			 * just demote them to explicit-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6937 
/* Handler for MGMT_OP_LOAD_CONN_PARAM: bulk-load LE connection
 * parameters (intervals, latency, supervision timeout) for a set of
 * devices, replacing any disabled entries first.
 *
 * Invalid individual entries are logged and skipped rather than
 * failing the whole command; only a malformed request length or
 * count is rejected outright.
 *
 * Returns 0 or a negative errno.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound on param_count so expected_len cannot overflow u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must exactly match header plus param_count entries */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Disabled entries are replaced wholesale by this load */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types carry connection parameters */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		/* Range-check against the Core Specification limits */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7022 
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: mark the controller as
 * configured (or not) by an external entity. Only allowed while the
 * controller is powered off and on controllers with the external
 * configuration quirk.
 *
 * When the change flips the configured state, the index is re-announced
 * as configured/unconfigured and power-on is scheduled accordingly.
 *
 * Returns 0 or a negative errno.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* changed tracks whether the flag actually flipped */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state changed, move the index between the
	 * configured and unconfigured lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7078 
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: configure the public BD_ADDR
 * of a controller that ships without one. Only allowed while powered
 * off and on controllers providing a set_bdaddr driver callback.
 *
 * If setting the address makes the controller fully configured, the
 * index is re-announced and power-on is scheduled.
 *
 * Returns 0 or a negative errno.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Driver must support programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Address may have been the last missing piece of configuration */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7130 
/* HCI request callback for Read Local OOB (Extended) Data issued by
 * read_local_ssp_oob_req().
 *
 * Extracts the P-192 and/or P-256 hash/randomizer pairs from the
 * controller reply, packs them as EIR fields into the mgmt reply and
 * completes the pending MGMT_OP_READ_LOCAL_OOB_EXT_DATA command. On
 * success also broadcasts a Local OOB Data Updated event to interested
 * sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* Controller error: reply with no EIR data */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: only P-192 hash and randomizer available */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-device + two 16-byte value fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply: P-256 always, P-192 unless SC-only mode */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only: omit the P-192 values */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (status)
		goto send_rsp;

	/* Build the EIR payload: class of device first, then key material */
	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly subscribes to future OOB data updates */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7241 
/* Queue an HCI request to read the local SSP out-of-band data and
 * register a pending mgmt command for the asynchronous reply.
 *
 * Uses the extended OOB read when BR/EDR Secure Connections is enabled,
 * otherwise the legacy P-192-only variant. Completion is handled by
 * read_local_oob_ext_data_complete().
 *
 * Returns 0 on success or a negative errno (the pending command is
 * removed again on failure).
 */
static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
				  struct mgmt_cp_read_local_oob_ext_data *cp)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
			       cp, sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		return err;
	}

	return 0;
}
7269 
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA: return local
 * out-of-band pairing data as EIR-encoded fields.
 *
 * For BR/EDR with SSP enabled the controller must be queried, so the
 * reply is deferred via read_local_ssp_oob_req(). For LE the EIR data
 * (address, role, optional SC confirm/random, flags) is assembled
 * inline. On success the requesting socket is subscribed to OOB data
 * update events and an update event is broadcast.
 *
 * Returns 0 or a negative errno.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: validate the requested transport type and size the
	 * reply buffer for the worst case.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP data lives in the controller; defer the reply */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will actually see; the trailing
		 * byte encodes the address type (0x01 = random/static).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 = peripheral preferred when advertising */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester implicitly subscribes to future OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7425 
7426 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7427 {
7428 	u32 flags = 0;
7429 
7430 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7431 	flags |= MGMT_ADV_FLAG_DISCOV;
7432 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7433 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7434 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7435 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7436 	flags |= MGMT_ADV_PARAM_DURATION;
7437 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7438 	flags |= MGMT_ADV_PARAM_INTERVALS;
7439 	flags |= MGMT_ADV_PARAM_TX_POWER;
7440 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7441 
7442 	/* In extended adv TX_POWER returned from Set Adv Param
7443 	 * will be always valid.
7444 	 */
7445 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7446 	    ext_adv_capable(hdev))
7447 		flags |= MGMT_ADV_FLAG_TX_POWER;
7448 
7449 	if (ext_adv_capable(hdev)) {
7450 		flags |= MGMT_ADV_FLAG_SEC_1M;
7451 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7452 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7453 
7454 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7455 			flags |= MGMT_ADV_FLAG_SEC_2M;
7456 
7457 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7458 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7459 	}
7460 
7461 	return flags;
7462 }
7463 
/* Handler for MGMT_OP_READ_ADV_FEATURES: report supported advertising
 * flags, size limits, and the list of currently configured advertising
 * instance identifiers.
 *
 * Returns 0 or a negative errno.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	/* Enabling the experimental LL Privay support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* One trailing byte per configured advertising instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Fill the flexible array with the configured instance IDs */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7519 
/* Return the number of bytes the local name field occupies when it is
 * appended to advertising data (determined by asking append_local_name
 * to render it into a scratch buffer at offset 0).
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	/* Scratch space for the rendered name field; sized for a
	 * shortened name plus field overhead — see append_local_name.
	 */
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return append_local_name(hdev, buf, 0);
}
7526 
7527 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7528 			   bool is_adv_data)
7529 {
7530 	u8 max_len = HCI_MAX_AD_LENGTH;
7531 
7532 	if (is_adv_data) {
7533 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7534 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7535 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7536 			max_len -= 3;
7537 
7538 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7539 			max_len -= 3;
7540 	} else {
7541 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7542 			max_len -= calculate_name_len(hdev);
7543 
7544 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7545 			max_len -= 4;
7546 	}
7547 
7548 	return max_len;
7549 }
7550 
7551 static bool flags_managed(u32 adv_flags)
7552 {
7553 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7554 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7555 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7556 }
7557 
7558 static bool tx_power_managed(u32 adv_flags)
7559 {
7560 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7561 }
7562 
7563 static bool name_managed(u32 adv_flags)
7564 {
7565 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7566 }
7567 
7568 static bool appearance_managed(u32 adv_flags)
7569 {
7570 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7571 }
7572 
7573 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7574 			      u8 len, bool is_adv_data)
7575 {
7576 	int i, cur_len;
7577 	u8 max_len;
7578 
7579 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7580 
7581 	if (len > max_len)
7582 		return false;
7583 
7584 	/* Make sure that the data is correctly formatted. */
7585 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7586 		cur_len = data[i];
7587 
7588 		if (data[i + 1] == EIR_FLAGS &&
7589 		    (!is_adv_data || flags_managed(adv_flags)))
7590 			return false;
7591 
7592 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7593 			return false;
7594 
7595 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7596 			return false;
7597 
7598 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7599 			return false;
7600 
7601 		if (data[i + 1] == EIR_APPEARANCE &&
7602 		    appearance_managed(adv_flags))
7603 			return false;
7604 
7605 		/* If the current field length would exceed the total data
7606 		 * length, then it's invalid.
7607 		 */
7608 		if (i + cur_len >= len)
7609 			return false;
7610 	}
7611 
7612 	return true;
7613 }
7614 
7615 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7616 {
7617 	u32 supported_flags, phy_flags;
7618 
7619 	/* The current implementation only supports a subset of the specified
7620 	 * flags. Also need to check mutual exclusiveness of sec flags.
7621 	 */
7622 	supported_flags = get_supported_adv_flags(hdev);
7623 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7624 	if (adv_flags & ~supported_flags ||
7625 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7626 		return false;
7627 
7628 	return true;
7629 }
7630 
7631 static bool adv_busy(struct hci_dev *hdev)
7632 {
7633 	return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7634 		pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7635 		pending_find(MGMT_OP_SET_LE, hdev) ||
7636 		pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7637 		pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
7638 }
7639 
/* HCI-request completion callback for requests started from
 * add_advertising() and add_ext_adv_data(). On failure, every instance
 * still marked pending is torn down and its removal is signalled; on
 * success, pending instances become permanent. Finally the originating
 * MGMT command, if still queued, is answered with @status.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* Either opcode may have triggered this request; check both. */
	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);

	/* _safe variant: instances may be removed while iterating. */
	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			/* Request succeeded: the instance is now final. */
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the rotation timer if it refers to the instance
		 * about to be removed.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7693 
7694 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7695 			   void *data, u16 data_len)
7696 {
7697 	struct mgmt_cp_add_advertising *cp = data;
7698 	struct mgmt_rp_add_advertising rp;
7699 	u32 flags;
7700 	u8 status;
7701 	u16 timeout, duration;
7702 	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7703 	u8 schedule_instance = 0;
7704 	struct adv_info *next_instance;
7705 	int err;
7706 	struct mgmt_pending_cmd *cmd;
7707 	struct hci_request req;
7708 
7709 	bt_dev_dbg(hdev, "sock %p", sk);
7710 
7711 	status = mgmt_le_support(hdev);
7712 	if (status)
7713 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7714 				       status);
7715 
7716 	/* Enabling the experimental LL Privay support disables support for
7717 	 * advertising.
7718 	 */
7719 	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7720 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7721 				       MGMT_STATUS_NOT_SUPPORTED);
7722 
7723 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7724 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7725 				       MGMT_STATUS_INVALID_PARAMS);
7726 
7727 	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7728 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7729 				       MGMT_STATUS_INVALID_PARAMS);
7730 
7731 	flags = __le32_to_cpu(cp->flags);
7732 	timeout = __le16_to_cpu(cp->timeout);
7733 	duration = __le16_to_cpu(cp->duration);
7734 
7735 	if (!requested_adv_flags_are_valid(hdev, flags))
7736 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7737 				       MGMT_STATUS_INVALID_PARAMS);
7738 
7739 	hci_dev_lock(hdev);
7740 
7741 	if (timeout && !hdev_is_powered(hdev)) {
7742 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7743 				      MGMT_STATUS_REJECTED);
7744 		goto unlock;
7745 	}
7746 
7747 	if (adv_busy(hdev)) {
7748 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7749 				      MGMT_STATUS_BUSY);
7750 		goto unlock;
7751 	}
7752 
7753 	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7754 	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7755 			       cp->scan_rsp_len, false)) {
7756 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7757 				      MGMT_STATUS_INVALID_PARAMS);
7758 		goto unlock;
7759 	}
7760 
7761 	err = hci_add_adv_instance(hdev, cp->instance, flags,
7762 				   cp->adv_data_len, cp->data,
7763 				   cp->scan_rsp_len,
7764 				   cp->data + cp->adv_data_len,
7765 				   timeout, duration,
7766 				   HCI_ADV_TX_POWER_NO_PREFERENCE,
7767 				   hdev->le_adv_min_interval,
7768 				   hdev->le_adv_max_interval);
7769 	if (err < 0) {
7770 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7771 				      MGMT_STATUS_FAILED);
7772 		goto unlock;
7773 	}
7774 
7775 	/* Only trigger an advertising added event if a new instance was
7776 	 * actually added.
7777 	 */
7778 	if (hdev->adv_instance_cnt > prev_instance_cnt)
7779 		mgmt_advertising_added(sk, hdev, cp->instance);
7780 
7781 	if (hdev->cur_adv_instance == cp->instance) {
7782 		/* If the currently advertised instance is being changed then
7783 		 * cancel the current advertising and schedule the next
7784 		 * instance. If there is only one instance then the overridden
7785 		 * advertising data will be visible right away.
7786 		 */
7787 		cancel_adv_timeout(hdev);
7788 
7789 		next_instance = hci_get_next_instance(hdev, cp->instance);
7790 		if (next_instance)
7791 			schedule_instance = next_instance->instance;
7792 	} else if (!hdev->adv_instance_timeout) {
7793 		/* Immediately advertise the new instance if no other
7794 		 * instance is currently being advertised.
7795 		 */
7796 		schedule_instance = cp->instance;
7797 	}
7798 
7799 	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
7800 	 * there is no instance to be advertised then we have no HCI
7801 	 * communication to make. Simply return.
7802 	 */
7803 	if (!hdev_is_powered(hdev) ||
7804 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7805 	    !schedule_instance) {
7806 		rp.instance = cp->instance;
7807 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7808 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7809 		goto unlock;
7810 	}
7811 
7812 	/* We're good to go, update advertising data, parameters, and start
7813 	 * advertising.
7814 	 */
7815 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7816 			       data_len);
7817 	if (!cmd) {
7818 		err = -ENOMEM;
7819 		goto unlock;
7820 	}
7821 
7822 	hci_req_init(&req, hdev);
7823 
7824 	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
7825 
7826 	if (!err)
7827 		err = hci_req_run(&req, add_advertising_complete);
7828 
7829 	if (err < 0) {
7830 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7831 				      MGMT_STATUS_FAILED);
7832 		mgmt_pending_remove(cmd);
7833 	}
7834 
7835 unlock:
7836 	hci_dev_unlock(hdev);
7837 
7838 	return err;
7839 }
7840 
/* HCI-request completion callback for add_ext_adv_params(). On failure
 * the instance is removed (signalling its removal first if it had
 * already been advertising); on success the originating command is
 * answered with the instance's TX power and remaining data space.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_ext_adv_params *cp;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv_instance;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	adv_instance = hci_find_adv_instance(hdev, cp->instance);
	if (!adv_instance)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv_instance->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (status) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv_instance->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));

	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));
	}

unlock:
	/* The pending command is consumed even when no instance was
	 * found, so it cannot be answered twice.
	 */
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
7897 
7898 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
7899 			      void *data, u16 data_len)
7900 {
7901 	struct mgmt_cp_add_ext_adv_params *cp = data;
7902 	struct mgmt_rp_add_ext_adv_params rp;
7903 	struct mgmt_pending_cmd *cmd = NULL;
7904 	struct adv_info *adv_instance;
7905 	struct hci_request req;
7906 	u32 flags, min_interval, max_interval;
7907 	u16 timeout, duration;
7908 	u8 status;
7909 	s8 tx_power;
7910 	int err;
7911 
7912 	BT_DBG("%s", hdev->name);
7913 
7914 	status = mgmt_le_support(hdev);
7915 	if (status)
7916 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7917 				       status);
7918 
7919 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
7920 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7921 				       MGMT_STATUS_INVALID_PARAMS);
7922 
7923 	/* The purpose of breaking add_advertising into two separate MGMT calls
7924 	 * for params and data is to allow more parameters to be added to this
7925 	 * structure in the future. For this reason, we verify that we have the
7926 	 * bare minimum structure we know of when the interface was defined. Any
7927 	 * extra parameters we don't know about will be ignored in this request.
7928 	 */
7929 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
7930 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7931 				       MGMT_STATUS_INVALID_PARAMS);
7932 
7933 	flags = __le32_to_cpu(cp->flags);
7934 
7935 	if (!requested_adv_flags_are_valid(hdev, flags))
7936 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7937 				       MGMT_STATUS_INVALID_PARAMS);
7938 
7939 	hci_dev_lock(hdev);
7940 
7941 	/* In new interface, we require that we are powered to register */
7942 	if (!hdev_is_powered(hdev)) {
7943 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7944 				      MGMT_STATUS_REJECTED);
7945 		goto unlock;
7946 	}
7947 
7948 	if (adv_busy(hdev)) {
7949 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7950 				      MGMT_STATUS_BUSY);
7951 		goto unlock;
7952 	}
7953 
7954 	/* Parse defined parameters from request, use defaults otherwise */
7955 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
7956 		  __le16_to_cpu(cp->timeout) : 0;
7957 
7958 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
7959 		   __le16_to_cpu(cp->duration) :
7960 		   hdev->def_multi_adv_rotation_duration;
7961 
7962 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7963 		       __le32_to_cpu(cp->min_interval) :
7964 		       hdev->le_adv_min_interval;
7965 
7966 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
7967 		       __le32_to_cpu(cp->max_interval) :
7968 		       hdev->le_adv_max_interval;
7969 
7970 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
7971 		   cp->tx_power :
7972 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
7973 
7974 	/* Create advertising instance with no advertising or response data */
7975 	err = hci_add_adv_instance(hdev, cp->instance, flags,
7976 				   0, NULL, 0, NULL, timeout, duration,
7977 				   tx_power, min_interval, max_interval);
7978 
7979 	if (err < 0) {
7980 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
7981 				      MGMT_STATUS_FAILED);
7982 		goto unlock;
7983 	}
7984 
7985 	/* Submit request for advertising params if ext adv available */
7986 	if (ext_adv_capable(hdev)) {
7987 		hci_req_init(&req, hdev);
7988 		adv_instance = hci_find_adv_instance(hdev, cp->instance);
7989 
7990 		/* Updating parameters of an active instance will return a
7991 		 * Command Disallowed error, so we must first disable the
7992 		 * instance if it is active.
7993 		 */
7994 		if (!adv_instance->pending)
7995 			__hci_req_disable_ext_adv_instance(&req, cp->instance);
7996 
7997 		__hci_req_setup_ext_adv_instance(&req, cp->instance);
7998 
7999 		err = hci_req_run(&req, add_ext_adv_params_complete);
8000 
8001 		if (!err)
8002 			cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
8003 					       hdev, data, data_len);
8004 		if (!cmd) {
8005 			err = -ENOMEM;
8006 			hci_remove_adv_instance(hdev, cp->instance);
8007 			goto unlock;
8008 		}
8009 
8010 	} else {
8011 		rp.instance = cp->instance;
8012 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8013 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8014 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8015 		err = mgmt_cmd_complete(sk, hdev->id,
8016 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8017 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8018 	}
8019 
8020 unlock:
8021 	hci_dev_unlock(hdev);
8022 
8023 	return err;
8024 }
8025 
/* Handle MGMT_OP_ADD_EXT_ADV_DATA: attach advertising and scan
 * response data to an instance previously created by
 * add_ext_adv_params(), then start or schedule advertising. Any error
 * before the request is issued removes the instance again
 * (clear_new_instance), since the two-call sequence is treated as one
 * registration.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */

	hci_req_init(&req, hdev);

	/* NOTE(review): presumably queued so the request is never empty
	 * and the completion callback always runs — confirm.
	 */
	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	if (ext_adv_capable(hdev)) {
		/* Extended advertising: push data/scan response for the
		 * instance and enable it directly.
		 */
		__hci_req_update_adv_data(&req, cp->instance);
		__hci_req_update_scan_rsp_data(&req, cp->instance);
		__hci_req_enable_ext_advertising(&req, cp->instance);

	} else {
		/* If using software rotation, determine next instance to use */

		if (hdev->cur_adv_instance == cp->instance) {
			/* If the currently advertised instance is being changed
			 * then cancel the current advertising and schedule the
			 * next instance. If there is only one instance then the
			 * overridden advertising data will be visible right
			 * away
			 */
			cancel_adv_timeout(hdev);

			next_instance = hci_get_next_instance(hdev,
							      cp->instance);
			if (next_instance)
				schedule_instance = next_instance->instance;
		} else if (!hdev->adv_instance_timeout) {
			/* Immediately advertise the new instance if no other
			 * instance is currently being advertised.
			 */
			schedule_instance = cp->instance;
		}

		/* If the HCI_ADVERTISING flag is set or there is no instance to
		 * be advertised then we have no HCI communication to make.
		 * Simply return.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    !schedule_instance) {
			/* Still announce a newly created instance. */
			if (adv_instance->pending) {
				mgmt_advertising_added(sk, hdev, cp->instance);
				adv_instance->pending = false;
			}
			rp.instance = cp->instance;
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_EXT_ADV_DATA,
						MGMT_STATUS_SUCCESS, &rp,
						sizeof(rp));
			goto unlock;
		}

		err = __hci_req_schedule_adv_instance(&req, schedule_instance,
						      true);
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8171 
/* HCI-request completion callback for remove_advertising(): answer the
 * originating MGMT command, if it is still queued.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
8201 
8202 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8203 			      void *data, u16 data_len)
8204 {
8205 	struct mgmt_cp_remove_advertising *cp = data;
8206 	struct mgmt_rp_remove_advertising rp;
8207 	struct mgmt_pending_cmd *cmd;
8208 	struct hci_request req;
8209 	int err;
8210 
8211 	bt_dev_dbg(hdev, "sock %p", sk);
8212 
8213 	/* Enabling the experimental LL Privay support disables support for
8214 	 * advertising.
8215 	 */
8216 	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
8217 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
8218 				       MGMT_STATUS_NOT_SUPPORTED);
8219 
8220 	hci_dev_lock(hdev);
8221 
8222 	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8223 		err = mgmt_cmd_status(sk, hdev->id,
8224 				      MGMT_OP_REMOVE_ADVERTISING,
8225 				      MGMT_STATUS_INVALID_PARAMS);
8226 		goto unlock;
8227 	}
8228 
8229 	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8230 	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8231 	    pending_find(MGMT_OP_SET_LE, hdev)) {
8232 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8233 				      MGMT_STATUS_BUSY);
8234 		goto unlock;
8235 	}
8236 
8237 	if (list_empty(&hdev->adv_instances)) {
8238 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8239 				      MGMT_STATUS_INVALID_PARAMS);
8240 		goto unlock;
8241 	}
8242 
8243 	hci_req_init(&req, hdev);
8244 
8245 	/* If we use extended advertising, instance is disabled and removed */
8246 	if (ext_adv_capable(hdev)) {
8247 		__hci_req_disable_ext_adv_instance(&req, cp->instance);
8248 		__hci_req_remove_ext_adv_instance(&req, cp->instance);
8249 	}
8250 
8251 	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
8252 
8253 	if (list_empty(&hdev->adv_instances))
8254 		__hci_req_disable_advertising(&req);
8255 
8256 	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
8257 	 * flag is set or the device isn't powered then we have no HCI
8258 	 * communication to make. Simply return.
8259 	 */
8260 	if (skb_queue_empty(&req.cmd_q) ||
8261 	    !hdev_is_powered(hdev) ||
8262 	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
8263 		hci_req_purge(&req);
8264 		rp.instance = cp->instance;
8265 		err = mgmt_cmd_complete(sk, hdev->id,
8266 					MGMT_OP_REMOVE_ADVERTISING,
8267 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8268 		goto unlock;
8269 	}
8270 
8271 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8272 			       data_len);
8273 	if (!cmd) {
8274 		err = -ENOMEM;
8275 		goto unlock;
8276 	}
8277 
8278 	err = hci_req_run(&req, remove_advertising_complete);
8279 	if (err < 0)
8280 		mgmt_pending_remove(cmd);
8281 
8282 unlock:
8283 	hci_dev_unlock(hdev);
8284 
8285 	return err;
8286 }
8287 
8288 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8289 			     void *data, u16 data_len)
8290 {
8291 	struct mgmt_cp_get_adv_size_info *cp = data;
8292 	struct mgmt_rp_get_adv_size_info rp;
8293 	u32 flags, supported_flags;
8294 	int err;
8295 
8296 	bt_dev_dbg(hdev, "sock %p", sk);
8297 
8298 	if (!lmp_le_capable(hdev))
8299 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8300 				       MGMT_STATUS_REJECTED);
8301 
8302 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8303 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8304 				       MGMT_STATUS_INVALID_PARAMS);
8305 
8306 	flags = __le32_to_cpu(cp->flags);
8307 
8308 	/* The current implementation only supports a subset of the specified
8309 	 * flags.
8310 	 */
8311 	supported_flags = get_supported_adv_flags(hdev);
8312 	if (flags & ~supported_flags)
8313 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8314 				       MGMT_STATUS_INVALID_PARAMS);
8315 
8316 	rp.instance = cp->instance;
8317 	rp.flags = cp->flags;
8318 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8319 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8320 
8321 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8322 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8323 
8324 	return err;
8325 }
8326 
/* Dispatch table for incoming MGMT commands. The array index IS the
 * MGMT opcode, so entries must stay in opcode order and none may be
 * removed or reordered. The second field is the expected parameter
 * size (a minimum when HCI_MGMT_VAR_LEN is set).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8454 
/* Announce a newly registered controller index to mgmt sockets. */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed through the mgmt interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			/* Unconfigured controllers get the dedicated
			 * unconfigured-index event.
			 */
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		/* Unknown device types are not announced at all. */
		return;
	}

	ev.bus = hdev->bus;

	/* The extended index event additionally carries type and bus. */
	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8486 
/* Announce removal of a controller index and fail its pending commands. */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed through the mgmt interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Complete all still-pending commands (opcode 0 matches
		 * any) with INVALID_INDEX since the index is going away.
		 */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	/* The extended index event additionally carries type and bus. */
	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8521 
8522 /* This function requires the caller holds hdev->lock */
8523 static void restart_le_actions(struct hci_dev *hdev)
8524 {
8525 	struct hci_conn_params *p;
8526 
8527 	list_for_each_entry(p, &hdev->le_conn_params, list) {
8528 		/* Needed for AUTO_OFF case where might not "really"
8529 		 * have been powered off.
8530 		 */
8531 		list_del_init(&p->action);
8532 
8533 		switch (p->auto_connect) {
8534 		case HCI_AUTO_CONN_DIRECT:
8535 		case HCI_AUTO_CONN_ALWAYS:
8536 			list_add(&p->action, &hdev->pend_le_conns);
8537 			break;
8538 		case HCI_AUTO_CONN_REPORT:
8539 			list_add(&p->action, &hdev->pend_le_reports);
8540 			break;
8541 		default:
8542 			break;
8543 		}
8544 	}
8545 }
8546 
/* Finalize a power-on attempt and answer pending Set Powered commands. */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		/* On success, re-queue LE auto-connect actions and kick
		 * background scanning back into gear.
		 */
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* Drop the socket reference taken during the iteration, if any. */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
8569 
/* Handle mgmt bookkeeping when the controller powers down. */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	/* Answer all pending Set Powered commands first. */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * us use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail every remaining pending command with the chosen status. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* If a non-zero class of device was set, report it as cleared
	 * now that the controller is off.
	 */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* Drop the socket reference taken during the iteration, if any. */
	if (match.sk)
		sock_put(match.sk);
}
8603 
8604 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8605 {
8606 	struct mgmt_pending_cmd *cmd;
8607 	u8 status;
8608 
8609 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8610 	if (!cmd)
8611 		return;
8612 
8613 	if (err == -ERFKILL)
8614 		status = MGMT_STATUS_RFKILLED;
8615 	else
8616 		status = MGMT_STATUS_FAILED;
8617 
8618 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8619 
8620 	mgmt_pending_remove(cmd);
8621 }
8622 
8623 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8624 		       bool persistent)
8625 {
8626 	struct mgmt_ev_new_link_key ev;
8627 
8628 	memset(&ev, 0, sizeof(ev));
8629 
8630 	ev.store_hint = persistent;
8631 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8632 	ev.key.addr.type = BDADDR_BREDR;
8633 	ev.key.type = key->type;
8634 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8635 	ev.key.pin_len = key->pin_len;
8636 
8637 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8638 }
8639 
8640 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8641 {
8642 	switch (ltk->type) {
8643 	case SMP_LTK:
8644 	case SMP_LTK_SLAVE:
8645 		if (ltk->authenticated)
8646 			return MGMT_LTK_AUTHENTICATED;
8647 		return MGMT_LTK_UNAUTHENTICATED;
8648 	case SMP_LTK_P256:
8649 		if (ltk->authenticated)
8650 			return MGMT_LTK_P256_AUTH;
8651 		return MGMT_LTK_P256_UNAUTH;
8652 	case SMP_LTK_P256_DEBUG:
8653 		return MGMT_LTK_P256_DEBUG;
8654 	}
8655 
8656 	return MGMT_LTK_UNAUTHENTICATED;
8657 }
8658 
/* Emit a New Long Term Key event for a freshly distributed LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK is the key used by the central role. */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8701 
8702 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8703 {
8704 	struct mgmt_ev_new_irk ev;
8705 
8706 	memset(&ev, 0, sizeof(ev));
8707 
8708 	ev.store_hint = persistent;
8709 
8710 	bacpy(&ev.rpa, &irk->rpa);
8711 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8712 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8713 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8714 
8715 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
8716 }
8717 
/* Emit a New CSRK event for a freshly distributed signature key. */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
8747 
8748 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8749 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8750 			 u16 max_interval, u16 latency, u16 timeout)
8751 {
8752 	struct mgmt_ev_new_conn_param ev;
8753 
8754 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
8755 		return;
8756 
8757 	memset(&ev, 0, sizeof(ev));
8758 	bacpy(&ev.addr.bdaddr, bdaddr);
8759 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8760 	ev.store_hint = store_hint;
8761 	ev.min_interval = cpu_to_le16(min_interval);
8762 	ev.max_interval = cpu_to_le16(max_interval);
8763 	ev.latency = cpu_to_le16(latency);
8764 	ev.timeout = cpu_to_le16(timeout);
8765 
8766 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
8767 }
8768 
/* Emit a Device Connected event, carrying either the peer's LE
 * advertising data or, for BR/EDR, its name and class of device as
 * EIR-formatted fields.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* Event header plus variable-length EIR data share this
	 * on-stack buffer; eir_len tracks how much is appended.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device if it is non-zero. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
8805 
/* Per-command callback: complete a pending Disconnect command and hand
 * its socket (with a fresh reference) back to the caller via *data.
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	/* Take the socket reference before the pending command (which
	 * owns its own reference) is removed.
	 */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
8817 
/* Per-command callback: finish a pending Unpair Device command and
 * signal the device as unpaired (cmd->sk is passed along, presumably
 * so the originator is excluded from the broadcast — see
 * device_unpaired()).
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
8828 
8829 bool mgmt_powering_down(struct hci_dev *hdev)
8830 {
8831 	struct mgmt_pending_cmd *cmd;
8832 	struct mgmt_mode *cp;
8833 
8834 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8835 	if (!cmd)
8836 		return false;
8837 
8838 	cp = cmd->param;
8839 	if (!cp->val)
8840 		return true;
8841 
8842 	return false;
8843 }
8844 
/* Emit a Device Disconnected event and complete any pending
 * Disconnect/Unpair commands that match.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		/* Last connection gone during power-down: run the
		 * power-off work immediately instead of waiting.
		 */
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report disconnections that mgmt reported as connected. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete pending Disconnect commands; sk receives a referenced
	 * socket of the originator so it can be skipped below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8884 
/* Complete a pending Disconnect command that failed at the HCI level. */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	/* Unpair commands are completed regardless of which device the
	 * failed disconnect was for.
	 */
	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only complete the command if it targeted this exact address. */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8910 
/* Emit a Connect Failed event for an outgoing connection attempt. */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		/* Last connection gone during power-down: run the
		 * power-off work immediately instead of waiting.
		 */
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
8930 
8931 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8932 {
8933 	struct mgmt_ev_pin_code_request ev;
8934 
8935 	bacpy(&ev.addr.bdaddr, bdaddr);
8936 	ev.addr.type = BDADDR_BREDR;
8937 	ev.secure = secure;
8938 
8939 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
8940 }
8941 
8942 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8943 				  u8 status)
8944 {
8945 	struct mgmt_pending_cmd *cmd;
8946 
8947 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8948 	if (!cmd)
8949 		return;
8950 
8951 	cmd->cmd_complete(cmd, mgmt_status(status));
8952 	mgmt_pending_remove(cmd);
8953 }
8954 
8955 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8956 				      u8 status)
8957 {
8958 	struct mgmt_pending_cmd *cmd;
8959 
8960 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
8961 	if (!cmd)
8962 		return;
8963 
8964 	cmd->cmd_complete(cmd, mgmt_status(status));
8965 	mgmt_pending_remove(cmd);
8966 }
8967 
/* Ask userspace to confirm a pairing value (numeric comparison). */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	/* The displayed value is little-endian on the wire. */
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
8984 
8985 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8986 			      u8 link_type, u8 addr_type)
8987 {
8988 	struct mgmt_ev_user_passkey_request ev;
8989 
8990 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8991 
8992 	bacpy(&ev.addr.bdaddr, bdaddr);
8993 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8994 
8995 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
8996 			  NULL);
8997 }
8998 
/* Complete a pending user pairing response command of the given opcode.
 * Returns 0 on success or -ENOENT if no such command was pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
9014 
9015 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9016 				     u8 link_type, u8 addr_type, u8 status)
9017 {
9018 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9019 					  status, MGMT_OP_USER_CONFIRM_REPLY);
9020 }
9021 
9022 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9023 					 u8 link_type, u8 addr_type, u8 status)
9024 {
9025 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9026 					  status,
9027 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
9028 }
9029 
9030 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9031 				     u8 link_type, u8 addr_type, u8 status)
9032 {
9033 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9034 					  status, MGMT_OP_USER_PASSKEY_REPLY);
9035 }
9036 
9037 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9038 					 u8 link_type, u8 addr_type, u8 status)
9039 {
9040 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9041 					  status,
9042 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
9043 }
9044 
/* Notify userspace of the passkey to display, and whether a digit has
 * just been entered on the remote side.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	/* The passkey is little-endian on the wire. */
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
9060 
/* Emit an Auth Failed event and complete a matching pending pairing
 * command, if one exists for this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the originating socket (if any); it gets the command
	 * response instead of the broadcast event.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9081 
/* Handle completion of the HCI authentication-enable change and answer
 * pending Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* Fail all pending Set Link Security commands with the
		 * translated HCI status.
		 */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Sync the mgmt LINK_SECURITY flag with the HCI_AUTH state;
	 * "changed" is true only if the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the socket reference taken during the iteration, if any. */
	if (match.sk)
		sock_put(match.sk);
}
9108 
/* Queue an HCI Write EIR command that clears the extended inquiry
 * response data, and wipe the cached copy in hdev->eir.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	/* Nothing to do if the controller has no EIR support. */
	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
9123 
/* Handle completion of the HCI SSP mode change: sync the SSP/HS flags,
 * answer pending Set SSP commands and update (or clear) the EIR data.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically set SSP
		 * flag (and HS, which depends on SSP) and announce it.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables HS; "changed" must be true
		 * if either flag actually flipped.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the socket reference taken during the iteration, if any. */
	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		/* Keep SSP debug mode in sync while SSP is on. */
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
9176 
9177 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9178 {
9179 	struct cmd_lookup *match = data;
9180 
9181 	if (match->sk == NULL) {
9182 		match->sk = cmd->sk;
9183 		sock_hold(match->sk);
9184 	}
9185 }
9186 
/* Handle completion of a class-of-device update and notify listeners. */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these commands may have triggered the CoD change. */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	/* Drop the socket reference taken by sk_lookup, if any. */
	if (match.sk)
		sock_put(match.sk);
}
9205 
/* Handle completion of a local-name change and broadcast the new name. */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending command means the change did not come from
		 * mgmt; cache the name directly.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the originating socket (if any) from the broadcast. */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9233 
9234 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9235 {
9236 	int i;
9237 
9238 	for (i = 0; i < uuid_count; i++) {
9239 		if (!memcmp(uuid, uuids[i], 16))
9240 			return true;
9241 	}
9242 
9243 	return false;
9244 }
9245 
/* Walk the EIR/advertising data and return true if any advertised
 * 16/32/128-bit UUID (expanded to 128 bits via the Bluetooth base
 * UUID) matches an entry in the filter list.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the EIR data. */
		if (field_len == 0)
			break;

		/* Stop on truncated fields that would overrun the data. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 2-byte UUIDs fill octets 12-13 of the base UUID. */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4-byte UUIDs fill octets 12-15 of the base UUID. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 16-byte UUIDs are used as-is. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length octet + payload). */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9300 
/* Schedule an LE scan restart so duplicate-filtering controllers report
 * fresh RSSI values, unless the scan window is about to end anyway.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart if the scan would expire before the restart
	 * delay elapses.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
9315 
/* Apply the service-discovery result filter (RSSI threshold and UUID
 * list) to a found device; returns false if the result must be dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
9360 
/* Emit a Device Found event for an inquiry/scan result, applying the
 * active discovery filters first.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* Event header plus variable-length EIR share this buffer. */
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR already carries one. */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
9445 
9446 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9447 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9448 {
9449 	struct mgmt_ev_device_found *ev;
9450 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9451 	u16 eir_len;
9452 
9453 	ev = (struct mgmt_ev_device_found *) buf;
9454 
9455 	memset(buf, 0, sizeof(buf));
9456 
9457 	bacpy(&ev->addr.bdaddr, bdaddr);
9458 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9459 	ev->rssi = rssi;
9460 
9461 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9462 				  name_len);
9463 
9464 	ev->eir_len = cpu_to_le16(eir_len);
9465 
9466 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
9467 }
9468 
9469 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9470 {
9471 	struct mgmt_ev_discovering ev;
9472 
9473 	bt_dev_dbg(hdev, "discovering %u", discovering);
9474 
9475 	memset(&ev, 0, sizeof(ev));
9476 	ev.type = hdev->discovery.type;
9477 	ev.discovering = discovering;
9478 
9479 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9480 }
9481 
9482 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9483 {
9484 	struct mgmt_ev_controller_suspend ev;
9485 
9486 	ev.suspend_state = state;
9487 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9488 }
9489 
9490 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9491 		   u8 addr_type)
9492 {
9493 	struct mgmt_ev_controller_resume ev;
9494 
9495 	ev.wake_reason = reason;
9496 	if (bdaddr) {
9497 		bacpy(&ev.addr.bdaddr, bdaddr);
9498 		ev.addr.type = addr_type;
9499 	} else {
9500 		memset(&ev.addr, 0, sizeof(ev.addr));
9501 	}
9502 
9503 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9504 }
9505 
/* Registration descriptor for the HCI control channel: routes incoming
 * mgmt commands to the mgmt_handlers table and runs mgmt_init_hdev when
 * a controller is first accessed through this channel.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9512 
/* Register the management control channel with the HCI socket layer.
 * Returns 0 on success or a negative error from hci_mgmt_chan_register.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9517 
/* Unregister the management control channel on subsystem teardown */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9522