xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 519a8a6c)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 
42 #define MGMT_VERSION	1
43 #define MGMT_REVISION	18
44 
/* Management opcodes accepted from sockets with HCI_SOCK_TRUSTED set;
 * untrusted sockets are limited to mgmt_untrusted_commands below
 * (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
};
126 
/* Events that may be delivered to sockets with HCI_SOCK_TRUSTED set;
 * untrusted sockets only receive mgmt_untrusted_events below.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
};
167 
/* Read-only subset of opcodes available to untrusted sockets
 * (those without HCI_SOCK_TRUSTED set).
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
180 
/* Subset of events that untrusted sockets are allowed to receive */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
};
197 
/* 2 second timeout, expressed in jiffies */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* An all-zero 16-byte key value */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
202 
/* HCI to MGMT error code conversion table, indexed by the raw HCI
 * status code. Out-of-range codes are handled by mgmt_status() below.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
267 
268 static u8 mgmt_status(u8 hci_status)
269 {
270 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
271 		return mgmt_status_table[hci_status];
272 
273 	return MGMT_STATUS_FAILED;
274 }
275 
/* Send @event on the control channel to sockets matching @flag, with
 * no socket excluded from delivery.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
282 
/* Send @event on the control channel to sockets matching @flag,
 * excluding @skip_sk from delivery.
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
289 
/* Send @event on the control channel to trusted sockets only
 * (HCI_SOCK_TRUSTED), excluding @skip_sk from delivery.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
296 
297 static u8 le_addr_type(u8 mgmt_addr_type)
298 {
299 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
300 		return ADDR_LE_DEV_PUBLIC;
301 	else
302 		return ADDR_LE_DEV_RANDOM;
303 }
304 
/* Fill a mgmt_rp_read_version reply with the implemented management
 * interface version and revision (revision in little-endian order).
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
312 
/* MGMT_OP_READ_VERSION handler: reply with the management interface
 * version/revision. Not tied to any controller index.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
325 
326 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
327 			 u16 data_len)
328 {
329 	struct mgmt_rp_read_commands *rp;
330 	u16 num_commands, num_events;
331 	size_t rp_size;
332 	int i, err;
333 
334 	bt_dev_dbg(hdev, "sock %p", sk);
335 
336 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
337 		num_commands = ARRAY_SIZE(mgmt_commands);
338 		num_events = ARRAY_SIZE(mgmt_events);
339 	} else {
340 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
341 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
342 	}
343 
344 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
345 
346 	rp = kmalloc(rp_size, GFP_KERNEL);
347 	if (!rp)
348 		return -ENOMEM;
349 
350 	rp->num_commands = cpu_to_le16(num_commands);
351 	rp->num_events = cpu_to_le16(num_events);
352 
353 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
354 		__le16 *opcode = rp->opcodes;
355 
356 		for (i = 0; i < num_commands; i++, opcode++)
357 			put_unaligned_le16(mgmt_commands[i], opcode);
358 
359 		for (i = 0; i < num_events; i++, opcode++)
360 			put_unaligned_le16(mgmt_events[i], opcode);
361 	} else {
362 		__le16 *opcode = rp->opcodes;
363 
364 		for (i = 0; i < num_commands; i++, opcode++)
365 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
366 
367 		for (i = 0; i < num_events; i++, opcode++)
368 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
369 	}
370 
371 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
372 				rp, rp_size);
373 	kfree(rp);
374 
375 	return err;
376 }
377 
/* MGMT_OP_READ_INDEX_LIST handler: reply with the indexes of all
 * configured HCI_PRIMARY controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count matching controllers to size the reply.
	 * The same read lock is held for both passes, so the list
	 * cannot change in between.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: hci_dev_list_lock is held, so sleeping is not allowed */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, additionally skipping
	 * controllers still in setup/config and those claimed by a
	 * user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the length from the count actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
437 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reports only unconfigured HCI_PRIMARY controllers.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count matching controllers to size the reply */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: hci_dev_list_lock is held, so sleeping is not allowed */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, additionally skipping
	 * controllers still in setup/config and those claimed by a
	 * user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the length from the count actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
497 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with index, bus and type
 * (configured/unconfigured primary, or AMP) for every usable
 * controller. Also switches this socket over to extended index
 * events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count matching controllers to size the reply */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: hci_dev_list_lock is held, so sleeping is not allowed */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries, skipping controllers
	 * still in setup/config and those claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;	/* unconfigured */
			else
				rp->entry[count].type = 0x00;	/* configured */
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;	/* AMP controller */
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
571 
572 static bool is_configured(struct hci_dev *hdev)
573 {
574 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
575 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
576 		return false;
577 
578 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
579 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
580 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
581 		return false;
582 
583 	return true;
584 }
585 
/* Build the little-endian bitmask of configuration options that are
 * still required before the controller is fully configured. Mirrors
 * the checks in is_configured().
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* Quirk-mandated external configuration not yet done */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Quirks require a user-supplied public address, none set yet */
	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
601 
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the currently missing
 * options to sockets subscribed to option events, excluding @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
609 
/* Complete @opcode with the controller's missing-options bitmask as
 * the reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
617 
/* MGMT_OP_READ_CONFIG_INFO handler: reply with the manufacturer id
 * plus the supported and still-missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External config is supported when the quirk says so */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A settable public address requires a driver set_bdaddr hook */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
645 
/* Build the MGMT_PHY_* bitmask of PHYs the controller hardware
 * supports, derived from the BR/EDR LMP feature bits and the LE
 * feature page.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 3M requires EDR 2M; the 3/5-slot variants
		 * additionally require the EDR slot capabilities.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for LE */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
697 
/* Build the MGMT_PHY_* bitmask of PHYs currently selected. For
 * BR/EDR the EDR packet-type bits in hdev->pkt_type are "do not use"
 * bits, hence the inverted tests; for LE the default TX/RX PHY
 * preferences are consulted directly.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* HCI_2DHx/HCI_3DHx set means "packet type
			 * disabled", so a clear bit selects the PHY.
			 */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
760 
761 static u32 get_configurable_phys(struct hci_dev *hdev)
762 {
763 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
764 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
765 }
766 
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support, based on its LMP/LE capabilities and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Settings available on every controller */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable requires at least Bluetooth 1.2 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration requires either an external-config quirk or a
	 * driver hook to set the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
817 
/* Build the MGMT_SETTING_* bitmask of settings currently active,
 * derived from the runtime hci_dev flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
888 
/* Look up a pending management command for @opcode on the control
 * channel of @hdev.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
893 
/* Like pending_find() but additionally matches the command's
 * user data pointer.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
900 
901 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
902 {
903 	struct mgmt_pending_cmd *cmd;
904 
905 	/* If there's a pending mgmt command the flags will not yet have
906 	 * their final values, so check for this first.
907 	 */
908 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
909 	if (cmd) {
910 		struct mgmt_mode *cp = cmd->param;
911 		if (cp->val == 0x01)
912 			return LE_AD_GENERAL;
913 		else if (cp->val == 0x02)
914 			return LE_AD_LIMITED;
915 	} else {
916 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
917 			return LE_AD_LIMITED;
918 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
919 			return LE_AD_GENERAL;
920 	}
921 
922 	return 0;
923 }
924 
925 bool mgmt_get_connectable(struct hci_dev *hdev)
926 {
927 	struct mgmt_pending_cmd *cmd;
928 
929 	/* If there's a pending mgmt command the flag will not yet have
930 	 * it's final value, so check for this first.
931 	 */
932 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
933 	if (cmd) {
934 		struct mgmt_mode *cp = cmd->param;
935 
936 		return cp->val;
937 	}
938 
939 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
940 }
941 
/* Delayed work: when the service cache period ends, push the pending
 * EIR and class-of-device updates to the controller in one HCI
 * request. Does nothing unless HCI_SERVICE_CACHE was set.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	/* Submit both updates as a single request, no completion needed */
	hci_req_run(&req, NULL);
}
962 
/* Delayed work: mark the resolvable private address as expired and,
 * if advertising is active, restart it so that a fresh RPA gets
 * generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh when advertising is not enabled */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
987 
/* One-time per-controller management setup, performed the first time
 * a management socket touches @hdev (guarded by the HCI_MGMT flag).
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Already initialized if HCI_MGMT was previously set */
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1003 
/* MGMT_OP_READ_INFO handler: reply with the controller's address,
 * HCI version, manufacturer, supported/current settings, class of
 * device and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1033 
1034 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1035 {
1036 	u16 eir_len = 0;
1037 	size_t name_len;
1038 
1039 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1040 		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1041 					  hdev->dev_class, 3);
1042 
1043 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1044 		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1045 					  hdev->appearance);
1046 
1047 	name_len = strlen(hdev->dev_name);
1048 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1049 				  hdev->dev_name, name_len);
1050 
1051 	name_len = strlen(hdev->short_name);
1052 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1053 				  hdev->short_name, name_len);
1054 
1055 	return eir_len;
1056 }
1057 
1058 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1059 				    void *data, u16 data_len)
1060 {
1061 	char buf[512];
1062 	struct mgmt_rp_read_ext_info *rp = (void *)buf;
1063 	u16 eir_len;
1064 
1065 	bt_dev_dbg(hdev, "sock %p", sk);
1066 
1067 	memset(&buf, 0, sizeof(buf));
1068 
1069 	hci_dev_lock(hdev);
1070 
1071 	bacpy(&rp->bdaddr, &hdev->bdaddr);
1072 
1073 	rp->version = hdev->hci_ver;
1074 	rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1075 
1076 	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1077 	rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1078 
1079 
1080 	eir_len = append_eir_data_to_buf(hdev, rp->eir);
1081 	rp->eir_len = cpu_to_le16(eir_len);
1082 
1083 	hci_dev_unlock(hdev);
1084 
1085 	/* If this command is called at least once, then the events
1086 	 * for class of device and local name changes are disabled
1087 	 * and only the new extended controller information event
1088 	 * is used.
1089 	 */
1090 	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1091 	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1092 	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1093 
1094 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1095 				 sizeof(*rp) + eir_len);
1096 }
1097 
1098 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1099 {
1100 	char buf[512];
1101 	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1102 	u16 eir_len;
1103 
1104 	memset(buf, 0, sizeof(buf));
1105 
1106 	eir_len = append_eir_data_to_buf(hdev, ev->eir);
1107 	ev->eir_len = cpu_to_le16(eir_len);
1108 
1109 	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1110 				  sizeof(*ev) + eir_len,
1111 				  HCI_MGMT_EXT_INFO_EVENTS, skip);
1112 }
1113 
1114 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1115 {
1116 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1117 
1118 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1119 				 sizeof(settings));
1120 }
1121 
1122 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1123 {
1124 	bt_dev_dbg(hdev, "status 0x%02x", status);
1125 
1126 	if (hci_conn_count(hdev) == 0) {
1127 		cancel_delayed_work(&hdev->power_off);
1128 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1129 	}
1130 }
1131 
1132 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1133 {
1134 	struct mgmt_ev_advertising_added ev;
1135 
1136 	ev.instance = instance;
1137 
1138 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1139 }
1140 
1141 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1142 			      u8 instance)
1143 {
1144 	struct mgmt_ev_advertising_removed ev;
1145 
1146 	ev.instance = instance;
1147 
1148 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1149 }
1150 
1151 static void cancel_adv_timeout(struct hci_dev *hdev)
1152 {
1153 	if (hdev->adv_instance_timeout) {
1154 		hdev->adv_instance_timeout = 0;
1155 		cancel_delayed_work(&hdev->adv_instance_expire);
1156 	}
1157 }
1158 
/* Build and run one HCI request that winds down all active state in
 * preparation for powering the controller off: disable page/inquiry
 * scan, stop advertising, stop discovery and abort every connection.
 *
 * Returns the hci_req_run() result; -ENODATA means nothing needed to
 * be sent (the caller treats that as "power off immediately").
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page and inquiry scan if either is active */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Drop all advertising instances (instance 0x00 == all) */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1192 
/* Handler for MGMT_OP_SET_POWERED: power the controller on or off.
 * The actual transition happens asynchronously on req_workqueue; the
 * pending command is completed from the power on/off work.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid values */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just confirm the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1247 
1248 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1249 {
1250 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1251 
1252 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1253 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1254 }
1255 
/* Public wrapper to broadcast a settings change to all mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1260 
/* Context passed through mgmt_pending_foreach() callbacks. */
struct cmd_lookup {
	struct sock *sk;	/* first matching socket (held reference) */
	struct hci_dev *hdev;
	u8 mgmt_status;		/* status to report, where applicable */
};
1266 
/* mgmt_pending_foreach() callback: complete @cmd with the current
 * settings and remember the first socket seen (with a held reference)
 * in the cmd_lookup so the caller can skip it when broadcasting.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink before freeing; we iterate the pending list */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1282 
/* mgmt_pending_foreach() callback: fail @cmd with the status pointed
 * to by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1290 
1291 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1292 {
1293 	if (cmd->cmd_complete) {
1294 		u8 *status = data;
1295 
1296 		cmd->cmd_complete(cmd, *status);
1297 		mgmt_pending_remove(cmd);
1298 
1299 		return;
1300 	}
1301 
1302 	cmd_status_rsp(cmd, data);
1303 }
1304 
/* Generic cmd_complete handler: echo back the command's own
 * parameters as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1310 
/* cmd_complete handler for commands whose parameters start with a
 * struct mgmt_addr_info: reply with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1316 
1317 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1318 {
1319 	if (!lmp_bredr_capable(hdev))
1320 		return MGMT_STATUS_NOT_SUPPORTED;
1321 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1322 		return MGMT_STATUS_REJECTED;
1323 	else
1324 		return MGMT_STATUS_SUCCESS;
1325 }
1326 
1327 static u8 mgmt_le_support(struct hci_dev *hdev)
1328 {
1329 	if (!lmp_le_capable(hdev))
1330 		return MGMT_STATUS_NOT_SUPPORTED;
1331 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1332 		return MGMT_STATUS_REJECTED;
1333 	else
1334 		return MGMT_STATUS_SUCCESS;
1335 }
1336 
/* Completion hook for the discoverable update: finish the pending
 * SET_DISCOVERABLE command, arm the discoverable timeout if one was
 * requested, and broadcast the new settings on success.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the limited-discoverable flag set optimistically
		 * in set_discoverable().
		 */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* The timeout is armed only now that the mode change succeeded */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1371 
/* Handler for MGMT_OP_SET_DISCOVERABLE: val 0x00 disables, 0x01
 * enables general discoverable, 0x02 enables limited discoverable
 * (which requires a timeout). The HCI-level update runs from
 * discoverable_update work; completion happens in
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Discoverable requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes share HCI state, so
	 * neither may be pending when starting a new one.
	 */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable only makes sense on top of connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	/* While advertising is paused the mode cannot be changed */
	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1501 
/* Completion hook for the connectable update: finish the pending
 * SET_CONNECTABLE command and broadcast the new settings on success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1529 
1530 static int set_connectable_update_settings(struct hci_dev *hdev,
1531 					   struct sock *sk, u8 val)
1532 {
1533 	bool changed = false;
1534 	int err;
1535 
1536 	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1537 		changed = true;
1538 
1539 	if (val) {
1540 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1541 	} else {
1542 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1543 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1544 	}
1545 
1546 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1547 	if (err < 0)
1548 		return err;
1549 
1550 	if (changed) {
1551 		hci_req_update_scan(hdev);
1552 		hci_update_background_scan(hdev);
1553 		return new_settings(hdev, sk);
1554 	}
1555 
1556 	return 0;
1557 }
1558 
/* Handler for MGMT_OP_SET_CONNECTABLE: val 0x00 disables, 0x01
 * enables connectable mode. When powered, the HCI-level update runs
 * from connectable_update work and completes in
 * mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the stored settings need updating */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Connectable and discoverable changes share HCI state, so
	 * neither may be pending when starting a new one.
	 */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable implies dropping discoverable,
		 * so any pending discoverable timeout is obsolete.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1615 
/* Handler for MGMT_OP_SET_BONDABLE: toggle the bondable flag. This is
 * a pure flag change with no direct HCI traffic, except that in
 * limited privacy mode the advertising address may need refreshing.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear tells us whether the flag actually flipped */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1658 
/* Handler for MGMT_OP_SET_LINK_SECURITY: enable/disable BR/EDR link
 * level security (authentication) via HCI Write Auth Enable. While
 * powered off only the stored flag is updated.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Link security is a BR/EDR-only feature */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: just track the desired state in the flag */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1727 
/* Handler for MGMT_OP_SET_SSP: enable/disable Secure Simple Pairing
 * via HCI Write SSP Mode. Disabling SSP also drops High Speed support
 * since HS depends on SSP. While powered off only flags are updated.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SSP is a BR/EDR feature and needs controller support */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: track the desired state via flags only */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also clears HS; report "changed"
			 * if either flag flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just confirm the settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off SSP debug mode if it was on */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1808 
/* Handler for MGMT_OP_SET_HS: enable/disable High Speed (AMP) support.
 * This is a flag-only change; HS requires SSP to be enabled, and
 * disabling it is rejected while the controller is powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending SET_SSP could change the SSP state underneath us */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS while powered is rejected */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1865 
/* HCI request completion for set_le(): respond to all pending SET_LE
 * commands, broadcast the new settings, and when LE ended up enabled
 * refresh the default advertising/scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending SET_LE with the HCI error */
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	/* Complete all pending SET_LE; match.sk gets the first socket */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1912 
/* Handler for MGMT_OP_SET_LE: enable/disable the LE transport via HCI
 * Write LE Host Supported. LE-only controllers cannot have LE turned
 * off. Completion happens in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Turning LE off removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off, or the host LE state already matches: update
	 * flags only, no HCI traffic needed.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Advertising cannot stay on without LE */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Before disabling LE on the host, stop any ongoing
		 * advertising and clear extended advertising sets.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2019 
2020 /* This is a helper function to test for pending mgmt commands that can
2021  * cause CoD or EIR HCI commands. We can only allow one such pending
2022  * mgmt command at a time since otherwise we cannot easily track what
2023  * the current values are, will be, and based on that calculate if a new
2024  * HCI command needs to be sent and if yes with what value.
2025  */
2026 static bool pending_eir_or_class(struct hci_dev *hdev)
2027 {
2028 	struct mgmt_pending_cmd *cmd;
2029 
2030 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2031 		switch (cmd->opcode) {
2032 		case MGMT_OP_ADD_UUID:
2033 		case MGMT_OP_REMOVE_UUID:
2034 		case MGMT_OP_SET_DEV_CLASS:
2035 		case MGMT_OP_SET_POWERED:
2036 			return true;
2037 		}
2038 	}
2039 
2040 	return false;
2041 }
2042 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16- and 32-bit UUIDs are shortened forms
 * of UUIDs built on this base.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2047 
2048 static u8 get_uuid_size(const u8 *uuid)
2049 {
2050 	u32 val;
2051 
2052 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2053 		return 128;
2054 
2055 	val = get_unaligned_le32(&uuid[12]);
2056 	if (val > 0xffff)
2057 		return 32;
2058 
2059 	return 16;
2060 }
2061 
/* Complete a pending class-of-device related command (@mgmt_op) with
 * the translated @status and the current device class as payload.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2080 
/* HCI request completion for add_uuid(): finish the pending command. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2087 
/* Handler for MGMT_OP_ADD_UUID: record a service UUID and refresh the
 * class of device and EIR data on the controller. Completion happens
 * in add_uuid_complete(); if no HCI traffic is needed (-ENODATA) the
 * command completes immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Nothing needed to be sent; complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2145 
2146 static bool enable_service_cache(struct hci_dev *hdev)
2147 {
2148 	if (!hdev_is_powered(hdev))
2149 		return false;
2150 
2151 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2152 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2153 				   CACHE_TIMEOUT);
2154 		return true;
2155 	}
2156 
2157 	return false;
2158 }
2159 
/* HCI request completion callback for MGMT_OP_REMOVE_UUID */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2166 
/* Handle MGMT_OP_REMOVE_UUID: remove a single service UUID, or all of
 * them when the all-zero wildcard UUID is given, then refresh class of
 * device and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache work was armed, the class/EIR
		 * refresh happens when it fires, so reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	/* -ENODATA means no HCI commands were queued; complete with the
	 * current class of device immediately.
	 */
	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Track the command so remove_uuid_complete() can reply later */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2245 
/* HCI request completion callback for MGMT_OP_SET_DEV_CLASS */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2252 
/* Handle MGMT_OP_SET_DEV_CLASS: update major/minor class of device.
 * BR/EDR only; the low two bits of minor and the high three bits of
 * major are reserved and must be zero.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reserved bits of the Class of Device field must stay zero */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off just store the values; they are pushed to the
	 * controller on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* NOTE(review): the lock is dropped around the synchronous cancel,
	 * presumably because the service cache work itself takes
	 * hci_dev_lock - confirm before restructuring.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	/* -ENODATA means no HCI commands were queued; complete with the
	 * current class of device immediately.
	 */
	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Track the command so set_class_complete() can reply later */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2323 
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace the whole set of stored BR/EDR
 * link keys with the ones supplied by userspace. All parameters are
 * validated before any existing keys are dropped.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps the total command length within U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before clearing the existing keys so a bad
	 * load leaves the old state intact. 0x08 is the highest defined
	 * link key type accepted here.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify listeners if the debug-keys setting actually flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the block list must never be stored */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2412 
2413 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2414 			   u8 addr_type, struct sock *skip_sk)
2415 {
2416 	struct mgmt_ev_device_unpaired ev;
2417 
2418 	bacpy(&ev.addr.bdaddr, bdaddr);
2419 	ev.addr.type = addr_type;
2420 
2421 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2422 			  skip_sk);
2423 }
2424 
/* Handle MGMT_OP_UNPAIR_DEVICE: remove all keys for a device and
 * optionally terminate its connection. BR/EDR removes the stored link
 * key; LE aborts any ongoing SMP pairing, removes LTK/IRK and disables
 * auto-connection.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Without an active connection the parameters can be dropped now */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Disconnect path: reply only once the link is actually torn down */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2552 
/* Handle MGMT_OP_DISCONNECT: request termination of a BR/EDR or LE
 * connection. The reply to userspace is deferred until the controller
 * confirms the disconnection.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2618 
2619 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2620 {
2621 	switch (link_type) {
2622 	case LE_LINK:
2623 		switch (addr_type) {
2624 		case ADDR_LE_DEV_PUBLIC:
2625 			return BDADDR_LE_PUBLIC;
2626 
2627 		default:
2628 			/* Fallback to LE Random address type */
2629 			return BDADDR_LE_RANDOM;
2630 		}
2631 
2632 	default:
2633 		/* Fallback to BR/EDR type */
2634 		return BDADDR_BREDR;
2635 	}
2636 }
2637 
2638 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2639 			   u16 data_len)
2640 {
2641 	struct mgmt_rp_get_connections *rp;
2642 	struct hci_conn *c;
2643 	int err;
2644 	u16 i;
2645 
2646 	bt_dev_dbg(hdev, "sock %p", sk);
2647 
2648 	hci_dev_lock(hdev);
2649 
2650 	if (!hdev_is_powered(hdev)) {
2651 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2652 				      MGMT_STATUS_NOT_POWERED);
2653 		goto unlock;
2654 	}
2655 
2656 	i = 0;
2657 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2658 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2659 			i++;
2660 	}
2661 
2662 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2663 	if (!rp) {
2664 		err = -ENOMEM;
2665 		goto unlock;
2666 	}
2667 
2668 	i = 0;
2669 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2670 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2671 			continue;
2672 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2673 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2674 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2675 			continue;
2676 		i++;
2677 	}
2678 
2679 	rp->conn_count = cpu_to_le16(i);
2680 
2681 	/* Recalculate length in case of filtered SCO connections, etc */
2682 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2683 				struct_size(rp, addr, i));
2684 
2685 	kfree(rp);
2686 
2687 unlock:
2688 	hci_dev_unlock(hdev);
2689 	return err;
2690 }
2691 
/* Queue an HCI PIN Code Negative Reply and register it as a pending
 * mgmt command so userspace gets its reply on HCI completion.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command only carries the remote address */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2712 
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to
 * the controller. When the pending security level demands a 16-digit
 * PIN and the supplied one is shorter, a negative reply is sent
 * instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; reject anything
	 * shorter with a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2774 
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the IO capability used for
 * future pairing attempts. SMP_IO_KEYBOARD_DISPLAY is the highest
 * valid value.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2797 
2798 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2799 {
2800 	struct hci_dev *hdev = conn->hdev;
2801 	struct mgmt_pending_cmd *cmd;
2802 
2803 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2804 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2805 			continue;
2806 
2807 		if (cmd->user_data != conn)
2808 			continue;
2809 
2810 		return cmd;
2811 	}
2812 
2813 	return NULL;
2814 }
2815 
/* Finalize a MGMT_OP_PAIR_DEVICE command: reply to userspace, detach
 * the pairing callbacks from the connection and release the references
 * taken when pairing started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the usage count taken when the connection was created */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Release the reference stored in cmd->user_data */
	hci_conn_put(conn);

	return err;
}
2844 
2845 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2846 {
2847 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2848 	struct mgmt_pending_cmd *cmd;
2849 
2850 	cmd = find_pairing(conn);
2851 	if (cmd) {
2852 		cmd->cmd_complete(cmd, status);
2853 		mgmt_pending_remove(cmd);
2854 	}
2855 }
2856 
2857 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2858 {
2859 	struct mgmt_pending_cmd *cmd;
2860 
2861 	BT_DBG("status %u", status);
2862 
2863 	cmd = find_pairing(conn);
2864 	if (!cmd) {
2865 		BT_DBG("Unable to find a pending command");
2866 		return;
2867 	}
2868 
2869 	cmd->cmd_complete(cmd, mgmt_status(status));
2870 	mgmt_pending_remove(cmd);
2871 }
2872 
2873 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2874 {
2875 	struct mgmt_pending_cmd *cmd;
2876 
2877 	BT_DBG("status %u", status);
2878 
2879 	if (!status)
2880 		return;
2881 
2882 	cmd = find_pairing(conn);
2883 	if (!cmd) {
2884 		BT_DBG("Unable to find a pending command");
2885 		return;
2886 	}
2887 
2888 	cmd->cmd_complete(cmd, mgmt_status(status));
2889 	mgmt_pending_remove(cmd);
2890 }
2891 
2892 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2893 		       u16 len)
2894 {
2895 	struct mgmt_cp_pair_device *cp = data;
2896 	struct mgmt_rp_pair_device rp;
2897 	struct mgmt_pending_cmd *cmd;
2898 	u8 sec_level, auth_type;
2899 	struct hci_conn *conn;
2900 	int err;
2901 
2902 	bt_dev_dbg(hdev, "sock %p", sk);
2903 
2904 	memset(&rp, 0, sizeof(rp));
2905 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2906 	rp.addr.type = cp->addr.type;
2907 
2908 	if (!bdaddr_type_is_valid(cp->addr.type))
2909 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2910 					 MGMT_STATUS_INVALID_PARAMS,
2911 					 &rp, sizeof(rp));
2912 
2913 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2914 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2915 					 MGMT_STATUS_INVALID_PARAMS,
2916 					 &rp, sizeof(rp));
2917 
2918 	hci_dev_lock(hdev);
2919 
2920 	if (!hdev_is_powered(hdev)) {
2921 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2922 					MGMT_STATUS_NOT_POWERED, &rp,
2923 					sizeof(rp));
2924 		goto unlock;
2925 	}
2926 
2927 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2928 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2929 					MGMT_STATUS_ALREADY_PAIRED, &rp,
2930 					sizeof(rp));
2931 		goto unlock;
2932 	}
2933 
2934 	sec_level = BT_SECURITY_MEDIUM;
2935 	auth_type = HCI_AT_DEDICATED_BONDING;
2936 
2937 	if (cp->addr.type == BDADDR_BREDR) {
2938 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2939 				       auth_type, CONN_REASON_PAIR_DEVICE);
2940 	} else {
2941 		u8 addr_type = le_addr_type(cp->addr.type);
2942 		struct hci_conn_params *p;
2943 
2944 		/* When pairing a new device, it is expected to remember
2945 		 * this device for future connections. Adding the connection
2946 		 * parameter information ahead of time allows tracking
2947 		 * of the slave preferred values and will speed up any
2948 		 * further connection establishment.
2949 		 *
2950 		 * If connection parameters already exist, then they
2951 		 * will be kept and this function does nothing.
2952 		 */
2953 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2954 
2955 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2956 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2957 
2958 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2959 					   sec_level, HCI_LE_CONN_TIMEOUT,
2960 					   CONN_REASON_PAIR_DEVICE);
2961 	}
2962 
2963 	if (IS_ERR(conn)) {
2964 		int status;
2965 
2966 		if (PTR_ERR(conn) == -EBUSY)
2967 			status = MGMT_STATUS_BUSY;
2968 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
2969 			status = MGMT_STATUS_NOT_SUPPORTED;
2970 		else if (PTR_ERR(conn) == -ECONNREFUSED)
2971 			status = MGMT_STATUS_REJECTED;
2972 		else
2973 			status = MGMT_STATUS_CONNECT_FAILED;
2974 
2975 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2976 					status, &rp, sizeof(rp));
2977 		goto unlock;
2978 	}
2979 
2980 	if (conn->connect_cfm_cb) {
2981 		hci_conn_drop(conn);
2982 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2983 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2984 		goto unlock;
2985 	}
2986 
2987 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2988 	if (!cmd) {
2989 		err = -ENOMEM;
2990 		hci_conn_drop(conn);
2991 		goto unlock;
2992 	}
2993 
2994 	cmd->cmd_complete = pairing_complete;
2995 
2996 	/* For LE, just connecting isn't a proof that the pairing finished */
2997 	if (cp->addr.type == BDADDR_BREDR) {
2998 		conn->connect_cfm_cb = pairing_complete_cb;
2999 		conn->security_cfm_cb = pairing_complete_cb;
3000 		conn->disconn_cfm_cb = pairing_complete_cb;
3001 	} else {
3002 		conn->connect_cfm_cb = le_pairing_complete_cb;
3003 		conn->security_cfm_cb = le_pairing_complete_cb;
3004 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3005 	}
3006 
3007 	conn->io_capability = cp->io_cap;
3008 	cmd->user_data = hci_conn_get(conn);
3009 
3010 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3011 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3012 		cmd->cmd_complete(cmd, 0);
3013 		mgmt_pending_remove(cmd);
3014 	}
3015 
3016 	err = 0;
3017 
3018 unlock:
3019 	hci_dev_unlock(hdev);
3020 	return err;
3021 }
3022 
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the pending pair-device
 * command for the given address, remove any partially created keys and
 * tear down the link if it was only created for pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the device the pending pairing targets */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3079 
/* Common handler for all user pairing responses (PIN negative reply,
 * user confirm (neg) reply, user passkey (neg) reply). LE responses go
 * through SMP and complete immediately; BR/EDR responses are forwarded
 * as the given HCI command and complete asynchronously.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled by SMP, not by HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3150 
3151 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3152 			      void *data, u16 len)
3153 {
3154 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3155 
3156 	bt_dev_dbg(hdev, "sock %p", sk);
3157 
3158 	return user_pairing_resp(sk, hdev, &cp->addr,
3159 				MGMT_OP_PIN_CODE_NEG_REPLY,
3160 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3161 }
3162 
/* Handle MGMT_OP_USER_CONFIRM_REPLY. Unlike the other reply handlers,
 * this one also validates the command length explicitly.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3178 
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY via the common pairing helper */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3190 
/* Handle MGMT_OP_USER_PASSKEY_REPLY via the common pairing helper */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3202 
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY via the common pairing helper */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3214 
/* Re-schedule advertising when data covered by the given flags (e.g.
 * local name or appearance) has changed: cancel the current instance's
 * timeout and advance to the next instance so the new data is used.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3243 
/* HCI request completion callback for MGMT_OP_SET_LOCAL_NAME: reports the
 * result to the pending mgmt command and, on success, refreshes any
 * advertising instance that includes the local name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. cancelled) */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Re-advertise so instances using the local name pick up
		 * the new value.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3275 
/* Handler for MGMT_OP_SET_LOCAL_NAME: update the device name and short
 * name. While powered, the change is pushed to the controller (name, EIR
 * and LE scan response data) and completion is reported asynchronously via
 * set_name_complete(); otherwise only the cached values are updated.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		/* Notify other interested mgmt sockets of the change */
		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		/* Note: "failed" is also the common exit path on success */
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3345 
3346 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3347 			  u16 len)
3348 {
3349 	struct mgmt_cp_set_appearance *cp = data;
3350 	u16 appearance;
3351 	int err;
3352 
3353 	bt_dev_dbg(hdev, "sock %p", sk);
3354 
3355 	if (!lmp_le_capable(hdev))
3356 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3357 				       MGMT_STATUS_NOT_SUPPORTED);
3358 
3359 	appearance = le16_to_cpu(cp->appearance);
3360 
3361 	hci_dev_lock(hdev);
3362 
3363 	if (hdev->appearance != appearance) {
3364 		hdev->appearance = appearance;
3365 
3366 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3367 			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3368 
3369 		ext_info_changed(hdev, sk);
3370 	}
3371 
3372 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3373 				0);
3374 
3375 	hci_dev_unlock(hdev);
3376 
3377 	return err;
3378 }
3379 
3380 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3381 				 void *data, u16 len)
3382 {
3383 	struct mgmt_rp_get_phy_confguration rp;
3384 
3385 	bt_dev_dbg(hdev, "sock %p", sk);
3386 
3387 	hci_dev_lock(hdev);
3388 
3389 	memset(&rp, 0, sizeof(rp));
3390 
3391 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3392 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3393 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3394 
3395 	hci_dev_unlock(hdev);
3396 
3397 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3398 				 &rp, sizeof(rp));
3399 }
3400 
3401 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3402 {
3403 	struct mgmt_ev_phy_configuration_changed ev;
3404 
3405 	memset(&ev, 0, sizeof(ev));
3406 
3407 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3408 
3409 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3410 			  sizeof(ev), skip);
3411 }
3412 
/* HCI request completion callback for MGMT_OP_SET_PHY_CONFIGURATION:
 * relays the HCI status to the pending mgmt command and, on success,
 * broadcasts the PHY Configuration Changed event.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone */
	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Notify everyone else; the issuing socket already got
		 * the command complete above.
		 */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3443 
/* Handler for MGMT_OP_SET_PHY_CONFIGURATION: apply the requested PHY
 * selection. BR/EDR PHY bits map onto the ACL packet-type mask applied
 * immediately to hdev->pkt_type; LE PHY bits are translated into an
 * HCI LE Set Default PHY command completed via set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any selection containing unsupported PHY bits */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs are always on and must all stay selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do if the selection is unchanged */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate BR/EDR PHY selections into the ACL packet-type mask.
	 * Note that for the EDR 2M/3M packet types the HCI bits have
	 * inverted polarity: a set bit *disables* the packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part of the selection is unchanged, the BR/EDR-only
	 * update above is all that was needed - complete immediately.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller "no preference" for the
	 * corresponding direction when no explicit PHY was selected.
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3598 
3599 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3600 			    u16 len)
3601 {
3602 	int err = MGMT_STATUS_SUCCESS;
3603 	struct mgmt_cp_set_blocked_keys *keys = data;
3604 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3605 				   sizeof(struct mgmt_blocked_key_info));
3606 	u16 key_count, expected_len;
3607 	int i;
3608 
3609 	bt_dev_dbg(hdev, "sock %p", sk);
3610 
3611 	key_count = __le16_to_cpu(keys->key_count);
3612 	if (key_count > max_key_count) {
3613 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3614 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3615 				       MGMT_STATUS_INVALID_PARAMS);
3616 	}
3617 
3618 	expected_len = struct_size(keys, keys, key_count);
3619 	if (expected_len != len) {
3620 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3621 			   expected_len, len);
3622 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3623 				       MGMT_STATUS_INVALID_PARAMS);
3624 	}
3625 
3626 	hci_dev_lock(hdev);
3627 
3628 	hci_blocked_keys_clear(hdev);
3629 
3630 	for (i = 0; i < keys->key_count; ++i) {
3631 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3632 
3633 		if (!b) {
3634 			err = MGMT_STATUS_NO_RESOURCES;
3635 			break;
3636 		}
3637 
3638 		b->type = keys->keys[i].type;
3639 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3640 		list_add_rcu(&b->list, &hdev->blocked_keys);
3641 	}
3642 	hci_dev_unlock(hdev);
3643 
3644 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3645 				err, NULL, 0);
3646 }
3647 
/* Handler for MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband-speech
 * enabled flag. Only permitted while the controller is powered off (or
 * when the requested value matches the current one).
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Controller (driver) must declare wideband speech support */
	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Changing the setting while powered is rejected; only a no-op
	 * request (same value as current) is allowed in that state.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3703 
/* Handler for MGMT_OP_READ_SECURITY_INFO: report security capabilities as
 * a sequence of EIR-style (length, type, value) entries appended after the
 * fixed reply header.
 */
static int read_security_info(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	/* Sized for the header plus the entries appended below: one 1-byte
	 * flags entry and up to two 16-bit value entries.
	 */
	char buf[16];
	struct mgmt_rp_read_security_info *rp = (void *)buf;
	u16 sec_len = 0;
	u8 flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
					  hdev->max_enc_key_size);

	sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);

	rp->sec_len = cpu_to_le16(sec_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
				 rp, sizeof(*rp) + sec_len);
}
3752 
/* Experimental feature UUIDs. The byte arrays are stored in reversed
 * (little-endian) order relative to the string form given above each one,
 * matching the wire format used by the mgmt interface.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3772 
/* Handler for MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental
 * features available on the given index (or on the non-controller index
 * when hdev is NULL) together with their current flags.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[62];	/* Enough space for 3 features */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature is global and only reported on the
	 * non-controller index.
	 */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		/* BIT(0) = enabled; requires the controller to report
		 * valid LE states supporting all three roles below.
		 */
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && use_ll_privacy(hdev)) {
		/* BIT(0) = enabled, BIT(1) = changing it toggles the
		 * supported settings (see set_exp_feature()).
		 */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3831 
3832 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3833 					  struct sock *skip)
3834 {
3835 	struct mgmt_ev_exp_feature_changed ev;
3836 
3837 	memset(&ev, 0, sizeof(ev));
3838 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
3839 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3840 
3841 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3842 				  &ev, sizeof(ev),
3843 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3844 
3845 }
3846 
3847 #ifdef CONFIG_BT_FEATURE_DEBUG
3848 static int exp_debug_feature_changed(bool enabled, struct sock *skip)
3849 {
3850 	struct mgmt_ev_exp_feature_changed ev;
3851 
3852 	memset(&ev, 0, sizeof(ev));
3853 	memcpy(ev.uuid, debug_uuid, 16);
3854 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3855 
3856 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3857 				  &ev, sizeof(ev),
3858 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3859 }
3860 #endif
3861 
/* Handler for MGMT_OP_SET_EXP_FEATURE: enable/disable an experimental
 * feature identified by UUID. A zero UUID disables all features.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	struct mgmt_rp_set_exp_feature rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* All-zeros UUID: turn every experimental feature off */
	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
		memset(rp.uuid, 0, 16);
		rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
		if (!hdev) {
			bool changed = bt_dbg_get();

			bt_dbg_set(false);

			if (changed)
				exp_debug_feature_changed(false, sk);
		}
#endif

		if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
			bool changed = hci_dev_test_flag(hdev,
							 HCI_ENABLE_LL_PRIVACY);

			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			if (changed)
				exp_ll_privacy_feature_changed(false, hdev, sk);
		}

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
					 MGMT_OP_SET_EXP_FEATURE, 0,
					 &rp, sizeof(rp));
	}

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!memcmp(cp->uuid, debug_uuid, 16)) {
		bool val, changed;
		int err;

		/* Command requires to use the non-controller index */
		if (hdev)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];
		changed = val ? !bt_dbg_get() : bt_dbg_get();
		bt_dbg_set(val);

		memcpy(rp.uuid, debug_uuid, 16);
		rp.flags = cpu_to_le32(val ? BIT(0) : 0);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_debug_feature_changed(val, sk);

		return err;
	}
#endif

	if (!memcmp(cp->uuid, rpa_resolution_uuid, 16)) {
		bool val, changed;
		int err;
		u32 flags;

		/* Command requires to use the controller index */
		if (!hdev)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Changes can only be made when controller is powered down */
		if (hdev_is_powered(hdev))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_NOT_POWERED);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];

		if (val) {
			changed = !hci_dev_test_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
			hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);

			/* Enable LL privacy + supported settings changed */
			flags = BIT(0) | BIT(1);
		} else {
			changed = hci_dev_test_flag(hdev,
						    HCI_ENABLE_LL_PRIVACY);
			hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

			/* Disable LL privacy + supported settings changed */
			flags = BIT(1);
		}

		memcpy(rp.uuid, rpa_resolution_uuid, 16);
		rp.flags = cpu_to_le32(flags);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_ll_privacy_feature_changed(val, hdev, sk);

		return err;
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
4012 
/* Bitmask with all currently defined device flags set (bits 0..MAX-1) */
#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4014 
4015 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4016 			    u16 data_len)
4017 {
4018 	struct mgmt_cp_get_device_flags *cp = data;
4019 	struct mgmt_rp_get_device_flags rp;
4020 	struct bdaddr_list_with_flags *br_params;
4021 	struct hci_conn_params *params;
4022 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4023 	u32 current_flags = 0;
4024 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4025 
4026 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4027 		   &cp->addr.bdaddr, cp->addr.type);
4028 
4029 	hci_dev_lock(hdev);
4030 
4031 	if (cp->addr.type == BDADDR_BREDR) {
4032 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4033 							      &cp->addr.bdaddr,
4034 							      cp->addr.type);
4035 		if (!br_params)
4036 			goto done;
4037 
4038 		current_flags = br_params->current_flags;
4039 	} else {
4040 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4041 						le_addr_type(cp->addr.type));
4042 
4043 		if (!params)
4044 			goto done;
4045 
4046 		current_flags = params->current_flags;
4047 	}
4048 
4049 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4050 	rp.addr.type = cp->addr.type;
4051 	rp.supported_flags = cpu_to_le32(supported_flags);
4052 	rp.current_flags = cpu_to_le32(current_flags);
4053 
4054 	status = MGMT_STATUS_SUCCESS;
4055 
4056 done:
4057 	hci_dev_unlock(hdev);
4058 
4059 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4060 				&rp, sizeof(rp));
4061 }
4062 
4063 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4064 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4065 				 u32 supported_flags, u32 current_flags)
4066 {
4067 	struct mgmt_ev_device_flags_changed ev;
4068 
4069 	bacpy(&ev.addr.bdaddr, bdaddr);
4070 	ev.addr.type = bdaddr_type;
4071 	ev.supported_flags = cpu_to_le32(supported_flags);
4072 	ev.current_flags = cpu_to_le32(current_flags);
4073 
4074 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4075 }
4076 
4077 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4078 			    u16 len)
4079 {
4080 	struct mgmt_cp_set_device_flags *cp = data;
4081 	struct bdaddr_list_with_flags *br_params;
4082 	struct hci_conn_params *params;
4083 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4084 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4085 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4086 
4087 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4088 		   &cp->addr.bdaddr, cp->addr.type,
4089 		   __le32_to_cpu(current_flags));
4090 
4091 	if ((supported_flags | current_flags) != supported_flags) {
4092 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4093 			    current_flags, supported_flags);
4094 		goto done;
4095 	}
4096 
4097 	hci_dev_lock(hdev);
4098 
4099 	if (cp->addr.type == BDADDR_BREDR) {
4100 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
4101 							      &cp->addr.bdaddr,
4102 							      cp->addr.type);
4103 
4104 		if (br_params) {
4105 			br_params->current_flags = current_flags;
4106 			status = MGMT_STATUS_SUCCESS;
4107 		} else {
4108 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4109 				    &cp->addr.bdaddr, cp->addr.type);
4110 		}
4111 	} else {
4112 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4113 						le_addr_type(cp->addr.type));
4114 		if (params) {
4115 			params->current_flags = current_flags;
4116 			status = MGMT_STATUS_SUCCESS;
4117 		} else {
4118 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4119 				    &cp->addr.bdaddr,
4120 				    le_addr_type(cp->addr.type));
4121 		}
4122 	}
4123 
4124 done:
4125 	hci_dev_unlock(hdev);
4126 
4127 	if (status == MGMT_STATUS_SUCCESS)
4128 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4129 				     supported_flags, current_flags);
4130 
4131 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4132 				 &cp->addr, sizeof(cp->addr));
4133 }
4134 
4135 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4136 				   u16 handle)
4137 {
4138 	struct mgmt_ev_adv_monitor_added ev;
4139 
4140 	ev.monitor_handle = cpu_to_le16(handle);
4141 
4142 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4143 }
4144 
4145 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
4146 				     u16 handle)
4147 {
4148 	struct mgmt_ev_adv_monitor_added ev;
4149 
4150 	ev.monitor_handle = cpu_to_le16(handle);
4151 
4152 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
4153 }
4154 
4155 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4156 				 void *data, u16 len)
4157 {
4158 	struct adv_monitor *monitor = NULL;
4159 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4160 	int handle;
4161 	size_t rp_size = 0;
4162 	__u32 supported = 0;
4163 	__u16 num_handles = 0;
4164 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4165 
4166 	BT_DBG("request for %s", hdev->name);
4167 
4168 	hci_dev_lock(hdev);
4169 
4170 	if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
4171 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4172 
4173 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
4174 		handles[num_handles++] = monitor->handle;
4175 	}
4176 
4177 	hci_dev_unlock(hdev);
4178 
4179 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4180 	rp = kmalloc(rp_size, GFP_KERNEL);
4181 	if (!rp)
4182 		return -ENOMEM;
4183 
4184 	/* Once controller-based monitoring is in place, the enabled_features
4185 	 * should reflect the use.
4186 	 */
4187 	rp->supported_features = cpu_to_le32(supported);
4188 	rp->enabled_features = 0;
4189 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4190 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4191 	rp->num_handles = cpu_to_le16(num_handles);
4192 	if (num_handles)
4193 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4194 
4195 	return mgmt_cmd_complete(sk, hdev->id,
4196 				 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4197 				 MGMT_STATUS_SUCCESS, rp, rp_size);
4198 }
4199 
4200 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4201 				    void *data, u16 len)
4202 {
4203 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4204 	struct mgmt_rp_add_adv_patterns_monitor rp;
4205 	struct adv_monitor *m = NULL;
4206 	struct adv_pattern *p = NULL;
4207 	unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
4208 	__u8 cp_ofst = 0, cp_len = 0;
4209 	int err, i;
4210 
4211 	BT_DBG("request for %s", hdev->name);
4212 
4213 	if (len <= sizeof(*cp) || cp->pattern_count == 0) {
4214 		err = mgmt_cmd_status(sk, hdev->id,
4215 				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4216 				      MGMT_STATUS_INVALID_PARAMS);
4217 		goto failed;
4218 	}
4219 
4220 	m = kmalloc(sizeof(*m), GFP_KERNEL);
4221 	if (!m) {
4222 		err = -ENOMEM;
4223 		goto failed;
4224 	}
4225 
4226 	INIT_LIST_HEAD(&m->patterns);
4227 	m->active = false;
4228 
4229 	for (i = 0; i < cp->pattern_count; i++) {
4230 		if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
4231 			err = mgmt_cmd_status(sk, hdev->id,
4232 					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4233 					      MGMT_STATUS_INVALID_PARAMS);
4234 			goto failed;
4235 		}
4236 
4237 		cp_ofst = cp->patterns[i].offset;
4238 		cp_len = cp->patterns[i].length;
4239 		if (cp_ofst >= HCI_MAX_AD_LENGTH ||
4240 		    cp_len > HCI_MAX_AD_LENGTH ||
4241 		    (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
4242 			err = mgmt_cmd_status(sk, hdev->id,
4243 					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4244 					      MGMT_STATUS_INVALID_PARAMS);
4245 			goto failed;
4246 		}
4247 
4248 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4249 		if (!p) {
4250 			err = -ENOMEM;
4251 			goto failed;
4252 		}
4253 
4254 		p->ad_type = cp->patterns[i].ad_type;
4255 		p->offset = cp->patterns[i].offset;
4256 		p->length = cp->patterns[i].length;
4257 		memcpy(p->value, cp->patterns[i].value, p->length);
4258 
4259 		INIT_LIST_HEAD(&p->list);
4260 		list_add(&p->list, &m->patterns);
4261 	}
4262 
4263 	if (mp_cnt != cp->pattern_count) {
4264 		err = mgmt_cmd_status(sk, hdev->id,
4265 				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4266 				      MGMT_STATUS_INVALID_PARAMS);
4267 		goto failed;
4268 	}
4269 
4270 	hci_dev_lock(hdev);
4271 
4272 	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4273 
4274 	err = hci_add_adv_monitor(hdev, m);
4275 	if (err) {
4276 		if (err == -ENOSPC) {
4277 			mgmt_cmd_status(sk, hdev->id,
4278 					MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4279 					MGMT_STATUS_NO_RESOURCES);
4280 		}
4281 		goto unlock;
4282 	}
4283 
4284 	if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
4285 		mgmt_adv_monitor_added(sk, hdev, m->handle);
4286 
4287 	hci_dev_unlock(hdev);
4288 
4289 	rp.monitor_handle = cpu_to_le16(m->handle);
4290 
4291 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
4292 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4293 
4294 unlock:
4295 	hci_dev_unlock(hdev);
4296 
4297 failed:
4298 	hci_free_adv_monitor(m);
4299 	return err;
4300 }
4301 
4302 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4303 			      void *data, u16 len)
4304 {
4305 	struct mgmt_cp_remove_adv_monitor *cp = data;
4306 	struct mgmt_rp_remove_adv_monitor rp;
4307 	unsigned int prev_adv_monitors_cnt;
4308 	u16 handle;
4309 	int err;
4310 
4311 	BT_DBG("request for %s", hdev->name);
4312 
4313 	hci_dev_lock(hdev);
4314 
4315 	handle = __le16_to_cpu(cp->monitor_handle);
4316 	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;
4317 
4318 	err = hci_remove_adv_monitor(hdev, handle);
4319 	if (err == -ENOENT) {
4320 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4321 				      MGMT_STATUS_INVALID_INDEX);
4322 		goto unlock;
4323 	}
4324 
4325 	if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
4326 		mgmt_adv_monitor_removed(sk, hdev, handle);
4327 
4328 	hci_dev_unlock(hdev);
4329 
4330 	rp.monitor_handle = cp->monitor_handle;
4331 
4332 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
4333 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
4334 
4335 unlock:
4336 	hci_dev_unlock(hdev);
4337 	return err;
4338 }
4339 
/* Request-completion callback for MGMT_OP_READ_LOCAL_OOB_DATA.
 *
 * Translates the controller's HCI reply - either the legacy
 * HCI_OP_READ_LOCAL_OOB_DATA response carrying only P-192 hash/rand
 * values, or the extended response carrying both P-192 and P-256
 * values - into a mgmt_rp_read_local_oob_data reply for the pending
 * mgmt command, and removes that pending command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	/* Nothing to do if the command is no longer pending. */
	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	/* A zero HCI status with a missing skb still counts as failure. */
	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated HCI reply. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* The legacy reply carries no P-256 values; shrink the
		 * mgmt response so userspace only sees the P-192 fields.
		 */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a truncated HCI reply. */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4398 
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for its local
 * out-of-band pairing data.
 *
 * Requires a powered, SSP-capable controller and no other instance of
 * this command already pending. The actual reply is delivered
 * asynchronously from read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data command may be in flight. */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Prefer the extended variant (P-192 + P-256) when the controller
	 * supports BR/EDR Secure Connections.
	 */
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4449 
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store out-of-band pairing data
 * received from a remote device.
 *
 * Two wire formats are accepted, distinguished purely by length:
 * the legacy format with only P-192 hash/randomizer (BR/EDR only),
 * and the extended format carrying both P-192 and P-256 values.
 * Any other length is rejected as invalid parameters.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy (P-192 only) format is BR/EDR specific. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4557 
4558 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
4559 				  void *data, u16 len)
4560 {
4561 	struct mgmt_cp_remove_remote_oob_data *cp = data;
4562 	u8 status;
4563 	int err;
4564 
4565 	bt_dev_dbg(hdev, "sock %p", sk);
4566 
4567 	if (cp->addr.type != BDADDR_BREDR)
4568 		return mgmt_cmd_complete(sk, hdev->id,
4569 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4570 					 MGMT_STATUS_INVALID_PARAMS,
4571 					 &cp->addr, sizeof(cp->addr));
4572 
4573 	hci_dev_lock(hdev);
4574 
4575 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4576 		hci_remote_oob_data_clear(hdev);
4577 		status = MGMT_STATUS_SUCCESS;
4578 		goto done;
4579 	}
4580 
4581 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4582 	if (err < 0)
4583 		status = MGMT_STATUS_INVALID_PARAMS;
4584 	else
4585 		status = MGMT_STATUS_SUCCESS;
4586 
4587 done:
4588 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4589 				status, &cp->addr, sizeof(cp->addr));
4590 
4591 	hci_dev_unlock(hdev);
4592 	return err;
4593 }
4594 
4595 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
4596 {
4597 	struct mgmt_pending_cmd *cmd;
4598 
4599 	bt_dev_dbg(hdev, "status %d", status);
4600 
4601 	hci_dev_lock(hdev);
4602 
4603 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
4604 	if (!cmd)
4605 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
4606 
4607 	if (!cmd)
4608 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
4609 
4610 	if (cmd) {
4611 		cmd->cmd_complete(cmd, mgmt_status(status));
4612 		mgmt_pending_remove(cmd);
4613 	}
4614 
4615 	hci_dev_unlock(hdev);
4616 
4617 	/* Handle suspend notifier */
4618 	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
4619 			       hdev->suspend_tasks)) {
4620 		bt_dev_dbg(hdev, "Unpaused discovery");
4621 		wake_up(&hdev->suspend_wait_q);
4622 	}
4623 }
4624 
4625 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4626 				    uint8_t *mgmt_status)
4627 {
4628 	switch (type) {
4629 	case DISCOV_TYPE_LE:
4630 		*mgmt_status = mgmt_le_support(hdev);
4631 		if (*mgmt_status)
4632 			return false;
4633 		break;
4634 	case DISCOV_TYPE_INTERLEAVED:
4635 		*mgmt_status = mgmt_le_support(hdev);
4636 		if (*mgmt_status)
4637 			return false;
4638 		fallthrough;
4639 	case DISCOV_TYPE_BREDR:
4640 		*mgmt_status = mgmt_bredr_support(hdev);
4641 		if (*mgmt_status)
4642 			return false;
4643 		break;
4644 	default:
4645 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4646 		return false;
4647 	}
4648 
4649 	return true;
4650 }
4651 
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (distinguished by @op).
 *
 * Validates the device and discovery state, records the requested
 * discovery parameters in hdev->discovery, and kicks off the actual
 * work on the request workqueue. Completion is reported later via
 * mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject if discovery is already in progress or the controller
	 * is doing periodic inquiry.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The actual scan work runs asynchronously on the request
	 * workqueue; mark the state machine as starting.
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4719 
/* Handle MGMT_OP_START_DISCOVERY: thin wrapper around the shared
 * discovery-start implementation.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
4726 
/* Handle MGMT_OP_START_LIMITED_DISCOVERY: thin wrapper around the
 * shared discovery-start implementation with the limited-discovery op.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
4734 
/* Completion handler for Start Service Discovery: reply with only the
 * first byte of the stored command parameters (the discovery type),
 * not the full parameter blob with RSSI and UUID list.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4741 
/* Handle MGMT_OP_START_SERVICE_DISCOVERY: start discovery with result
 * filtering by RSSI threshold and/or a list of 128-bit service UUIDs.
 *
 * The command carries a variable-length UUID list, so both the claimed
 * uuid_count and the total command length are validated before the
 * list is copied into hdev->discovery.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest uuid_count whose 16-byte entries still fit in a u16
	 * length field together with the fixed header.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Bound uuid_count first so the expected_len computation below
	 * cannot overflow.
	 */
	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4842 
/* Called when a stop-discovery request has finished. Completes and
 * removes the pending Stop Discovery mgmt command, and wakes up the
 * suspend machinery if it was waiting for discovery to be paused.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4865 
/* Handle MGMT_OP_STOP_DISCOVERY: request that an active discovery of
 * the matching type be stopped.
 *
 * The stop itself runs asynchronously on the request workqueue and is
 * completed via mgmt_stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the type of the running
	 * discovery session.
	 */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4907 
/* Handle MGMT_OP_CONFIRM_NAME: userspace tells us whether it already
 * knows the name of a device found during discovery.
 *
 * If the name is known the inquiry cache entry is resolved and removed
 * from the name-resolution list; otherwise it is marked as needing a
 * remote name request.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense while discovery is active. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4949 
4950 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4951 			u16 len)
4952 {
4953 	struct mgmt_cp_block_device *cp = data;
4954 	u8 status;
4955 	int err;
4956 
4957 	bt_dev_dbg(hdev, "sock %p", sk);
4958 
4959 	if (!bdaddr_type_is_valid(cp->addr.type))
4960 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4961 					 MGMT_STATUS_INVALID_PARAMS,
4962 					 &cp->addr, sizeof(cp->addr));
4963 
4964 	hci_dev_lock(hdev);
4965 
4966 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4967 				  cp->addr.type);
4968 	if (err < 0) {
4969 		status = MGMT_STATUS_FAILED;
4970 		goto done;
4971 	}
4972 
4973 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4974 		   sk);
4975 	status = MGMT_STATUS_SUCCESS;
4976 
4977 done:
4978 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4979 				&cp->addr, sizeof(cp->addr));
4980 
4981 	hci_dev_unlock(hdev);
4982 
4983 	return err;
4984 }
4985 
4986 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4987 			  u16 len)
4988 {
4989 	struct mgmt_cp_unblock_device *cp = data;
4990 	u8 status;
4991 	int err;
4992 
4993 	bt_dev_dbg(hdev, "sock %p", sk);
4994 
4995 	if (!bdaddr_type_is_valid(cp->addr.type))
4996 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4997 					 MGMT_STATUS_INVALID_PARAMS,
4998 					 &cp->addr, sizeof(cp->addr));
4999 
5000 	hci_dev_lock(hdev);
5001 
5002 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
5003 				  cp->addr.type);
5004 	if (err < 0) {
5005 		status = MGMT_STATUS_INVALID_PARAMS;
5006 		goto done;
5007 	}
5008 
5009 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5010 		   sk);
5011 	status = MGMT_STATUS_SUCCESS;
5012 
5013 done:
5014 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5015 				&cp->addr, sizeof(cp->addr));
5016 
5017 	hci_dev_unlock(hdev);
5018 
5019 	return err;
5020 }
5021 
5022 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5023 			 u16 len)
5024 {
5025 	struct mgmt_cp_set_device_id *cp = data;
5026 	struct hci_request req;
5027 	int err;
5028 	__u16 source;
5029 
5030 	bt_dev_dbg(hdev, "sock %p", sk);
5031 
5032 	source = __le16_to_cpu(cp->source);
5033 
5034 	if (source > 0x0002)
5035 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5036 				       MGMT_STATUS_INVALID_PARAMS);
5037 
5038 	hci_dev_lock(hdev);
5039 
5040 	hdev->devid_source = source;
5041 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5042 	hdev->devid_product = __le16_to_cpu(cp->product);
5043 	hdev->devid_version = __le16_to_cpu(cp->version);
5044 
5045 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5046 				NULL, 0);
5047 
5048 	hci_req_init(&req, hdev);
5049 	__hci_req_update_eir(&req);
5050 	hci_req_run(&req, NULL);
5051 
5052 	hci_dev_unlock(hdev);
5053 
5054 	return err;
5055 }
5056 
/* Completion callback used when re-enabling instance advertising; the
 * result is only logged, no further action is taken.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}
5062 
/* Completion handler for the Set Advertising HCI request.
 *
 * Synchronizes the HCI_ADVERTISING flag with the controller state,
 * answers all pending Set Advertising commands, notifies settings
 * changes, wakes the suspend machinery if needed, and - when global
 * advertising was just turned off - reschedules any configured
 * multi-instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's actual advertising state into the
	 * mgmt-visible HCI_ADVERTISING flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* Fall back to the first configured instance if none is current. */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5136 
/* Handle MGMT_OP_SET_ADVERTISING: globally enable (0x01), enable as
 * connectable (0x02) or disable (0x00) LE advertising.
 *
 * When no HCI traffic is needed (device off, no effective change, or
 * advertising would interfere with an LE connection / active scan) the
 * flags are toggled directly and the reply is sent immediately;
 * otherwise an HCI request is issued and completed via
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Reject while advertising is paused (e.g. during suspend). */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag actually changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5255 
5256 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5257 			      void *data, u16 len)
5258 {
5259 	struct mgmt_cp_set_static_address *cp = data;
5260 	int err;
5261 
5262 	bt_dev_dbg(hdev, "sock %p", sk);
5263 
5264 	if (!lmp_le_capable(hdev))
5265 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5266 				       MGMT_STATUS_NOT_SUPPORTED);
5267 
5268 	if (hdev_is_powered(hdev))
5269 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5270 				       MGMT_STATUS_REJECTED);
5271 
5272 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5273 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5274 			return mgmt_cmd_status(sk, hdev->id,
5275 					       MGMT_OP_SET_STATIC_ADDRESS,
5276 					       MGMT_STATUS_INVALID_PARAMS);
5277 
5278 		/* Two most significant bits shall be set */
5279 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5280 			return mgmt_cmd_status(sk, hdev->id,
5281 					       MGMT_OP_SET_STATIC_ADDRESS,
5282 					       MGMT_STATUS_INVALID_PARAMS);
5283 	}
5284 
5285 	hci_dev_lock(hdev);
5286 
5287 	bacpy(&hdev->static_addr, &cp->bdaddr);
5288 
5289 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5290 	if (err < 0)
5291 		goto unlock;
5292 
5293 	err = new_settings(hdev, sk);
5294 
5295 unlock:
5296 	hci_dev_unlock(hdev);
5297 	return err;
5298 }
5299 
/* Handle MGMT_OP_SET_SCAN_PARAMS: set the LE scan interval and window.
 *
 * Both values must be within the HCI-defined range 0x0004-0x4000 and
 * the window must not exceed the interval. If passive background
 * scanning is currently running it is restarted so the new parameters
 * take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must fit inside the scan interval. */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req, false);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
5356 
/* HCI request callback for Set Fast Connectable - on success flip the
 * HCI_FAST_CONNECTABLE flag to the requested value and answer the
 * pending management command; on failure report the HCI error.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. cleaned up on
	 * power off), in which case there is nothing left to do.
	 */
	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		struct mgmt_mode *cp = cmd->param;

		/* Only update the flag once the controller accepted the
		 * change.
		 */
		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5390 
/* Set Fast Connectable - toggle the HCI_FAST_CONNECTABLE setting. When
 * the controller is powered the change is sent to the hardware and the
 * reply is deferred to fast_connectable_complete(); otherwise only the
 * flag is updated.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Fast connectable is a BR/EDR feature that requires at least
	 * Bluetooth 1.2 (for interlaced page scan).
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable may be in flight at a time */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested - just confirm the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off only the flag needs updating; the controller
	 * will be configured accordingly on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5455 
/* HCI request callback for Set BR/EDR - either confirm the new setting
 * to the pending command or, on failure, revert the HCI_BREDR_ENABLED
 * flag that set_bredr() flipped optimistically before running the
 * request.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5487 
/* Set BR/EDR - enable or disable the BR/EDR transport of a dual-mode
 * controller. Requires LE to be enabled. Disabling while powered on is
 * rejected, and re-enabling is refused for configurations that would be
 * invalid (static address or secure connections in use - see the
 * detailed comment below).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only meaningful on dual-mode (BR/EDR + LE) controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled, so toggling BR/EDR requires it */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested - just confirm the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all settings that only
		 * make sense with BR/EDR present.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	/* On failure set_bredr_complete() reverts HCI_BREDR_ENABLED */
	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5599 
/* HCI request callback for Set Secure Connections - translate the
 * requested mode (0x00 off, 0x01 on, 0x02 SC-only) into the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags once the controller accepted the
 * change, then answer the pending management command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		/* Leave the flags untouched on failure */
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5644 
/* Set Secure Connections - control the HCI_SC_ENABLED and HCI_SC_ONLY
 * flags. Value 0x00 disables, 0x01 enables and 0x02 enables SC-only
 * mode. When the controller is powered and BR/EDR SC capable the
 * support bit is also written to the controller, with the reply
 * deferred to sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC needs either controller support or LE (where it is a host
	 * stack feature).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* For BR/EDR, SC builds on top of SSP which must be enabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without a powered, SC capable BR/EDR controller only the host
	 * flags need updating - no HCI command required.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No change in either flag - just confirm the current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

/* Despite the name this label is also the normal exit path; it only
 * drops the lock and returns err.
 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
5732 
/* Set Debug Keys - control whether debug keys are kept (0x01) and
 * additionally whether the controller should actively use SSP debug
 * mode (0x02). Value 0x00 disables both.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both imply keeping debug keys */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 turns on active use of debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* If the "use" state changed while powered with SSP on, tell the
	 * controller to switch SSP debug mode accordingly.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5779 
/* Set Privacy - enable or disable LE privacy (RPA usage) and store the
 * local IRK supplied by user space. 0x01 enables privacy, 0x02 enables
 * limited privacy. Only allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Changing the IRK/privacy mode of a running controller is not
	 * supported.
	 */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the stored IRK when privacy is switched off */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5836 
5837 static bool irk_is_valid(struct mgmt_irk_info *irk)
5838 {
5839 	switch (irk->addr.type) {
5840 	case BDADDR_LE_PUBLIC:
5841 		return true;
5842 
5843 	case BDADDR_LE_RANDOM:
5844 		/* Two most significant bits shall be set */
5845 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5846 			return false;
5847 		return true;
5848 	}
5849 
5850 	return false;
5851 }
5852 
/* Load IRKs - replace the stored set of Identity Resolving Keys with
 * the list supplied by user space. The whole list is validated before
 * any existing keys are cleared, so an invalid entry leaves the current
 * state untouched.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total parameter length within u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the received length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries up front so the operation is all-or-nothing */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not rejected */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space that loads IRKs is expected to resolve RPAs */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5923 
5924 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5925 {
5926 	if (key->master != 0x00 && key->master != 0x01)
5927 		return false;
5928 
5929 	switch (key->addr.type) {
5930 	case BDADDR_LE_PUBLIC:
5931 		return true;
5932 
5933 	case BDADDR_LE_RANDOM:
5934 		/* Two most significant bits shall be set */
5935 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5936 			return false;
5937 		return true;
5938 	}
5939 
5940 	return false;
5941 }
5942 
/* Load Long Term Keys - replace the stored set of SMP LTKs with the
 * list supplied by user space. The whole list is validated before any
 * existing keys are cleared.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the total parameter length within u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must exactly match the received length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate all entries up front so the operation is all-or-nothing */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not rejected */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* NOTE(review): the two assignments above are dead -
			 * falling through into the default case skips debug
			 * keys entirely, like any unknown type. Presumably
			 * intentional (debug LTKs are not loaded) - confirm.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6038 
6039 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6040 {
6041 	struct hci_conn *conn = cmd->user_data;
6042 	struct mgmt_rp_get_conn_info rp;
6043 	int err;
6044 
6045 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6046 
6047 	if (status == MGMT_STATUS_SUCCESS) {
6048 		rp.rssi = conn->rssi;
6049 		rp.tx_power = conn->tx_power;
6050 		rp.max_tx_power = conn->max_tx_power;
6051 	} else {
6052 		rp.rssi = HCI_RSSI_INVALID;
6053 		rp.tx_power = HCI_TX_POWER_INVALID;
6054 		rp.max_tx_power = HCI_TX_POWER_INVALID;
6055 	}
6056 
6057 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6058 				status, &rp, sizeof(rp));
6059 
6060 	hci_conn_drop(conn);
6061 	hci_conn_put(conn);
6062 
6063 	return err;
6064 }
6065 
/* HCI request callback for the connection-info refresh issued by
 * get_conn_info(). Recovers the connection handle from the last sent
 * command and completes the matching pending management command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* Match the pending command by connection (stored as user_data) */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	/* conn_info_cmd_complete() sends the reply and drops conn refs */
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6119 
/* Get Connection Information - return RSSI and TX power for an active
 * connection. Cached values are returned directly while still fresh;
 * otherwise a refresh request is sent to the controller and the reply
 * is deferred to conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Every reply, including errors, echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up the connection on the transport matching the address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one refresh per connection may be in flight at a time */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		/* Read RSSI is always sent first; the completion handler
		 * relies on this ordering to determine the status.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References are dropped by conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6240 
/* Build and send the Get Clock Information reply. On failure only the
 * (zeroed) address portion is filled in. Releases the connection
 * references taken by get_clock_info(), if any.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	/* Piconet clock data is only present when a connection was given */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Balance the hold and reference taken in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
6276 
/* HCI request callback for Get Clock Information - recover the
 * connection (if a piconet clock was requested) from the sent Read
 * Clock command and complete the matching pending management command.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which == 0x01 means the piconet clock of a connection was read */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* Match the pending command by connection (stored as user_data) */
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	/* clock_info_cmd_complete() sends the reply and drops conn refs */
	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6308 
/* Get Clock Information - read the local clock and, when a peer address
 * is given, also the piconet clock of that BR/EDR connection. The reply
 * is deferred to get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Every reply, including errors, echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is a BR/EDR-only concept */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects the piconet clock of that connection */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read the local clock (which = 0x00, handle = 0x0000) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References are dropped by clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6384 
6385 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6386 {
6387 	struct hci_conn *conn;
6388 
6389 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6390 	if (!conn)
6391 		return false;
6392 
6393 	if (conn->dst_type != type)
6394 		return false;
6395 
6396 	if (conn->state != BT_CONNECTED)
6397 		return false;
6398 
6399 	return true;
6400 }
6401 
/* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for a connection parameter entry,
 * creating the entry if needed, and move it onto the matching pending
 * connection/report list. Returns 0 on success or -EIO if the entry
 * could not be created.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* hci_conn_params_add() returns an existing entry if present */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from any pending list before re-filing below */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Already connected devices don't need a pending entry */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6446 
6447 static void device_added(struct sock *sk, struct hci_dev *hdev,
6448 			 bdaddr_t *bdaddr, u8 type, u8 action)
6449 {
6450 	struct mgmt_ev_device_added ev;
6451 
6452 	bacpy(&ev.addr.bdaddr, bdaddr);
6453 	ev.addr.type = type;
6454 	ev.action = action;
6455 
6456 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
6457 }
6458 
/* Handler for the MGMT Add Device command: whitelist a BR/EDR device
 * for incoming connections, or configure the auto-connect policy for an
 * LE device.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Reject unknown address types and the wildcard BDADDR_ANY */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00-0x02 are defined (see mapping below) */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Re-evaluate page scan now that the whitelist changed */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy:
	 * 0x02 = always auto-connect, 0x01 = direct connect,
	 * 0x00 = passive scanning/reporting only.
	 */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6556 
6557 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6558 			   bdaddr_t *bdaddr, u8 type)
6559 {
6560 	struct mgmt_ev_device_removed ev;
6561 
6562 	bacpy(&ev.addr.bdaddr, bdaddr);
6563 	ev.addr.type = type;
6564 
6565 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6566 }
6567 
/* Handler for the MGMT Remove Device command. A specific address removes
 * that one device; the wildcard BDADDR_ANY (with address type 0) flushes
 * the whole whitelist and all non-disabled LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Re-evaluate page scan after the whitelist change */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries in the DISABLED or EXPLICIT state were not
		 * created via Add Device (which only sets ALWAYS, DIRECT
		 * or REPORT) and cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* For the wildcard address the type must be zero */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with an ongoing explicit connect
			 * attempt, just downgrade them to explicit-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6696 
/* Handler for the MGMT Load Connection Parameters command: load LE
 * connection parameters supplied by userspace. Invalid entries are
 * logged and skipped instead of failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound on param_count such that the total command payload
	 * still fits in the u16 length field.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared element count must match the actual payload size */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Drop all disabled entries before loading the new set */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE addresses can carry connection parameters */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6781 
6782 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6783 			       void *data, u16 len)
6784 {
6785 	struct mgmt_cp_set_external_config *cp = data;
6786 	bool changed;
6787 	int err;
6788 
6789 	bt_dev_dbg(hdev, "sock %p", sk);
6790 
6791 	if (hdev_is_powered(hdev))
6792 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6793 				       MGMT_STATUS_REJECTED);
6794 
6795 	if (cp->config != 0x00 && cp->config != 0x01)
6796 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6797 				         MGMT_STATUS_INVALID_PARAMS);
6798 
6799 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6800 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6801 				       MGMT_STATUS_NOT_SUPPORTED);
6802 
6803 	hci_dev_lock(hdev);
6804 
6805 	if (cp->config)
6806 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6807 	else
6808 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6809 
6810 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6811 	if (err < 0)
6812 		goto unlock;
6813 
6814 	if (!changed)
6815 		goto unlock;
6816 
6817 	err = new_options(hdev, sk);
6818 
6819 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6820 		mgmt_index_removed(hdev);
6821 
6822 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6823 			hci_dev_set_flag(hdev, HCI_CONFIG);
6824 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6825 
6826 			queue_work(hdev->req_workqueue, &hdev->power_on);
6827 		} else {
6828 			set_bit(HCI_RAW, &hdev->flags);
6829 			mgmt_index_added(hdev);
6830 		}
6831 	}
6832 
6833 unlock:
6834 	hci_dev_unlock(hdev);
6835 	return err;
6836 }
6837 
/* Handler for the MGMT Set Public Address command. Stores the address
 * so the driver can program it via hdev->set_bdaddr; only allowed while
 * the controller is powered off and the driver supports it.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The wildcard address is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for configuring the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If the device is now fully configured, move it from the
	 * unconfigured to the configured index list and trigger an
	 * automatic power-on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6889 
/* Completion handler for the HCI request issued by
 * read_local_ssp_oob_req(). Converts the controller's OOB data reply
 * into EIR-formatted fields, completes the pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command and, on success, emits the
 * Local OOB Data Updated event to other interested sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: P-192 hash/randomizer only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			/* h192 etc. stay unset here; the non-zero status
			 * skips their use below.
			 */
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev field (5) + C192 (18) + R192 (18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply: both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* In Secure Connections Only mode the P-192
			 * values are left out of the reply.
			 */
			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On error send an empty (eir_len == 0) reply with the status */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7000 
7001 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7002 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7003 {
7004 	struct mgmt_pending_cmd *cmd;
7005 	struct hci_request req;
7006 	int err;
7007 
7008 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7009 			       cp, sizeof(*cp));
7010 	if (!cmd)
7011 		return -ENOMEM;
7012 
7013 	hci_req_init(&req, hdev);
7014 
7015 	if (bredr_sc_enabled(hdev))
7016 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7017 	else
7018 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7019 
7020 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7021 	if (err < 0) {
7022 		mgmt_pending_remove(cmd);
7023 		return err;
7024 	}
7025 
7026 	return 0;
7027 }
7028 
/* Handler for the MGMT Read Local OOB Extended Data command. For BR/EDR
 * with SSP the controller is queried asynchronously via
 * read_local_ssp_oob_req(); for LE the OOB payload is assembled
 * synchronously here.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-compute the maximum EIR length for the reply allocation */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* The reply is sent from the HCI request's
			 * completion handler instead of from here.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] carries the address type marker: 0x01 for the
		 * static (random) address, 0x00 for the public address.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* LE role value depends on whether advertising is active */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7184 
7185 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7186 {
7187 	u32 flags = 0;
7188 
7189 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7190 	flags |= MGMT_ADV_FLAG_DISCOV;
7191 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7192 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7193 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7194 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7195 
7196 	/* In extended adv TX_POWER returned from Set Adv Param
7197 	 * will be always valid.
7198 	 */
7199 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7200 	    ext_adv_capable(hdev))
7201 		flags |= MGMT_ADV_FLAG_TX_POWER;
7202 
7203 	if (ext_adv_capable(hdev)) {
7204 		flags |= MGMT_ADV_FLAG_SEC_1M;
7205 
7206 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7207 			flags |= MGMT_ADV_FLAG_SEC_2M;
7208 
7209 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7210 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7211 	}
7212 
7213 	return flags;
7214 }
7215 
7216 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7217 			     void *data, u16 data_len)
7218 {
7219 	struct mgmt_rp_read_adv_features *rp;
7220 	size_t rp_len;
7221 	int err;
7222 	struct adv_info *adv_instance;
7223 	u32 supported_flags;
7224 	u8 *instance;
7225 
7226 	bt_dev_dbg(hdev, "sock %p", sk);
7227 
7228 	if (!lmp_le_capable(hdev))
7229 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7230 				       MGMT_STATUS_REJECTED);
7231 
7232 	/* Enabling the experimental LL Privay support disables support for
7233 	 * advertising.
7234 	 */
7235 	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7236 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7237 				       MGMT_STATUS_NOT_SUPPORTED);
7238 
7239 	hci_dev_lock(hdev);
7240 
7241 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7242 	rp = kmalloc(rp_len, GFP_ATOMIC);
7243 	if (!rp) {
7244 		hci_dev_unlock(hdev);
7245 		return -ENOMEM;
7246 	}
7247 
7248 	supported_flags = get_supported_adv_flags(hdev);
7249 
7250 	rp->supported_flags = cpu_to_le32(supported_flags);
7251 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7252 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7253 	rp->max_instances = HCI_MAX_ADV_INSTANCES;
7254 	rp->num_instances = hdev->adv_instance_cnt;
7255 
7256 	instance = rp->instance;
7257 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7258 		*instance = adv_instance->instance;
7259 		instance++;
7260 	}
7261 
7262 	hci_dev_unlock(hdev);
7263 
7264 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7265 				MGMT_STATUS_SUCCESS, rp, rp_len);
7266 
7267 	kfree(rp);
7268 
7269 	return err;
7270 }
7271 
7272 static u8 calculate_name_len(struct hci_dev *hdev)
7273 {
7274 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7275 
7276 	return append_local_name(hdev, buf, 0);
7277 }
7278 
7279 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7280 			   bool is_adv_data)
7281 {
7282 	u8 max_len = HCI_MAX_AD_LENGTH;
7283 
7284 	if (is_adv_data) {
7285 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7286 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7287 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7288 			max_len -= 3;
7289 
7290 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7291 			max_len -= 3;
7292 	} else {
7293 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7294 			max_len -= calculate_name_len(hdev);
7295 
7296 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7297 			max_len -= 4;
7298 	}
7299 
7300 	return max_len;
7301 }
7302 
7303 static bool flags_managed(u32 adv_flags)
7304 {
7305 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7306 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7307 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7308 }
7309 
7310 static bool tx_power_managed(u32 adv_flags)
7311 {
7312 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7313 }
7314 
7315 static bool name_managed(u32 adv_flags)
7316 {
7317 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7318 }
7319 
7320 static bool appearance_managed(u32 adv_flags)
7321 {
7322 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7323 }
7324 
7325 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7326 			      u8 len, bool is_adv_data)
7327 {
7328 	int i, cur_len;
7329 	u8 max_len;
7330 
7331 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7332 
7333 	if (len > max_len)
7334 		return false;
7335 
7336 	/* Make sure that the data is correctly formatted. */
7337 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7338 		cur_len = data[i];
7339 
7340 		if (data[i + 1] == EIR_FLAGS &&
7341 		    (!is_adv_data || flags_managed(adv_flags)))
7342 			return false;
7343 
7344 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7345 			return false;
7346 
7347 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7348 			return false;
7349 
7350 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7351 			return false;
7352 
7353 		if (data[i + 1] == EIR_APPEARANCE &&
7354 		    appearance_managed(adv_flags))
7355 			return false;
7356 
7357 		/* If the current field length would exceed the total data
7358 		 * length, then it's invalid.
7359 		 */
7360 		if (i + cur_len >= len)
7361 			return false;
7362 	}
7363 
7364 	return true;
7365 }
7366 
/* Completion handler for the Add Advertising HCI request. On failure,
 * all still-pending advertising instances are rolled back; in either
 * case the pending mgmt command, if any, is completed.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			/* Request succeeded: the instance is now live */
			adv_instance->pending = false;
			continue;
		}

		/* Request failed: remove the pending instance again */
		instance = adv_instance->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7418 
7419 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
7420 			   void *data, u16 data_len)
7421 {
7422 	struct mgmt_cp_add_advertising *cp = data;
7423 	struct mgmt_rp_add_advertising rp;
7424 	u32 flags;
7425 	u32 supported_flags, phy_flags;
7426 	u8 status;
7427 	u16 timeout, duration;
7428 	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
7429 	u8 schedule_instance = 0;
7430 	struct adv_info *next_instance;
7431 	int err;
7432 	struct mgmt_pending_cmd *cmd;
7433 	struct hci_request req;
7434 
7435 	bt_dev_dbg(hdev, "sock %p", sk);
7436 
7437 	status = mgmt_le_support(hdev);
7438 	if (status)
7439 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7440 				       status);
7441 
7442 	/* Enabling the experimental LL Privay support disables support for
7443 	 * advertising.
7444 	 */
7445 	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7446 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7447 				       MGMT_STATUS_NOT_SUPPORTED);
7448 
7449 	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
7450 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7451 				       MGMT_STATUS_INVALID_PARAMS);
7452 
7453 	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
7454 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7455 				       MGMT_STATUS_INVALID_PARAMS);
7456 
7457 	flags = __le32_to_cpu(cp->flags);
7458 	timeout = __le16_to_cpu(cp->timeout);
7459 	duration = __le16_to_cpu(cp->duration);
7460 
7461 	/* The current implementation only supports a subset of the specified
7462 	 * flags. Also need to check mutual exclusiveness of sec flags.
7463 	 */
7464 	supported_flags = get_supported_adv_flags(hdev);
7465 	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
7466 	if (flags & ~supported_flags ||
7467 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7468 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7469 				       MGMT_STATUS_INVALID_PARAMS);
7470 
7471 	hci_dev_lock(hdev);
7472 
7473 	if (timeout && !hdev_is_powered(hdev)) {
7474 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7475 				      MGMT_STATUS_REJECTED);
7476 		goto unlock;
7477 	}
7478 
7479 	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7480 	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7481 	    pending_find(MGMT_OP_SET_LE, hdev)) {
7482 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7483 				      MGMT_STATUS_BUSY);
7484 		goto unlock;
7485 	}
7486 
7487 	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
7488 	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
7489 			       cp->scan_rsp_len, false)) {
7490 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7491 				      MGMT_STATUS_INVALID_PARAMS);
7492 		goto unlock;
7493 	}
7494 
7495 	err = hci_add_adv_instance(hdev, cp->instance, flags,
7496 				   cp->adv_data_len, cp->data,
7497 				   cp->scan_rsp_len,
7498 				   cp->data + cp->adv_data_len,
7499 				   timeout, duration);
7500 	if (err < 0) {
7501 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7502 				      MGMT_STATUS_FAILED);
7503 		goto unlock;
7504 	}
7505 
7506 	/* Only trigger an advertising added event if a new instance was
7507 	 * actually added.
7508 	 */
7509 	if (hdev->adv_instance_cnt > prev_instance_cnt)
7510 		mgmt_advertising_added(sk, hdev, cp->instance);
7511 
7512 	if (hdev->cur_adv_instance == cp->instance) {
7513 		/* If the currently advertised instance is being changed then
7514 		 * cancel the current advertising and schedule the next
7515 		 * instance. If there is only one instance then the overridden
7516 		 * advertising data will be visible right away.
7517 		 */
7518 		cancel_adv_timeout(hdev);
7519 
7520 		next_instance = hci_get_next_instance(hdev, cp->instance);
7521 		if (next_instance)
7522 			schedule_instance = next_instance->instance;
7523 	} else if (!hdev->adv_instance_timeout) {
7524 		/* Immediately advertise the new instance if no other
7525 		 * instance is currently being advertised.
7526 		 */
7527 		schedule_instance = cp->instance;
7528 	}
7529 
7530 	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
7531 	 * there is no instance to be advertised then we have no HCI
7532 	 * communication to make. Simply return.
7533 	 */
7534 	if (!hdev_is_powered(hdev) ||
7535 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
7536 	    !schedule_instance) {
7537 		rp.instance = cp->instance;
7538 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7539 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7540 		goto unlock;
7541 	}
7542 
7543 	/* We're good to go, update advertising data, parameters, and start
7544 	 * advertising.
7545 	 */
7546 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
7547 			       data_len);
7548 	if (!cmd) {
7549 		err = -ENOMEM;
7550 		goto unlock;
7551 	}
7552 
7553 	hci_req_init(&req, hdev);
7554 
7555 	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
7556 
7557 	if (!err)
7558 		err = hci_req_run(&req, add_advertising_complete);
7559 
7560 	if (err < 0) {
7561 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
7562 				      MGMT_STATUS_FAILED);
7563 		mgmt_pending_remove(cmd);
7564 	}
7565 
7566 unlock:
7567 	hci_dev_unlock(hdev);
7568 
7569 	return err;
7570 }
7571 
7572 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7573 					u16 opcode)
7574 {
7575 	struct mgmt_pending_cmd *cmd;
7576 	struct mgmt_cp_remove_advertising *cp;
7577 	struct mgmt_rp_remove_advertising rp;
7578 
7579 	bt_dev_dbg(hdev, "status %d", status);
7580 
7581 	hci_dev_lock(hdev);
7582 
7583 	/* A failure status here only means that we failed to disable
7584 	 * advertising. Otherwise, the advertising instance has been removed,
7585 	 * so report success.
7586 	 */
7587 	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
7588 	if (!cmd)
7589 		goto unlock;
7590 
7591 	cp = cmd->param;
7592 	rp.instance = cp->instance;
7593 
7594 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7595 			  &rp, sizeof(rp));
7596 	mgmt_pending_remove(cmd);
7597 
7598 unlock:
7599 	hci_dev_unlock(hdev);
7600 }
7601 
7602 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
7603 			      void *data, u16 data_len)
7604 {
7605 	struct mgmt_cp_remove_advertising *cp = data;
7606 	struct mgmt_rp_remove_advertising rp;
7607 	struct mgmt_pending_cmd *cmd;
7608 	struct hci_request req;
7609 	int err;
7610 
7611 	bt_dev_dbg(hdev, "sock %p", sk);
7612 
7613 	/* Enabling the experimental LL Privay support disables support for
7614 	 * advertising.
7615 	 */
7616 	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
7617 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
7618 				       MGMT_STATUS_NOT_SUPPORTED);
7619 
7620 	hci_dev_lock(hdev);
7621 
7622 	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
7623 		err = mgmt_cmd_status(sk, hdev->id,
7624 				      MGMT_OP_REMOVE_ADVERTISING,
7625 				      MGMT_STATUS_INVALID_PARAMS);
7626 		goto unlock;
7627 	}
7628 
7629 	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7630 	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7631 	    pending_find(MGMT_OP_SET_LE, hdev)) {
7632 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7633 				      MGMT_STATUS_BUSY);
7634 		goto unlock;
7635 	}
7636 
7637 	if (list_empty(&hdev->adv_instances)) {
7638 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
7639 				      MGMT_STATUS_INVALID_PARAMS);
7640 		goto unlock;
7641 	}
7642 
7643 	hci_req_init(&req, hdev);
7644 
7645 	/* If we use extended advertising, instance is disabled and removed */
7646 	if (ext_adv_capable(hdev)) {
7647 		__hci_req_disable_ext_adv_instance(&req, cp->instance);
7648 		__hci_req_remove_ext_adv_instance(&req, cp->instance);
7649 	}
7650 
7651 	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
7652 
7653 	if (list_empty(&hdev->adv_instances))
7654 		__hci_req_disable_advertising(&req);
7655 
7656 	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
7657 	 * flag is set or the device isn't powered then we have no HCI
7658 	 * communication to make. Simply return.
7659 	 */
7660 	if (skb_queue_empty(&req.cmd_q) ||
7661 	    !hdev_is_powered(hdev) ||
7662 	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
7663 		hci_req_purge(&req);
7664 		rp.instance = cp->instance;
7665 		err = mgmt_cmd_complete(sk, hdev->id,
7666 					MGMT_OP_REMOVE_ADVERTISING,
7667 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7668 		goto unlock;
7669 	}
7670 
7671 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
7672 			       data_len);
7673 	if (!cmd) {
7674 		err = -ENOMEM;
7675 		goto unlock;
7676 	}
7677 
7678 	err = hci_req_run(&req, remove_advertising_complete);
7679 	if (err < 0)
7680 		mgmt_pending_remove(cmd);
7681 
7682 unlock:
7683 	hci_dev_unlock(hdev);
7684 
7685 	return err;
7686 }
7687 
7688 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
7689 			     void *data, u16 data_len)
7690 {
7691 	struct mgmt_cp_get_adv_size_info *cp = data;
7692 	struct mgmt_rp_get_adv_size_info rp;
7693 	u32 flags, supported_flags;
7694 	int err;
7695 
7696 	bt_dev_dbg(hdev, "sock %p", sk);
7697 
7698 	if (!lmp_le_capable(hdev))
7699 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7700 				       MGMT_STATUS_REJECTED);
7701 
7702 	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
7703 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7704 				       MGMT_STATUS_INVALID_PARAMS);
7705 
7706 	flags = __le32_to_cpu(cp->flags);
7707 
7708 	/* The current implementation only supports a subset of the specified
7709 	 * flags.
7710 	 */
7711 	supported_flags = get_supported_adv_flags(hdev);
7712 	if (flags & ~supported_flags)
7713 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7714 				       MGMT_STATUS_INVALID_PARAMS);
7715 
7716 	rp.instance = cp->instance;
7717 	rp.flags = cp->flags;
7718 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7719 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7720 
7721 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7722 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7723 
7724 	return err;
7725 }
7726 
/* Dispatch table for mgmt commands: entry N handles opcode N. Each entry
 * lists the handler, the expected parameter size (a minimum when the
 * HCI_MGMT_VAR_LEN flag is set), and flags restricting when and by whom
 * the command may be issued (e.g. HCI_MGMT_NO_HDEV for commands that take
 * no controller index, HCI_MGMT_UNTRUSTED for commands allowed on
 * untrusted sockets, HCI_MGMT_UNCONFIGURED for commands valid on a not
 * yet configured controller).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_security_info,      MGMT_READ_SECURITY_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
};
7847 
7848 void mgmt_index_added(struct hci_dev *hdev)
7849 {
7850 	struct mgmt_ev_ext_index ev;
7851 
7852 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7853 		return;
7854 
7855 	switch (hdev->dev_type) {
7856 	case HCI_PRIMARY:
7857 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7858 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7859 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7860 			ev.type = 0x01;
7861 		} else {
7862 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7863 					 HCI_MGMT_INDEX_EVENTS);
7864 			ev.type = 0x00;
7865 		}
7866 		break;
7867 	case HCI_AMP:
7868 		ev.type = 0x02;
7869 		break;
7870 	default:
7871 		return;
7872 	}
7873 
7874 	ev.bus = hdev->bus;
7875 
7876 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7877 			 HCI_MGMT_EXT_INDEX_EVENTS);
7878 }
7879 
7880 void mgmt_index_removed(struct hci_dev *hdev)
7881 {
7882 	struct mgmt_ev_ext_index ev;
7883 	u8 status = MGMT_STATUS_INVALID_INDEX;
7884 
7885 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7886 		return;
7887 
7888 	switch (hdev->dev_type) {
7889 	case HCI_PRIMARY:
7890 		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7891 
7892 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7893 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7894 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7895 			ev.type = 0x01;
7896 		} else {
7897 			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7898 					 HCI_MGMT_INDEX_EVENTS);
7899 			ev.type = 0x00;
7900 		}
7901 		break;
7902 	case HCI_AMP:
7903 		ev.type = 0x02;
7904 		break;
7905 	default:
7906 		return;
7907 	}
7908 
7909 	ev.bus = hdev->bus;
7910 
7911 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7912 			 HCI_MGMT_EXT_INDEX_EVENTS);
7913 }
7914 
7915 /* This function requires the caller holds hdev->lock */
7916 static void restart_le_actions(struct hci_dev *hdev)
7917 {
7918 	struct hci_conn_params *p;
7919 
7920 	list_for_each_entry(p, &hdev->le_conn_params, list) {
7921 		/* Needed for AUTO_OFF case where might not "really"
7922 		 * have been powered off.
7923 		 */
7924 		list_del_init(&p->action);
7925 
7926 		switch (p->auto_connect) {
7927 		case HCI_AUTO_CONN_DIRECT:
7928 		case HCI_AUTO_CONN_ALWAYS:
7929 			list_add(&p->action, &hdev->pend_le_conns);
7930 			break;
7931 		case HCI_AUTO_CONN_REPORT:
7932 			list_add(&p->action, &hdev->pend_le_reports);
7933 			break;
7934 		default:
7935 			break;
7936 		}
7937 	}
7938 }
7939 
/* Called once a power-on attempt has finished; err is 0 on success.
 * Completes pending MGMT_OP_SET_POWERED commands and broadcasts the new
 * settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		/* Re-queue LE auto-connect actions and resume background
		 * scanning now that the controller is up.
		 */
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp stored a referenced socket in match.sk; drop it */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
7962 
/* Power-off bookkeeping: answer all pending mgmt commands, report an
 * all-zero class of device if a non-zero one was set, and broadcast the
 * new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* settings_rsp stored a referenced socket in match.sk; drop it */
	if (match.sk)
		sock_put(match.sk);
}
7996 
7997 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7998 {
7999 	struct mgmt_pending_cmd *cmd;
8000 	u8 status;
8001 
8002 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8003 	if (!cmd)
8004 		return;
8005 
8006 	if (err == -ERFKILL)
8007 		status = MGMT_STATUS_RFKILLED;
8008 	else
8009 		status = MGMT_STATUS_FAILED;
8010 
8011 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8012 
8013 	mgmt_pending_remove(cmd);
8014 }
8015 
/* Emit a New Link Key event for a BR/EDR link key.
 *
 * @persistent: store hint passed through to userspace indicating whether
 *              the key should be stored permanently.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	/* Link keys only exist for BR/EDR links */
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
8032 
8033 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8034 {
8035 	switch (ltk->type) {
8036 	case SMP_LTK:
8037 	case SMP_LTK_SLAVE:
8038 		if (ltk->authenticated)
8039 			return MGMT_LTK_AUTHENTICATED;
8040 		return MGMT_LTK_UNAUTHENTICATED;
8041 	case SMP_LTK_P256:
8042 		if (ltk->authenticated)
8043 			return MGMT_LTK_P256_AUTH;
8044 		return MGMT_LTK_P256_UNAUTH;
8045 	case SMP_LTK_P256_DEBUG:
8046 		return MGMT_LTK_P256_DEBUG;
8047 	}
8048 
8049 	return MGMT_LTK_UNAUTHENTICATED;
8050 }
8051 
/* Emit a New Long Term Key event. The store hint is cleared for devices
 * using non-identity random addresses (see comment below).
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8094 
/* Emit a New IRK event carrying the identity resolving key, the device's
 * identity address and the irk->rpa address.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
8110 
/* Emit a New CSRK (signature resolving key) event. The store hint is
 * cleared for devices using non-identity random addresses (see below).
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
8140 
/* Emit a New Connection Parameter event for an LE device. Events are only
 * generated for identity addresses (public or static random); other
 * address types are silently ignored.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
8161 
/* Emit a Device Connected event. The trailing EIR data is either copied
 * verbatim from the connection's LE advertising data or, for BR/EDR,
 * assembled from the remote name and class of device.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* NOTE(review): fixed 512-byte event buffer; no explicit bound
	 * check on le_adv_data_len/name_len here -- confirm callers keep
	 * these within the remaining space after the event header.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append class of device when it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
8198 
/* mgmt_pending_foreach() callback: complete a pending MGMT_OP_DISCONNECT
 * command and hand its socket back via *data. A reference is taken on the
 * socket; the caller is responsible for sock_put().
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
8210 
/* mgmt_pending_foreach() callback: emit a Device Unpaired event for the
 * address in the pending MGMT_OP_UNPAIR_DEVICE command, then complete and
 * remove the command. data is the hci_dev the command was issued against.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
8221 
8222 bool mgmt_powering_down(struct hci_dev *hdev)
8223 {
8224 	struct mgmt_pending_cmd *cmd;
8225 	struct mgmt_mode *cp;
8226 
8227 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8228 	if (!cmd)
8229 		return false;
8230 
8231 	cp = cmd->param;
8232 	if (!cp->val)
8233 		return true;
8234 
8235 	return false;
8236 }
8237 
/* Emit a Device Disconnected event and complete any pending
 * MGMT_OP_DISCONNECT / MGMT_OP_UNPAIR_DEVICE commands. Also expedites a
 * pending power-off once the last connection drops.
 *
 * @mgmt_connected: whether userspace was told about this connection; if
 *                  false, no event is sent.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	/* Only ACL and LE links are reported to userspace */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	/* Drop the reference taken by disconnect_rsp() */
	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8273 
/* Called when a disconnect attempt failed: complete the pending
 * MGMT_OP_DISCONNECT command whose address matches, translating the HCI
 * status into a mgmt status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Ignore completions for a different address or address type */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8299 
/* Emit a Connect Failed event with the translated HCI status. Also
 * expedites a pending power-off once the last connection drops.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
8319 
/* Emit a PIN Code Request event for a BR/EDR device.
 *
 * @secure: forwarded to userspace as-is (per the mgmt API, whether a
 *          16-digit PIN is required).
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
8330 
/* Complete a pending MGMT_OP_PIN_CODE_REPLY command with the translated
 * HCI status.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8343 
/* Complete a pending MGMT_OP_PIN_CODE_NEG_REPLY command with the
 * translated HCI status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
8356 
/* Emit a User Confirmation Request event asking userspace to confirm the
 * given numeric value during pairing.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
8373 
/* Emit a User Passkey Request event asking userspace to supply a passkey
 * during pairing.
 */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
8387 
8388 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8389 				      u8 link_type, u8 addr_type, u8 status,
8390 				      u8 opcode)
8391 {
8392 	struct mgmt_pending_cmd *cmd;
8393 
8394 	cmd = pending_find(opcode, hdev);
8395 	if (!cmd)
8396 		return -ENOENT;
8397 
8398 	cmd->cmd_complete(cmd, mgmt_status(status));
8399 	mgmt_pending_remove(cmd);
8400 
8401 	return 0;
8402 }
8403 
/* Complete a pending MGMT_OP_USER_CONFIRM_REPLY command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
8410 
/* Complete a pending MGMT_OP_USER_CONFIRM_NEG_REPLY command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
8418 
/* Complete a pending MGMT_OP_USER_PASSKEY_REPLY command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
8425 
/* Complete a pending MGMT_OP_USER_PASSKEY_NEG_REPLY command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
8433 
/* Emit a Passkey Notify event carrying the passkey the remote side must
 * enter and how many digits have been entered so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
8449 
8450 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
8451 {
8452 	struct mgmt_ev_auth_failed ev;
8453 	struct mgmt_pending_cmd *cmd;
8454 	u8 status = mgmt_status(hci_status);
8455 
8456 	bacpy(&ev.addr.bdaddr, &conn->dst);
8457 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
8458 	ev.status = status;
8459 
8460 	cmd = find_pairing(conn);
8461 
8462 	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
8463 		    cmd ? cmd->sk : NULL);
8464 
8465 	if (cmd) {
8466 		cmd->cmd_complete(cmd, status);
8467 		mgmt_pending_remove(cmd);
8468 	}
8469 }
8470 
8471 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
8472 {
8473 	struct cmd_lookup match = { NULL, hdev };
8474 	bool changed;
8475 
8476 	if (status) {
8477 		u8 mgmt_err = mgmt_status(status);
8478 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
8479 				     cmd_status_rsp, &mgmt_err);
8480 		return;
8481 	}
8482 
8483 	if (test_bit(HCI_AUTH, &hdev->flags))
8484 		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
8485 	else
8486 		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
8487 
8488 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
8489 			     &match);
8490 
8491 	if (changed)
8492 		new_settings(hdev, match.sk);
8493 
8494 	if (match.sk)
8495 		sock_put(match.sk);
8496 }
8497 
8498 static void clear_eir(struct hci_request *req)
8499 {
8500 	struct hci_dev *hdev = req->hdev;
8501 	struct hci_cp_write_eir cp;
8502 
8503 	if (!lmp_ext_inq_capable(hdev))
8504 		return;
8505 
8506 	memset(hdev->eir, 0, sizeof(hdev->eir));
8507 
8508 	memset(&cp, 0, sizeof(cp));
8509 
8510 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8511 }
8512 
/* Called when an HCI Write Simple Pairing Mode command completes.
 * Syncs the HCI_SSP_ENABLED/HCI_HS_ENABLED flags with the controller
 * state, answers any pending MGMT_OP_SET_SSP commands, and refreshes
 * the EIR data to match the new SSP setting.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enabling failed: roll back the optimistically-set flag
		 * and tell userspace the settings changed.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also clears High Speed support; report a
		 * change if either flag was previously set.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* EIR content depends on SSP: update it while SSP is enabled
	 * (re-applying debug-keys mode if in use), clear it otherwise.
	 */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8565 
8566 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8567 {
8568 	struct cmd_lookup *match = data;
8569 
8570 	if (match->sk == NULL) {
8571 		match->sk = cmd->sk;
8572 		sock_hold(match->sk);
8573 	}
8574 }
8575 
/* Called when a class-of-device update finishes. Completes any pending
 * SET_DEV_CLASS/ADD_UUID/REMOVE_UUID commands and, on success,
 * broadcasts the new class of device to interested listeners.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* sk_lookup latches the socket of the first pending command found,
	 * so the order of these lookups matters.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* dev_class points at the 3-byte class of device value */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
8594 
/* Called when the local name write has completed. On success, emits
 * MGMT_EV_LOCAL_NAME_CHANGED — unless the write was part of powering
 * on the adapter, in which case no signal is sent.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	/* name is assumed to point at HCI_MAX_NAME_LENGTH bytes — callers
	 * must guarantee this (TODO(review): confirm against call sites).
	 */
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No userspace-initiated change pending: cache the name */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
8622 
8623 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8624 {
8625 	int i;
8626 
8627 	for (i = 0; i < uuid_count; i++) {
8628 		if (!memcmp(uuid, uuids[i], 16))
8629 			return true;
8630 	}
8631 
8632 	return false;
8633 }
8634 
/* Walk the EIR/advertising data in @eir (length @eir_len) and return
 * true if any 16-, 32- or 128-bit Service UUID field contains a UUID
 * present in @uuids. Shorter UUIDs are expanded to 128 bits on top of
 * the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length of type byte + payload */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop on a field that would run past the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 2-byte little-endian UUIDs, payload starts at
			 * offset 2; they land in base-UUID bytes 12-13.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4-byte little-endian UUIDs in base-UUID bytes 12-15 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared verbatim */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* field_len excludes the length octet itself */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8689 
8690 static void restart_le_scan(struct hci_dev *hdev)
8691 {
8692 	/* If controller is not scanning we are done. */
8693 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
8694 		return;
8695 
8696 	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
8697 		       hdev->discovery.scan_start +
8698 		       hdev->discovery.scan_duration))
8699 		return;
8700 
8701 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
8702 			   DISCOV_LE_RESTART_DELAY);
8703 }
8704 
/* Apply the active service-discovery filter (the RSSI threshold and
 * UUID list stored in hdev->discovery) to a found device. Returns true
 * if the result should be reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the EIR/advertising
		 * data and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
8749 
/* Report a found remote device to userspace via MGMT_EV_DEVICE_FOUND.
 * The event carries the EIR/advertising data with the scan response
 * appended, and folds the class of device in as an EIR field when the
 * remote did not provide one itself. Results may be suppressed by the
 * discovery filter or the limited-discovery check.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device as an EIR field unless one was
	 * already present in the received data.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8834 
8835 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8836 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8837 {
8838 	struct mgmt_ev_device_found *ev;
8839 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8840 	u16 eir_len;
8841 
8842 	ev = (struct mgmt_ev_device_found *) buf;
8843 
8844 	memset(buf, 0, sizeof(buf));
8845 
8846 	bacpy(&ev->addr.bdaddr, bdaddr);
8847 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
8848 	ev->rssi = rssi;
8849 
8850 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8851 				  name_len);
8852 
8853 	ev->eir_len = cpu_to_le16(eir_len);
8854 
8855 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8856 }
8857 
8858 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8859 {
8860 	struct mgmt_ev_discovering ev;
8861 
8862 	bt_dev_dbg(hdev, "discovering %u", discovering);
8863 
8864 	memset(&ev, 0, sizeof(ev));
8865 	ev.type = hdev->discovery.type;
8866 	ev.discovering = discovering;
8867 
8868 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8869 }
8870 
/* The management control channel: ties the mgmt command handler table
 * and per-device init hook to HCI_CHANNEL_CONTROL sockets.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8877 
/* Register the management control channel with the HCI core.
 * Returns 0 on success or a negative error from the registration.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8882 
/* Unregister the management control channel from the HCI core. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8887