xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 15d8ce05ebec37a0d701cde768bbf21349f2329d)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 
42 #define MGMT_VERSION	1
43 #define MGMT_REVISION	17
44 
/* Full set of management opcodes supported by this interface. This
 * table is returned verbatim to trusted sockets by read_commands().
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
};
126 
/* Full set of management events this interface can emit. Returned to
 * trusted sockets by read_commands() alongside mgmt_commands.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
};
167 
/* Read-only subset of mgmt_commands that sockets without the
 * HCI_SOCK_TRUSTED flag are allowed to see (see read_commands()).
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
180 
/* Subset of mgmt_events advertised to sockets without the
 * HCI_SOCK_TRUSTED flag (see read_commands()).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
};
197 
198 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
199 
200 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
201 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
202 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code; each entry names the HCI
 * status it translates. Codes beyond the end of the table are mapped
 * to MGMT_STATUS_FAILED by mgmt_status().
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
267 
268 static u8 mgmt_status(u8 hci_status)
269 {
270 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
271 		return mgmt_status_table[hci_status];
272 
273 	return MGMT_STATUS_FAILED;
274 }
275 
/* Send an index-related event on the control channel to every socket
 * matching @flag; no socket is skipped (skip_sk is NULL).
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
282 
/* Send a control-channel event limited to sockets matching @flag,
 * optionally skipping @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
289 
/* Send a control-channel event to all trusted sockets, optionally
 * skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
296 
297 static u8 le_addr_type(u8 mgmt_addr_type)
298 {
299 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
300 		return ADDR_LE_DEV_PUBLIC;
301 	else
302 		return ADDR_LE_DEV_RANDOM;
303 }
304 
305 void mgmt_fill_version_info(void *ver)
306 {
307 	struct mgmt_rp_read_version *rp = ver;
308 
309 	rp->version = MGMT_VERSION;
310 	rp->revision = cpu_to_le16(MGMT_REVISION);
311 }
312 
313 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
314 			u16 data_len)
315 {
316 	struct mgmt_rp_read_version rp;
317 
318 	bt_dev_dbg(hdev, "sock %p", sk);
319 
320 	mgmt_fill_version_info(&rp);
321 
322 	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
323 				 &rp, sizeof(rp));
324 }
325 
326 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
327 			 u16 data_len)
328 {
329 	struct mgmt_rp_read_commands *rp;
330 	u16 num_commands, num_events;
331 	size_t rp_size;
332 	int i, err;
333 
334 	bt_dev_dbg(hdev, "sock %p", sk);
335 
336 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
337 		num_commands = ARRAY_SIZE(mgmt_commands);
338 		num_events = ARRAY_SIZE(mgmt_events);
339 	} else {
340 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
341 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
342 	}
343 
344 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
345 
346 	rp = kmalloc(rp_size, GFP_KERNEL);
347 	if (!rp)
348 		return -ENOMEM;
349 
350 	rp->num_commands = cpu_to_le16(num_commands);
351 	rp->num_events = cpu_to_le16(num_events);
352 
353 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
354 		__le16 *opcode = rp->opcodes;
355 
356 		for (i = 0; i < num_commands; i++, opcode++)
357 			put_unaligned_le16(mgmt_commands[i], opcode);
358 
359 		for (i = 0; i < num_events; i++, opcode++)
360 			put_unaligned_le16(mgmt_events[i], opcode);
361 	} else {
362 		__le16 *opcode = rp->opcodes;
363 
364 		for (i = 0; i < num_commands; i++, opcode++)
365 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
366 
367 		for (i = 0; i < num_events; i++, opcode++)
368 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
369 	}
370 
371 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
372 				rp, rp_size);
373 	kfree(rp);
374 
375 	return err;
376 }
377 
378 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
379 			   u16 data_len)
380 {
381 	struct mgmt_rp_read_index_list *rp;
382 	struct hci_dev *d;
383 	size_t rp_len;
384 	u16 count;
385 	int err;
386 
387 	bt_dev_dbg(hdev, "sock %p", sk);
388 
389 	read_lock(&hci_dev_list_lock);
390 
391 	count = 0;
392 	list_for_each_entry(d, &hci_dev_list, list) {
393 		if (d->dev_type == HCI_PRIMARY &&
394 		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
395 			count++;
396 	}
397 
398 	rp_len = sizeof(*rp) + (2 * count);
399 	rp = kmalloc(rp_len, GFP_ATOMIC);
400 	if (!rp) {
401 		read_unlock(&hci_dev_list_lock);
402 		return -ENOMEM;
403 	}
404 
405 	count = 0;
406 	list_for_each_entry(d, &hci_dev_list, list) {
407 		if (hci_dev_test_flag(d, HCI_SETUP) ||
408 		    hci_dev_test_flag(d, HCI_CONFIG) ||
409 		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
410 			continue;
411 
412 		/* Devices marked as raw-only are neither configured
413 		 * nor unconfigured controllers.
414 		 */
415 		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
416 			continue;
417 
418 		if (d->dev_type == HCI_PRIMARY &&
419 		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
420 			rp->index[count++] = cpu_to_le16(d->id);
421 			bt_dev_dbg(hdev, "Added hci%u", d->id);
422 		}
423 	}
424 
425 	rp->num_controllers = cpu_to_le16(count);
426 	rp_len = sizeof(*rp) + (2 * count);
427 
428 	read_unlock(&hci_dev_list_lock);
429 
430 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
431 				0, rp, rp_len);
432 
433 	kfree(rp);
434 
435 	return err;
436 }
437 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: reply with the indices of
 * all unconfigured HCI_PRIMARY controllers. Mirrors read_index_list()
 * but with the HCI_UNCONFIGURED test inverted.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass only sizes the allocation; the filters in the
	 * second pass can further reduce the final count.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding hci_dev_list_lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still in setup/config or claimed by
		 * a user channel socket.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* Recompute rp_len so only the populated entries are sent. */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
497 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: reply with index, bus and type
 * (0x00 configured primary, 0x01 unconfigured primary, 0x02 AMP) for
 * every listed controller. Also switches this socket over to extended
 * index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass sizes the allocation; an upper bound, since the
	 * second pass applies further filters.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* struct_size() computes the flexible-array allocation with
	 * overflow checking; GFP_ATOMIC because the list lock is held.
	 */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still in setup/config or claimed by
		 * a user channel socket.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
571 
572 static bool is_configured(struct hci_dev *hdev)
573 {
574 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
575 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
576 		return false;
577 
578 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
579 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
580 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
581 		return false;
582 
583 	return true;
584 }
585 
586 static __le32 get_missing_options(struct hci_dev *hdev)
587 {
588 	u32 options = 0;
589 
590 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
591 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
592 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
593 
594 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
595 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
596 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
597 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
598 
599 	return cpu_to_le32(options);
600 }
601 
602 static int new_options(struct hci_dev *hdev, struct sock *skip)
603 {
604 	__le32 options = get_missing_options(hdev);
605 
606 	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
607 				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
608 }
609 
610 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
611 {
612 	__le32 options = get_missing_options(hdev);
613 
614 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
615 				 sizeof(options));
616 }
617 
618 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
619 			    void *data, u16 data_len)
620 {
621 	struct mgmt_rp_read_config_info rp;
622 	u32 options = 0;
623 
624 	bt_dev_dbg(hdev, "sock %p", sk);
625 
626 	hci_dev_lock(hdev);
627 
628 	memset(&rp, 0, sizeof(rp));
629 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
630 
631 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
632 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
633 
634 	if (hdev->set_bdaddr)
635 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
636 
637 	rp.supported_options = cpu_to_le32(options);
638 	rp.missing_options = get_missing_options(hdev);
639 
640 	hci_dev_unlock(hdev);
641 
642 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
643 				 &rp, sizeof(rp));
644 }
645 
646 static u32 get_supported_phys(struct hci_dev *hdev)
647 {
648 	u32 supported_phys = 0;
649 
650 	if (lmp_bredr_capable(hdev)) {
651 		supported_phys |= MGMT_PHY_BR_1M_1SLOT;
652 
653 		if (hdev->features[0][0] & LMP_3SLOT)
654 			supported_phys |= MGMT_PHY_BR_1M_3SLOT;
655 
656 		if (hdev->features[0][0] & LMP_5SLOT)
657 			supported_phys |= MGMT_PHY_BR_1M_5SLOT;
658 
659 		if (lmp_edr_2m_capable(hdev)) {
660 			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
661 
662 			if (lmp_edr_3slot_capable(hdev))
663 				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
664 
665 			if (lmp_edr_5slot_capable(hdev))
666 				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
667 
668 			if (lmp_edr_3m_capable(hdev)) {
669 				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
670 
671 				if (lmp_edr_3slot_capable(hdev))
672 					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
673 
674 				if (lmp_edr_5slot_capable(hdev))
675 					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
676 			}
677 		}
678 	}
679 
680 	if (lmp_le_capable(hdev)) {
681 		supported_phys |= MGMT_PHY_LE_1M_TX;
682 		supported_phys |= MGMT_PHY_LE_1M_RX;
683 
684 		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
685 			supported_phys |= MGMT_PHY_LE_2M_TX;
686 			supported_phys |= MGMT_PHY_LE_2M_RX;
687 		}
688 
689 		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
690 			supported_phys |= MGMT_PHY_LE_CODED_TX;
691 			supported_phys |= MGMT_PHY_LE_CODED_RX;
692 		}
693 	}
694 
695 	return supported_phys;
696 }
697 
/* Build the bitmask of currently selected PHYs.
 *
 * For BR/EDR, pkt_type semantics differ by rate: basic-rate DM/DH bits
 * are set when the packet type is enabled, while EDR 2DHx/3DHx bits
 * are "shall not be used" bits — hence the inverted tests below.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is always selected on BR/EDR. */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits are exclusion bits: clear means the
			 * packet type is in use.
			 */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE default TX/RX PHY preferences are tracked
		 * separately per direction.
		 */
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
760 
761 static u32 get_configurable_phys(struct hci_dev *hdev)
762 {
763 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
764 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
765 }
766 
/* Build the bitmask of settings this controller can support, derived
 * from its BR/EDR and LE capabilities and driver hooks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always-available settings, independent of capabilities. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs at least Bluetooth 1.2. */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		/* Secure connections are mandatory for LE. */
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	/* Configuration is possible with external config support or a
	 * driver-provided set_bdaddr hook.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
812 
/* Build the bitmask of settings currently active on this controller,
 * mostly by mirroring the corresponding hdev flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
883 
/* Look up a pending mgmt command for @opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
888 
/* Look up a pending mgmt command for @opcode on the control channel
 * whose user data matches @data.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
895 
896 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
897 {
898 	struct mgmt_pending_cmd *cmd;
899 
900 	/* If there's a pending mgmt command the flags will not yet have
901 	 * their final values, so check for this first.
902 	 */
903 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
904 	if (cmd) {
905 		struct mgmt_mode *cp = cmd->param;
906 		if (cp->val == 0x01)
907 			return LE_AD_GENERAL;
908 		else if (cp->val == 0x02)
909 			return LE_AD_LIMITED;
910 	} else {
911 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
912 			return LE_AD_LIMITED;
913 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
914 			return LE_AD_GENERAL;
915 	}
916 
917 	return 0;
918 }
919 
920 bool mgmt_get_connectable(struct hci_dev *hdev)
921 {
922 	struct mgmt_pending_cmd *cmd;
923 
924 	/* If there's a pending mgmt command the flag will not yet have
925 	 * it's final value, so check for this first.
926 	 */
927 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
928 	if (cmd) {
929 		struct mgmt_mode *cp = cmd->param;
930 
931 		return cp->val;
932 	}
933 
934 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
935 }
936 
/* Delayed work: when the service cache expires, push the cached EIR
 * data and class of device out to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Only run once per cache period: atomically clear the flag and
	 * bail if it was not set.
	 */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	/* Build the request under the device lock... */
	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	/* ...but submit it after dropping the lock. */
	hci_req_run(&req, NULL);
}
957 
/* Delayed work: mark the resolvable private address as expired and,
 * if advertising is active, restart it so a fresh RPA gets used.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing further to do unless advertising is running; the
	 * expired flag alone ensures a new RPA on next use.
	 */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
982 
/* One-time per-device mgmt initialization, performed when the first
 * mgmt command targets the device. Idempotent: guarded by the
 * HCI_MGMT flag.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* test-and-set returns the previous value, so a second caller
	 * returns immediately without re-initializing.
	 */
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
998 
/* MGMT_OP_READ_INFO: reply with this controller's address, HCI
 * version, manufacturer, supported/current settings, class of device
 * and local names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Hold the device lock so the snapshot below is consistent */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1028 
1029 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1030 {
1031 	u16 eir_len = 0;
1032 	size_t name_len;
1033 
1034 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1035 		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1036 					  hdev->dev_class, 3);
1037 
1038 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1039 		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1040 					  hdev->appearance);
1041 
1042 	name_len = strlen(hdev->dev_name);
1043 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1044 				  hdev->dev_name, name_len);
1045 
1046 	name_len = strlen(hdev->short_name);
1047 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1048 				  hdev->short_name, name_len);
1049 
1050 	return eir_len;
1051 }
1052 
/* MGMT_OP_READ_EXT_INFO: like read_controller_info() but with class
 * of device, appearance and names packed as EIR structures. Invoking
 * it also switches the calling socket over to the extended info
 * changed event.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* NOTE(review): 512 bytes is assumed to cover the fixed reply
	 * header plus the worst-case EIR data built by
	 * append_eir_data_to_buf() — bounded by the hdev name fields.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	/* Reply length is the fixed header plus the variable EIR part */
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1092 
/* Broadcast an Extended Controller Information Changed event (with a
 * freshly built EIR blob) to the sockets that opted in via
 * MGMT_OP_READ_EXT_INFO, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1108 
1109 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1110 {
1111 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1112 
1113 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1114 				 sizeof(settings));
1115 }
1116 
1117 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1118 {
1119 	bt_dev_dbg(hdev, "status 0x%02x", status);
1120 
1121 	if (hci_conn_count(hdev) == 0) {
1122 		cancel_delayed_work(&hdev->power_off);
1123 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1124 	}
1125 }
1126 
1127 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1128 {
1129 	struct mgmt_ev_advertising_added ev;
1130 
1131 	ev.instance = instance;
1132 
1133 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1134 }
1135 
1136 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1137 			      u8 instance)
1138 {
1139 	struct mgmt_ev_advertising_removed ev;
1140 
1141 	ev.instance = instance;
1142 
1143 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1144 }
1145 
1146 static void cancel_adv_timeout(struct hci_dev *hdev)
1147 {
1148 	if (hdev->adv_instance_timeout) {
1149 		hdev->adv_instance_timeout = 0;
1150 		cancel_delayed_work(&hdev->adv_instance_expire);
1151 	}
1152 }
1153 
/* Build and run one HCI request that quiesces the controller before
 * power off: disable page/inquiry scan, stop advertising and
 * discovery and abort every connection. Returns the hci_req_run()
 * result; -ENODATA means no HCI commands needed to be queued.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	/* Turn off page and inquiry scanning if either is active */
	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1187 
/* MGMT_OP_SET_POWERED: power the controller up or down.
 *
 * Powering up is handed off to the power_on work. Powering down first
 * quiesces the controller via clean_up_hci_state() and then schedules
 * the power_off work; the pending command is completed from those
 * paths, not here.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: answer immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1242 
1243 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1244 {
1245 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1246 
1247 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1248 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1249 }
1250 
/* Broadcast the current settings to all listening mgmt sockets. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1255 
/* Shared context for mgmt_pending_foreach() callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first originating socket seen (ref held) */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1261 
/* mgmt_pending_foreach() helper: answer @cmd with the current
 * settings, record the first originating socket in the cmd_lookup
 * (taking a reference on it) and free the pending command.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink before freeing; foreach iteration tolerates this */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1277 
1278 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1279 {
1280 	u8 *status = data;
1281 
1282 	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1283 	mgmt_pending_remove(cmd);
1284 }
1285 
1286 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1287 {
1288 	if (cmd->cmd_complete) {
1289 		u8 *status = data;
1290 
1291 		cmd->cmd_complete(cmd, *status);
1292 		mgmt_pending_remove(cmd);
1293 
1294 		return;
1295 	}
1296 
1297 	cmd_status_rsp(cmd, data);
1298 }
1299 
/* Default cmd_complete handler: echo the original command parameters
 * back as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1305 
/* cmd_complete handler for address-based commands: respond with only
 * the leading mgmt_addr_info portion of the original parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1311 
1312 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1313 {
1314 	if (!lmp_bredr_capable(hdev))
1315 		return MGMT_STATUS_NOT_SUPPORTED;
1316 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1317 		return MGMT_STATUS_REJECTED;
1318 	else
1319 		return MGMT_STATUS_SUCCESS;
1320 }
1321 
1322 static u8 mgmt_le_support(struct hci_dev *hdev)
1323 {
1324 	if (!lmp_le_capable(hdev))
1325 		return MGMT_STATUS_NOT_SUPPORTED;
1326 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1327 		return MGMT_STATUS_REJECTED;
1328 	else
1329 		return MGMT_STATUS_SUCCESS;
1330 }
1331 
/* Called when the discoverable_update work has finished talking to
 * the controller: complete the pending Set Discoverable command and,
 * on success, arm the discoverable timeout if one was configured.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the timer that turns discoverable back off */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1366 
/* MGMT_OP_SET_DISCOVERABLE: val 0x00 = off, 0x01 = general
 * discoverable, 0x02 = limited discoverable (requires a timeout).
 * When the device is powered, the actual mode change is performed by
 * the discoverable_update work and the command completes in
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable only makes sense on a connectable device */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1496 
/* Called when the connectable_update work has finished talking to the
 * controller: complete the pending Set Connectable command.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1524 
1525 static int set_connectable_update_settings(struct hci_dev *hdev,
1526 					   struct sock *sk, u8 val)
1527 {
1528 	bool changed = false;
1529 	int err;
1530 
1531 	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1532 		changed = true;
1533 
1534 	if (val) {
1535 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1536 	} else {
1537 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1538 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1539 	}
1540 
1541 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1542 	if (err < 0)
1543 		return err;
1544 
1545 	if (changed) {
1546 		hci_req_update_scan(hdev);
1547 		hci_update_background_scan(hdev);
1548 		return new_settings(hdev, sk);
1549 	}
1550 
1551 	return 0;
1552 }
1553 
/* MGMT_OP_SET_CONNECTABLE: toggle connectable mode. When powered off
 * only the setting flags are touched; otherwise the
 * connectable_update work performs the HCI transaction and the
 * command completes in mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverability */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1610 
/* MGMT_OP_SET_BONDABLE: host-side flag only, so no HCI traffic is
 * required; in limited privacy mode a change may additionally require
 * refreshing the advertising address via the discoverable_update
 * work.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1653 
/* MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR authentication via
 * HCI_Write_Authentication_Enable. When powered off only the setting
 * flag changes; otherwise the pending command completes when the HCI
 * command does.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1722 
/* MGMT_OP_SET_SSP: toggle Secure Simple Pairing. Disabling SSP also
 * clears the High Speed flag, since HS depends on SSP being enabled.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the host-side flags change */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off SSP debug mode if it was on */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1803 
/* MGMT_OP_SET_HS: toggle High Speed support. This is a host-side
 * flag only; disabling it while the device is powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS requires SSP to be enabled */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1860 
/* HCI request callback for set_le(): complete every pending Set LE
 * command and, when LE ended up enabled, refresh the default
 * advertising data and background scanning.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	/* settings_rsp() remembers the first socket in match.sk */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1907 
/* MGMT_OP_SET_LE: toggle LE host support via
 * HCI_Write_LE_Host_Supported; the pending command completes in
 * le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE drops all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off, or controller already in the requested state:
	 * only the host-side flags need updating.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Turning LE off also stops any active advertising */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2014 
2015 /* This is a helper function to test for pending mgmt commands that can
2016  * cause CoD or EIR HCI commands. We can only allow one such pending
2017  * mgmt command at a time since otherwise we cannot easily track what
2018  * the current values are, will be, and based on that calculate if a new
2019  * HCI command needs to be sent and if yes with what value.
2020  */
2021 static bool pending_eir_or_class(struct hci_dev *hdev)
2022 {
2023 	struct mgmt_pending_cmd *cmd;
2024 
2025 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2026 		switch (cmd->opcode) {
2027 		case MGMT_OP_ADD_UUID:
2028 		case MGMT_OP_REMOVE_UUID:
2029 		case MGMT_OP_SET_DEV_CLASS:
2030 		case MGMT_OP_SET_POWERED:
2031 			return true;
2032 		}
2033 	}
2034 
2035 	return false;
2036 }
2037 
/* Bluetooth Base UUID in little-endian byte order; 16- and 32-bit
 * UUIDs are shorthand values layered on top of it (see
 * get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2042 
2043 static u8 get_uuid_size(const u8 *uuid)
2044 {
2045 	u32 val;
2046 
2047 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2048 		return 128;
2049 
2050 	val = get_unaligned_le32(&uuid[12]);
2051 	if (val > 0xffff)
2052 		return 32;
2053 
2054 	return 16;
2055 }
2056 
/* Shared completion for class/EIR changing commands: finish the
 * pending @mgmt_op with the current class of device as payload.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2075 
/* HCI request callback for add_uuid(): complete the pending command */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2082 
/* MGMT_OP_ADD_UUID: record a new service UUID and refresh the class
 * of device and EIR data that advertise it. When the update produces
 * no HCI traffic (-ENODATA) the command is completed immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR changing command may run at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI commands were needed: complete right away */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2140 
2141 static bool enable_service_cache(struct hci_dev *hdev)
2142 {
2143 	if (!hdev_is_powered(hdev))
2144 		return false;
2145 
2146 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2147 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2148 				   CACHE_TIMEOUT);
2149 		return true;
2150 	}
2151 
2152 	return false;
2153 }
2154 
/* HCI request callback for Remove UUID: forward the status to the
 * shared class-command completion path.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2161 
/* Remove UUID (MGMT_OP_REMOVE_UUID) handler.
 *
 * Removes all matching entries from hdev->uuids (or clears the whole
 * list when the all-zero wildcard UUID is given) and refreshes the
 * class of device and EIR data on the controller.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one class/EIR changing operation may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was just armed, the actual
		 * class/EIR refresh is deferred to the cache timeout, so
		 * answer immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	/* Asking to remove a UUID that was never added is an error */
	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: nothing needed to change on the controller */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2240 
/* HCI request callback for Set Device Class: forward the status to the
 * shared class-command completion path.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2247 
/* Set Device Class (MGMT_OP_SET_DEV_CLASS) handler.
 *
 * Stores the new major/minor class and, if the controller is powered,
 * pushes the updated class of device (and possibly EIR) to it. BR/EDR
 * only; the low two minor bits and high three major bits are reserved
 * and must be zero.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one class/EIR changing operation may be in flight */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reserved bits must not be set */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: remember the values, reply with current class */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Drop the lock around the synchronous cancel since the
		 * service_cache work presumably takes hci_dev_lock itself
		 * and waiting for it while holding the lock could deadlock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: nothing needed to change on the controller */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2318 
/* Load Link Keys (MGMT_OP_LOAD_LINK_KEYS) handler.
 *
 * Replaces the entire BR/EDR link-key store with the supplied list.
 * The whole payload is validated (count bound, exact length, per-key
 * address/type) before any existing keys are discarded. Debug
 * combination keys and keys on the blocked-key list are silently
 * skipped rather than stored.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound so that struct_size() below cannot exceed U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must match the advertised key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before touching the existing key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Emit New Settings only if the debug-keys setting flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2407 
2408 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2409 			   u8 addr_type, struct sock *skip_sk)
2410 {
2411 	struct mgmt_ev_device_unpaired ev;
2412 
2413 	bacpy(&ev.addr.bdaddr, bdaddr);
2414 	ev.addr.type = addr_type;
2415 
2416 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2417 			  skip_sk);
2418 }
2419 
/* Unpair Device (MGMT_OP_UNPAIR_DEVICE) handler.
 *
 * Removes all pairing material for the given address (link key for
 * BR/EDR; LTK/IRK plus any in-progress SMP pairing for LE) and, when
 * requested, terminates an existing connection to the device. The
 * command stays pending only if a link actually has to be torn down.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored key means the device was never paired */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: connection parameters can go right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Keep the command pending until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2547 
/* Disconnect (MGMT_OP_DISCONNECT) handler.
 *
 * Terminates the BR/EDR or LE connection to the given address. The
 * command stays pending until the disconnect completes; only one
 * Disconnect command may be outstanding at a time.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the target address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Refuse a second Disconnect while one is still pending */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2613 
2614 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2615 {
2616 	switch (link_type) {
2617 	case LE_LINK:
2618 		switch (addr_type) {
2619 		case ADDR_LE_DEV_PUBLIC:
2620 			return BDADDR_LE_PUBLIC;
2621 
2622 		default:
2623 			/* Fallback to LE Random address type */
2624 			return BDADDR_LE_RANDOM;
2625 		}
2626 
2627 	default:
2628 		/* Fallback to BR/EDR type */
2629 		return BDADDR_BREDR;
2630 	}
2631 }
2632 
/* Get Connections (MGMT_OP_GET_CONNECTIONS) handler.
 *
 * Returns the addresses of all connections that mgmt considers
 * connected (HCI_CONN_MGMT_CONNECTED). SCO/eSCO links are filtered
 * out of the reply.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count entries to size the reply allocation */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. For SCO/eSCO links the
	 * slot written below is simply reused by the next entry since
	 * i is not advanced, so they never appear in the reply.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2686 
2687 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2688 				   struct mgmt_cp_pin_code_neg_reply *cp)
2689 {
2690 	struct mgmt_pending_cmd *cmd;
2691 	int err;
2692 
2693 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2694 			       sizeof(*cp));
2695 	if (!cmd)
2696 		return -ENOMEM;
2697 
2698 	cmd->cmd_complete = addr_cmd_complete;
2699 
2700 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2701 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2702 	if (err < 0)
2703 		mgmt_pending_remove(cmd);
2704 
2705 	return err;
2706 }
2707 
/* PIN Code Reply (MGMT_OP_PIN_CODE_REPLY) handler.
 *
 * Forwards the user-supplied PIN to the controller. If the pending
 * security level demands a 16-digit PIN and the supplied one is
 * shorter, a negative reply is sent to the controller instead and the
 * command fails with Invalid Params.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; reject shorter ones
	 * by negatively replying to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2769 
2770 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2771 			     u16 len)
2772 {
2773 	struct mgmt_cp_set_io_capability *cp = data;
2774 
2775 	bt_dev_dbg(hdev, "sock %p", sk);
2776 
2777 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2778 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2779 				       MGMT_STATUS_INVALID_PARAMS);
2780 
2781 	hci_dev_lock(hdev);
2782 
2783 	hdev->io_capability = cp->io_capability;
2784 
2785 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2786 
2787 	hci_dev_unlock(hdev);
2788 
2789 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2790 				 NULL, 0);
2791 }
2792 
2793 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2794 {
2795 	struct hci_dev *hdev = conn->hdev;
2796 	struct mgmt_pending_cmd *cmd;
2797 
2798 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2799 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2800 			continue;
2801 
2802 		if (cmd->user_data != conn)
2803 			continue;
2804 
2805 		return cmd;
2806 	}
2807 
2808 	return NULL;
2809 }
2810 
/* Finalize a Pair Device command: reply to userspace with the peer
 * address and status, detach the pairing callbacks from the connection
 * and release the references taken when pairing started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() done when pairing was set up */
	hci_conn_put(conn);

	return err;
}
2839 
2840 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2841 {
2842 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2843 	struct mgmt_pending_cmd *cmd;
2844 
2845 	cmd = find_pairing(conn);
2846 	if (cmd) {
2847 		cmd->cmd_complete(cmd, status);
2848 		mgmt_pending_remove(cmd);
2849 	}
2850 }
2851 
2852 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2853 {
2854 	struct mgmt_pending_cmd *cmd;
2855 
2856 	BT_DBG("status %u", status);
2857 
2858 	cmd = find_pairing(conn);
2859 	if (!cmd) {
2860 		BT_DBG("Unable to find a pending command");
2861 		return;
2862 	}
2863 
2864 	cmd->cmd_complete(cmd, mgmt_status(status));
2865 	mgmt_pending_remove(cmd);
2866 }
2867 
2868 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2869 {
2870 	struct mgmt_pending_cmd *cmd;
2871 
2872 	BT_DBG("status %u", status);
2873 
2874 	if (!status)
2875 		return;
2876 
2877 	cmd = find_pairing(conn);
2878 	if (!cmd) {
2879 		BT_DBG("Unable to find a pending command");
2880 		return;
2881 	}
2882 
2883 	cmd->cmd_complete(cmd, mgmt_status(status));
2884 	mgmt_pending_remove(cmd);
2885 }
2886 
2887 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2888 		       u16 len)
2889 {
2890 	struct mgmt_cp_pair_device *cp = data;
2891 	struct mgmt_rp_pair_device rp;
2892 	struct mgmt_pending_cmd *cmd;
2893 	u8 sec_level, auth_type;
2894 	struct hci_conn *conn;
2895 	int err;
2896 
2897 	bt_dev_dbg(hdev, "sock %p", sk);
2898 
2899 	memset(&rp, 0, sizeof(rp));
2900 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2901 	rp.addr.type = cp->addr.type;
2902 
2903 	if (!bdaddr_type_is_valid(cp->addr.type))
2904 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2905 					 MGMT_STATUS_INVALID_PARAMS,
2906 					 &rp, sizeof(rp));
2907 
2908 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2909 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2910 					 MGMT_STATUS_INVALID_PARAMS,
2911 					 &rp, sizeof(rp));
2912 
2913 	hci_dev_lock(hdev);
2914 
2915 	if (!hdev_is_powered(hdev)) {
2916 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2917 					MGMT_STATUS_NOT_POWERED, &rp,
2918 					sizeof(rp));
2919 		goto unlock;
2920 	}
2921 
2922 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2923 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2924 					MGMT_STATUS_ALREADY_PAIRED, &rp,
2925 					sizeof(rp));
2926 		goto unlock;
2927 	}
2928 
2929 	sec_level = BT_SECURITY_MEDIUM;
2930 	auth_type = HCI_AT_DEDICATED_BONDING;
2931 
2932 	if (cp->addr.type == BDADDR_BREDR) {
2933 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2934 				       auth_type, CONN_REASON_PAIR_DEVICE);
2935 	} else {
2936 		u8 addr_type = le_addr_type(cp->addr.type);
2937 		struct hci_conn_params *p;
2938 
2939 		/* When pairing a new device, it is expected to remember
2940 		 * this device for future connections. Adding the connection
2941 		 * parameter information ahead of time allows tracking
2942 		 * of the slave preferred values and will speed up any
2943 		 * further connection establishment.
2944 		 *
2945 		 * If connection parameters already exist, then they
2946 		 * will be kept and this function does nothing.
2947 		 */
2948 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2949 
2950 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2951 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2952 
2953 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2954 					   sec_level, HCI_LE_CONN_TIMEOUT,
2955 					   CONN_REASON_PAIR_DEVICE);
2956 	}
2957 
2958 	if (IS_ERR(conn)) {
2959 		int status;
2960 
2961 		if (PTR_ERR(conn) == -EBUSY)
2962 			status = MGMT_STATUS_BUSY;
2963 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
2964 			status = MGMT_STATUS_NOT_SUPPORTED;
2965 		else if (PTR_ERR(conn) == -ECONNREFUSED)
2966 			status = MGMT_STATUS_REJECTED;
2967 		else
2968 			status = MGMT_STATUS_CONNECT_FAILED;
2969 
2970 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2971 					status, &rp, sizeof(rp));
2972 		goto unlock;
2973 	}
2974 
2975 	if (conn->connect_cfm_cb) {
2976 		hci_conn_drop(conn);
2977 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2978 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2979 		goto unlock;
2980 	}
2981 
2982 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2983 	if (!cmd) {
2984 		err = -ENOMEM;
2985 		hci_conn_drop(conn);
2986 		goto unlock;
2987 	}
2988 
2989 	cmd->cmd_complete = pairing_complete;
2990 
2991 	/* For LE, just connecting isn't a proof that the pairing finished */
2992 	if (cp->addr.type == BDADDR_BREDR) {
2993 		conn->connect_cfm_cb = pairing_complete_cb;
2994 		conn->security_cfm_cb = pairing_complete_cb;
2995 		conn->disconn_cfm_cb = pairing_complete_cb;
2996 	} else {
2997 		conn->connect_cfm_cb = le_pairing_complete_cb;
2998 		conn->security_cfm_cb = le_pairing_complete_cb;
2999 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3000 	}
3001 
3002 	conn->io_capability = cp->io_cap;
3003 	cmd->user_data = hci_conn_get(conn);
3004 
3005 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3006 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3007 		cmd->cmd_complete(cmd, 0);
3008 		mgmt_pending_remove(cmd);
3009 	}
3010 
3011 	err = 0;
3012 
3013 unlock:
3014 	hci_dev_unlock(hdev);
3015 	return err;
3016 }
3017 
/* Cancel Pair Device (MGMT_OP_CANCEL_PAIR_DEVICE) handler.
 *
 * Aborts the currently pending Pair Device command for the given
 * address: completes it with Cancelled status, removes any pairing
 * material created so far, and tears down the link if it was created
 * solely for the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the one the pairing was started with */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3074 
/* Shared backend for all user pairing responses (PIN neg reply, user
 * confirm reply/neg reply, passkey reply/neg reply).
 *
 * LE responses are handed to SMP directly and answered synchronously;
 * BR/EDR responses are forwarded to the controller as the HCI command
 * @hci_op and a pending mgmt command of type @mgmt_op is kept until the
 * controller responds. @passkey is only used for passkey replies.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP, not the controller */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3145 
3146 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3147 			      void *data, u16 len)
3148 {
3149 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3150 
3151 	bt_dev_dbg(hdev, "sock %p", sk);
3152 
3153 	return user_pairing_resp(sk, hdev, &cp->addr,
3154 				MGMT_OP_PIN_CODE_NEG_REPLY,
3155 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3156 }
3157 
3158 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3159 			      u16 len)
3160 {
3161 	struct mgmt_cp_user_confirm_reply *cp = data;
3162 
3163 	bt_dev_dbg(hdev, "sock %p", sk);
3164 
3165 	if (len != sizeof(*cp))
3166 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3167 				       MGMT_STATUS_INVALID_PARAMS);
3168 
3169 	return user_pairing_resp(sk, hdev, &cp->addr,
3170 				 MGMT_OP_USER_CONFIRM_REPLY,
3171 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3172 }
3173 
3174 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3175 				  void *data, u16 len)
3176 {
3177 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3178 
3179 	bt_dev_dbg(hdev, "sock %p", sk);
3180 
3181 	return user_pairing_resp(sk, hdev, &cp->addr,
3182 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3183 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3184 }
3185 
3186 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3187 			      u16 len)
3188 {
3189 	struct mgmt_cp_user_passkey_reply *cp = data;
3190 
3191 	bt_dev_dbg(hdev, "sock %p", sk);
3192 
3193 	return user_pairing_resp(sk, hdev, &cp->addr,
3194 				 MGMT_OP_USER_PASSKEY_REPLY,
3195 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3196 }
3197 
3198 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3199 				  void *data, u16 len)
3200 {
3201 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3202 
3203 	bt_dev_dbg(hdev, "sock %p", sk);
3204 
3205 	return user_pairing_resp(sk, hdev, &cp->addr,
3206 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3207 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3208 }
3209 
/* Expire the current advertising instance if it carries data affected
 * by @flags (e.g. local name or appearance just changed), and move on
 * to the next instance so the stale data stops being advertised.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3238 
/* HCI request completion handler for MGMT_OP_SET_LOCAL_NAME.
 * Replies to the pending mgmt command with status/complete and, on
 * success, expires any advertising instance containing the local name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* The name lives in the scan response data; refresh it */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3270 
/* Handle MGMT_OP_SET_LOCAL_NAME: update the device name and short
 * name.  If nothing changed, or the controller is powered off, reply
 * immediately; otherwise queue HCI requests to push the new name (and
 * EIR / scan response data) to the controller, completing from
 * set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* When powered off the name only needs to be stored locally */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3340 
3341 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3342 			  u16 len)
3343 {
3344 	struct mgmt_cp_set_appearance *cp = data;
3345 	u16 appearance;
3346 	int err;
3347 
3348 	bt_dev_dbg(hdev, "sock %p", sk);
3349 
3350 	if (!lmp_le_capable(hdev))
3351 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3352 				       MGMT_STATUS_NOT_SUPPORTED);
3353 
3354 	appearance = le16_to_cpu(cp->appearance);
3355 
3356 	hci_dev_lock(hdev);
3357 
3358 	if (hdev->appearance != appearance) {
3359 		hdev->appearance = appearance;
3360 
3361 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3362 			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3363 
3364 		ext_info_changed(hdev, sk);
3365 	}
3366 
3367 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3368 				0);
3369 
3370 	hci_dev_unlock(hdev);
3371 
3372 	return err;
3373 }
3374 
3375 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3376 				 void *data, u16 len)
3377 {
3378 	struct mgmt_rp_get_phy_confguration rp;
3379 
3380 	bt_dev_dbg(hdev, "sock %p", sk);
3381 
3382 	hci_dev_lock(hdev);
3383 
3384 	memset(&rp, 0, sizeof(rp));
3385 
3386 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3387 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3388 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3389 
3390 	hci_dev_unlock(hdev);
3391 
3392 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3393 				 &rp, sizeof(rp));
3394 }
3395 
/* Broadcast MGMT_EV_PHY_CONFIGURATION_CHANGED, carrying the currently
 * selected PHYs, to every mgmt socket except @skip.
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3407 
/* HCI request completion handler for the LE part of
 * MGMT_OP_SET_PHY_CONFIGURATION (HCI_OP_LE_SET_DEFAULT_PHY).  Replies
 * to the pending command and, on success, broadcasts the PHY change.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3438 
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: apply the requested BR/EDR
 * packet types synchronously and, when the LE PHY selection changed,
 * queue HCI_OP_LE_SET_DEFAULT_PHY, completing asynchronously from
 * set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs must always remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selection onto the ACL packet type mask.
	 * Basic-rate multi-slot bits are "enable" bits while the EDR
	 * bits are "disable" bits, hence the inverted handling below.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR selection changed, there is nothing to send
	 * to the controller via HCI; reply right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller "no preference" per direction */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3593 
3594 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3595 			    u16 len)
3596 {
3597 	int err = MGMT_STATUS_SUCCESS;
3598 	struct mgmt_cp_set_blocked_keys *keys = data;
3599 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3600 				   sizeof(struct mgmt_blocked_key_info));
3601 	u16 key_count, expected_len;
3602 	int i;
3603 
3604 	bt_dev_dbg(hdev, "sock %p", sk);
3605 
3606 	key_count = __le16_to_cpu(keys->key_count);
3607 	if (key_count > max_key_count) {
3608 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3609 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3610 				       MGMT_STATUS_INVALID_PARAMS);
3611 	}
3612 
3613 	expected_len = struct_size(keys, keys, key_count);
3614 	if (expected_len != len) {
3615 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3616 			   expected_len, len);
3617 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3618 				       MGMT_STATUS_INVALID_PARAMS);
3619 	}
3620 
3621 	hci_dev_lock(hdev);
3622 
3623 	hci_blocked_keys_clear(hdev);
3624 
3625 	for (i = 0; i < keys->key_count; ++i) {
3626 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3627 
3628 		if (!b) {
3629 			err = MGMT_STATUS_NO_RESOURCES;
3630 			break;
3631 		}
3632 
3633 		b->type = keys->keys[i].type;
3634 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3635 		list_add_rcu(&b->list, &hdev->blocked_keys);
3636 	}
3637 	hci_dev_unlock(hdev);
3638 
3639 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3640 				err, NULL, 0);
3641 }
3642 
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband speech
 * setting.  Only allowed while the controller is powered off (or when
 * the value does not actually change); emits New Settings on change.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Changing the setting while powered would require extra HCI
	 * handling, so it is only permitted when the value stays the same.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3698 
/* Handle MGMT_OP_READ_SECURITY_INFO: report security capabilities as a
 * list of EIR-style (len, type, value) entries.  buf[16] must hold the
 * reply header plus at most 3 + 4 + 4 = 11 bytes of entries appended
 * below.
 */
static int read_security_info(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	char buf[16];
	struct mgmt_rp_read_security_info *rp = (void *)buf;
	u16 sec_len = 0;
	u8 flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
					  hdev->max_enc_key_size);

	sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);

	rp->sec_len = cpu_to_le16(sec_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
				 rp, sizeof(*rp) + sec_len);
}
3747 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	/* The bytes are the UUID above in reverse (little-endian) order */
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif
3755 
/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
	/* The bytes are the UUID above in reverse (little-endian) order */
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};
3761 
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO: list experimental features.
 * Called with hdev == NULL for the non-controller index.  buf[44]
 * holds the reply header (2 bytes) plus up to two 20-byte entries
 * (16-byte UUID + 4-byte flags each).
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[44];
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* The debug feature is only exposed on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3809 
3810 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Broadcast MGMT_EV_EXP_FEATURE_CHANGED for the debug feature UUID to
 * all sockets that enabled HCI_MGMT_EXP_FEATURE_EVENTS, except @skip.
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
3823 #endif
3824 
/* Handle MGMT_OP_SET_EXP_FEATURE: toggle an experimental feature.
 * A zero UUID disables all experimental features; otherwise only the
 * debug feature UUID is recognized (when CONFIG_BT_FEATURE_DEBUG).
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	struct mgmt_rp_set_exp_feature rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Zero UUID means "disable every experimental feature" */
	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
		memset(rp.uuid, 0, 16);
		rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
		if (!hdev) {
			bool changed = bt_dbg_get();

			bt_dbg_set(false);

			if (changed)
				exp_debug_feature_changed(false, sk);
		}
#endif

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
					 MGMT_OP_SET_EXP_FEATURE, 0,
					 &rp, sizeof(rp));
	}

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!memcmp(cp->uuid, debug_uuid, 16)) {
		bool val, changed;
		int err;

		/* Command requires to use the non-controller index */
		if (hdev)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_INDEX);

		/* Parameters are limited to a single octet */
		if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Only boolean on/off is supported */
		if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
			return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
					       MGMT_OP_SET_EXP_FEATURE,
					       MGMT_STATUS_INVALID_PARAMS);

		val = !!cp->param[0];
		changed = val ? !bt_dbg_get() : bt_dbg_get();
		bt_dbg_set(val);

		memcpy(rp.uuid, debug_uuid, 16);
		rp.flags = cpu_to_le32(val ? BIT(0) : 0);

		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

		err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
					MGMT_OP_SET_EXP_FEATURE, 0,
					&rp, sizeof(rp));

		if (changed)
			exp_debug_feature_changed(val, sk);

		return err;
	}
#endif

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
3902 
3903 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
3904 
3905 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
3906 			    u16 data_len)
3907 {
3908 	struct mgmt_cp_get_device_flags *cp = data;
3909 	struct mgmt_rp_get_device_flags rp;
3910 	struct bdaddr_list_with_flags *br_params;
3911 	struct hci_conn_params *params;
3912 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
3913 	u32 current_flags = 0;
3914 	u8 status = MGMT_STATUS_INVALID_PARAMS;
3915 
3916 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
3917 		   &cp->addr.bdaddr, cp->addr.type);
3918 
3919 	hci_dev_lock(hdev);
3920 
3921 	if (cp->addr.type == BDADDR_BREDR) {
3922 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
3923 							      &cp->addr.bdaddr,
3924 							      cp->addr.type);
3925 		if (!br_params)
3926 			goto done;
3927 
3928 		current_flags = br_params->current_flags;
3929 	} else {
3930 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
3931 						le_addr_type(cp->addr.type));
3932 
3933 		if (!params)
3934 			goto done;
3935 
3936 		current_flags = params->current_flags;
3937 	}
3938 
3939 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3940 	rp.addr.type = cp->addr.type;
3941 	rp.supported_flags = cpu_to_le32(supported_flags);
3942 	rp.current_flags = cpu_to_le32(current_flags);
3943 
3944 	status = MGMT_STATUS_SUCCESS;
3945 
3946 done:
3947 	hci_dev_unlock(hdev);
3948 
3949 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
3950 				&rp, sizeof(rp));
3951 }
3952 
3953 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
3954 				 bdaddr_t *bdaddr, u8 bdaddr_type,
3955 				 u32 supported_flags, u32 current_flags)
3956 {
3957 	struct mgmt_ev_device_flags_changed ev;
3958 
3959 	bacpy(&ev.addr.bdaddr, bdaddr);
3960 	ev.addr.type = bdaddr_type;
3961 	ev.supported_flags = cpu_to_le32(supported_flags);
3962 	ev.current_flags = cpu_to_le32(current_flags);
3963 
3964 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
3965 }
3966 
3967 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
3968 			    u16 len)
3969 {
3970 	struct mgmt_cp_set_device_flags *cp = data;
3971 	struct bdaddr_list_with_flags *br_params;
3972 	struct hci_conn_params *params;
3973 	u8 status = MGMT_STATUS_INVALID_PARAMS;
3974 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
3975 	u32 current_flags = __le32_to_cpu(cp->current_flags);
3976 
3977 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
3978 		   &cp->addr.bdaddr, cp->addr.type,
3979 		   __le32_to_cpu(current_flags));
3980 
3981 	if ((supported_flags | current_flags) != supported_flags) {
3982 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
3983 			    current_flags, supported_flags);
3984 		goto done;
3985 	}
3986 
3987 	hci_dev_lock(hdev);
3988 
3989 	if (cp->addr.type == BDADDR_BREDR) {
3990 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
3991 							      &cp->addr.bdaddr,
3992 							      cp->addr.type);
3993 
3994 		if (br_params) {
3995 			br_params->current_flags = current_flags;
3996 			status = MGMT_STATUS_SUCCESS;
3997 		} else {
3998 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
3999 				    &cp->addr.bdaddr, cp->addr.type);
4000 		}
4001 	} else {
4002 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4003 						le_addr_type(cp->addr.type));
4004 		if (params) {
4005 			params->current_flags = current_flags;
4006 			status = MGMT_STATUS_SUCCESS;
4007 		} else {
4008 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4009 				    &cp->addr.bdaddr,
4010 				    le_addr_type(cp->addr.type));
4011 		}
4012 	}
4013 
4014 done:
4015 	hci_dev_unlock(hdev);
4016 
4017 	if (status == MGMT_STATUS_SUCCESS)
4018 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4019 				     supported_flags, current_flags);
4020 
4021 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4022 				 &cp->addr, sizeof(cp->addr));
4023 }
4024 
4025 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4026 				   u16 handle)
4027 {
4028 	struct mgmt_ev_adv_monitor_added ev;
4029 
4030 	ev.monitor_handle = cpu_to_le16(handle);
4031 
4032 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4033 }
4034 
4035 static void mgmt_adv_monitor_removed(struct sock *sk, struct hci_dev *hdev,
4036 				     u16 handle)
4037 {
4038 	struct mgmt_ev_adv_monitor_added ev;
4039 
4040 	ev.monitor_handle = cpu_to_le16(handle);
4041 
4042 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk);
4043 }
4044 
4045 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4046 				 void *data, u16 len)
4047 {
4048 	struct adv_monitor *monitor = NULL;
4049 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4050 	int handle;
4051 	size_t rp_size = 0;
4052 	__u32 supported = 0;
4053 	__u16 num_handles = 0;
4054 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4055 
4056 	BT_DBG("request for %s", hdev->name);
4057 
4058 	hci_dev_lock(hdev);
4059 
4060 	if (msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR)
4061 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4062 
4063 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) {
4064 		handles[num_handles++] = monitor->handle;
4065 	}
4066 
4067 	hci_dev_unlock(hdev);
4068 
4069 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4070 	rp = kmalloc(rp_size, GFP_KERNEL);
4071 	if (!rp)
4072 		return -ENOMEM;
4073 
4074 	/* Once controller-based monitoring is in place, the enabled_features
4075 	 * should reflect the use.
4076 	 */
4077 	rp->supported_features = cpu_to_le32(supported);
4078 	rp->enabled_features = 0;
4079 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4080 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4081 	rp->num_handles = cpu_to_le16(num_handles);
4082 	if (num_handles)
4083 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4084 
4085 	return mgmt_cmd_complete(sk, hdev->id,
4086 				 MGMT_OP_READ_ADV_MONITOR_FEATURES,
4087 				 MGMT_STATUS_SUCCESS, rp, rp_size);
4088 }
4089 
/* Handle MGMT_OP_ADD_ADV_PATTERNS_MONITOR: validate the supplied
 * advertisement patterns, build an adv_monitor and register it with
 * the core.  Replies with the assigned monitor handle on success.
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct adv_monitor *m = NULL;
	struct adv_pattern *p = NULL;
	unsigned int mp_cnt = 0, prev_adv_monitors_cnt;
	__u8 cp_ofst = 0, cp_len = 0;
	int err, i;

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp) || cp->pattern_count == 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	m = kmalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		err = -ENOMEM;
		goto failed;
	}

	INIT_LIST_HEAD(&m->patterns);
	m->active = false;

	for (i = 0; i < cp->pattern_count; i++) {
		/* Cap the total number of patterns per monitor */
		if (++mp_cnt > HCI_MAX_ADV_MONITOR_NUM_PATTERNS) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					      MGMT_STATUS_INVALID_PARAMS);
			goto failed;
		}

		cp_ofst = cp->patterns[i].offset;
		cp_len = cp->patterns[i].length;
		/* Pattern must fit entirely inside one AD structure */
		if (cp_ofst >= HCI_MAX_AD_LENGTH ||
		    cp_len > HCI_MAX_AD_LENGTH ||
		    (cp_ofst + cp_len) > HCI_MAX_AD_LENGTH) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					      MGMT_STATUS_INVALID_PARAMS);
			goto failed;
		}

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p) {
			err = -ENOMEM;
			goto failed;
		}

		p->ad_type = cp->patterns[i].ad_type;
		p->offset = cp->patterns[i].offset;
		p->length = cp->patterns[i].length;
		memcpy(p->value, cp->patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	/* Defensive: the loop increments mp_cnt exactly once per pattern,
	 * so this should not trigger unless the logic above changes.
	 */
	if (mp_cnt != cp->pattern_count) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				      MGMT_STATUS_INVALID_PARAMS);
		goto failed;
	}

	hci_dev_lock(hdev);

	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;

	err = hci_add_adv_monitor(hdev, m);
	if (err) {
		if (err == -ENOSPC) {
			mgmt_cmd_status(sk, hdev->id,
					MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
					MGMT_STATUS_NO_RESOURCES);
		}
		goto unlock;
	}

	if (hdev->adv_monitors_cnt > prev_adv_monitors_cnt)
		mgmt_adv_monitor_added(sk, hdev, m->handle);

	hci_dev_unlock(hdev);

	rp.monitor_handle = cpu_to_le16(m->handle);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);

failed:
	/* NOTE(review): this assumes hci_free_adv_monitor() tolerates a
	 * NULL argument and that a failing hci_add_adv_monitor() leaves
	 * ownership of 'm' with the caller — confirm against hci_core.
	 */
	hci_free_adv_monitor(m);
	return err;
}
4191 
/* Handle MGMT_OP_REMOVE_ADV_MONITOR: unregister the monitor with the
 * given handle and notify other mgmt listeners of the removal.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	unsigned int prev_adv_monitors_cnt;
	u16 handle;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	handle = __le16_to_cpu(cp->monitor_handle);
	prev_adv_monitors_cnt = hdev->adv_monitors_cnt;

	/* -ENOENT means no monitor is registered under this handle */
	err = hci_remove_adv_monitor(hdev, handle);
	if (err == -ENOENT) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				      MGMT_STATUS_INVALID_INDEX);
		goto unlock;
	}

	if (hdev->adv_monitors_cnt < prev_adv_monitors_cnt)
		mgmt_adv_monitor_removed(sk, hdev, handle);

	hci_dev_unlock(hdev);

	/* Echo back the handle in its original wire (little-endian) form */
	rp.monitor_handle = cp->monitor_handle;

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4229 
/* HCI request completion handler for MGMT_OP_READ_LOCAL_OOB_DATA.
 * Translates either the legacy (P-192 only) or the extended
 * (P-192 + P-256) controller response into the mgmt reply, shrinking
 * the reply to the 192-bit fields in the legacy case.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy response has no P-256 values; trim them off */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4288 
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA. Queues an HCI request to
 * read the local OOB data (the extended variant when BR/EDR Secure
 * Connections is enabled); the reply is delivered asynchronously via
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* The controller must be powered to execute HCI commands */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one outstanding request at a time is allowed */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4339 
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA. Two command sizes are
 * accepted: the legacy size carrying only P-192 hash/randomizer (valid
 * for BR/EDR addresses only) and the extended size carrying both P-192
 * and P-256 values. All-zero hash/randomizer pairs disable the
 * corresponding OOB data set. Any other length is rejected.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy (P-192 only) variant is BR/EDR specific */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4447 
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA. Removes stored remote
 * OOB data for a single BR/EDR address, or clears all stored entries
 * when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Remote OOB data is only stored for BR/EDR addresses */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* BDADDR_ANY acts as a wildcard and clears everything */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
4484 
/* Called when the HCI portion of starting discovery has completed.
 * Completes whichever start-discovery variant is pending (regular,
 * service or limited) and wakes the suspend machinery if it was
 * waiting for discovery to be unpaused.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* Only one of the three variants can be pending at a time */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4514 
4515 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4516 				    uint8_t *mgmt_status)
4517 {
4518 	switch (type) {
4519 	case DISCOV_TYPE_LE:
4520 		*mgmt_status = mgmt_le_support(hdev);
4521 		if (*mgmt_status)
4522 			return false;
4523 		break;
4524 	case DISCOV_TYPE_INTERLEAVED:
4525 		*mgmt_status = mgmt_le_support(hdev);
4526 		if (*mgmt_status)
4527 			return false;
4528 		/* Intentional fall-through */
4529 	case DISCOV_TYPE_BREDR:
4530 		*mgmt_status = mgmt_bredr_support(hdev);
4531 		if (*mgmt_status)
4532 			return false;
4533 		break;
4534 	default:
4535 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4536 		return false;
4537 	}
4538 
4539 	return true;
4540 }
4541 
/* Common handler for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects the opcode used in
 * replies). Validates state, records the discovery parameters and
 * kicks off the actual work on the req_workqueue; completion is
 * reported via mgmt_start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Discovery must be idle and periodic inquiry not running */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The HCI work itself runs asynchronously from the workqueue */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4609 
/* Handler for MGMT_OP_START_DISCOVERY (regular, non-limited variant) */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
4616 
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY (limited variant) */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
4624 
/* Command-complete helper for Start Service Discovery: only the first
 * byte of the stored command parameters (the discovery type) is echoed
 * back in the response.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
4631 
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY. Like regular discovery
 * but with result filtering on RSSI and a variable-length list of
 * service UUIDs appended to the fixed command structure.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound keeping sizeof(*cp) + uuid_count * 16 within u16,
	 * so the expected_len computation below cannot wrap.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Discovery must be idle and periodic inquiry not running */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must exactly match the declared UUID count */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep a private copy of the UUID filter list; it is
		 * freed again by hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* The HCI work itself runs asynchronously from the workqueue */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4732 
/* Called when the HCI portion of stopping discovery has completed.
 * Completes a pending Stop Discovery command, if any, and wakes the
 * suspend machinery if it was waiting for discovery to be paused.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4755 
/* Handler for MGMT_OP_STOP_DISCOVERY. The supplied type must match
 * the type of the currently running discovery; the actual stopping is
 * done asynchronously via the req_workqueue and completed in
 * mgmt_stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type to stop must match the type that was started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4797 
/* Handler for MGMT_OP_CONFIRM_NAME. Lets user space indicate during
 * discovery whether the name of a found device is already known, so
 * the inquiry cache can skip or schedule remote name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense while discovering */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* The address must refer to a cache entry awaiting name info */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Known name: no resolution needed, drop from resolve list */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Unknown name: queue the entry for name resolution */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
4839 
4840 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4841 			u16 len)
4842 {
4843 	struct mgmt_cp_block_device *cp = data;
4844 	u8 status;
4845 	int err;
4846 
4847 	bt_dev_dbg(hdev, "sock %p", sk);
4848 
4849 	if (!bdaddr_type_is_valid(cp->addr.type))
4850 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4851 					 MGMT_STATUS_INVALID_PARAMS,
4852 					 &cp->addr, sizeof(cp->addr));
4853 
4854 	hci_dev_lock(hdev);
4855 
4856 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4857 				  cp->addr.type);
4858 	if (err < 0) {
4859 		status = MGMT_STATUS_FAILED;
4860 		goto done;
4861 	}
4862 
4863 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4864 		   sk);
4865 	status = MGMT_STATUS_SUCCESS;
4866 
4867 done:
4868 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4869 				&cp->addr, sizeof(cp->addr));
4870 
4871 	hci_dev_unlock(hdev);
4872 
4873 	return err;
4874 }
4875 
4876 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4877 			  u16 len)
4878 {
4879 	struct mgmt_cp_unblock_device *cp = data;
4880 	u8 status;
4881 	int err;
4882 
4883 	bt_dev_dbg(hdev, "sock %p", sk);
4884 
4885 	if (!bdaddr_type_is_valid(cp->addr.type))
4886 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4887 					 MGMT_STATUS_INVALID_PARAMS,
4888 					 &cp->addr, sizeof(cp->addr));
4889 
4890 	hci_dev_lock(hdev);
4891 
4892 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4893 				  cp->addr.type);
4894 	if (err < 0) {
4895 		status = MGMT_STATUS_INVALID_PARAMS;
4896 		goto done;
4897 	}
4898 
4899 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4900 		   sk);
4901 	status = MGMT_STATUS_SUCCESS;
4902 
4903 done:
4904 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4905 				&cp->addr, sizeof(cp->addr));
4906 
4907 	hci_dev_unlock(hdev);
4908 
4909 	return err;
4910 }
4911 
/* Handler for MGMT_OP_SET_DEVICE_ID. Stores the Device ID values and
 * refreshes the EIR data so the new Device ID record is advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are accepted */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* Refresh EIR so it carries the new Device ID record */
	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
4946 
/* Request-complete callback used when re-enabling instance
 * advertising; only logs the resulting status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}
4952 
/* Completion handler for the HCI request issued by set_advertising().
 * Updates the HCI_ADVERTISING flag from the actual controller state,
 * answers all pending Set Advertising commands, emits New Settings,
 * and re-schedules instance advertising if Set Advertising was just
 * turned off while advertising instances exist.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail all pending Set Advertising commands */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the actual controller advertising state in the flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* Fall back to the first configured instance when none is current */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5026 
/* Handler for MGMT_OP_SET_ADVERTISING. Accepted values are 0x00
 * (disable), 0x01 (enable) and 0x02 (enable connectable advertising).
 * When HCI communication is unnecessary or impossible the flags are
 * toggled directly; otherwise an HCI request is issued and completed
 * in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No changes while advertising is paused for suspend */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5138 
/* Handler for MGMT_OP_SET_STATIC_ADDRESS. Configures the LE static
 * random address; only allowed while the controller is powered off.
 * BDADDR_ANY clears the configured address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* The address may only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5182 
/* Handler for MGMT_OP_SET_SCAN_PARAMS. Stores new LE scan interval
 * and window (both limited to the 0x0004-0x4000 range, window never
 * larger than interval) and restarts a running background scan so the
 * new parameters take effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must fit within the scan interval */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
5239 
/* Completion handler for the HCI request issued by
 * set_fast_connectable(). On success updates the flag from the stored
 * command parameter and broadcasts New Settings; on failure reports
 * the mapped status to the command's socket.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* The requested mode was stored with the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5273 
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE. Requires BR/EDR to be
 * enabled and a controller of at least Bluetooth 1.2. When powered,
 * the page scan parameters are rewritten via an HCI request completed
 * in fast_connectable_complete(); when powered off only the flag is
 * toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No-op if the flag already has the requested value */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off only the flag needs updating */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5338 
/* Completion handler for the HCI request issued by set_bredr(). On
 * failure the HCI_BREDR_ENABLED flag is rolled back; on success the
 * updated settings are sent to the command's socket and broadcast.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5370 
/* Handle the MGMT_OP_SET_BREDR command: enable or disable BR/EDR support
 * on a dual-mode (BR/EDR + LE) controller.
 *
 * Requires both BR/EDR and LE capability and LE currently enabled.
 * When powered off only the flag is toggled; when powered on, disabling
 * is rejected and enabling triggers an HCI request whose result is
 * handled in set_bredr_complete().
 *
 * Returns 0 or a negative errno; command status/complete is sent to @sk.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	/* Only boolean values are valid for this command */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No state change requested: just reply with current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Turning BR/EDR off also clears all BR/EDR-only
		 * settings that would otherwise be left dangling.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	/* Only one Set BR/EDR operation may be in flight at a time */
	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5482 
/* Completion callback for the HCI Write Secure Connections Support
 * command issued by set_secure_conn().
 *
 * On success the HCI_SC_ENABLED/HCI_SC_ONLY flags are updated to match
 * the requested mode (0x00 = off, 0x01 = enabled, 0x02 = SC only) and a
 * New Settings event is emitted; on failure only an error status is
 * returned to the requesting socket.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	/* The originally requested mode was stored with the pending cmd */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5527 
/* Handle the MGMT_OP_SET_SECURE_CONN command: configure the Secure
 * Connections mode (0x00 = off, 0x01 = enabled, 0x02 = SC only).
 *
 * If the controller is powered off, lacks BR/EDR SC support, or has
 * BR/EDR disabled, only the host flags are toggled. Otherwise the
 * Write Secure Connections Support HCI command is sent and the flags
 * are updated in sc_enable_complete().
 *
 * Returns 0 or a negative errno; command status/complete is sent to @sk.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On an SC-capable controller with BR/EDR enabled, SC requires
	 * SSP to be enabled first.
	 */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: no HCI command is needed in these states */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Requested mode already active: reply without sending HCI cmd */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5615 
/* Handle the MGMT_OP_SET_DEBUG_KEYS command (0x00 = discard debug keys,
 * 0x01 = keep debug keys, 0x02 = keep and actively use debug keys).
 *
 * HCI_KEEP_DEBUG_KEYS tracks values 0x01/0x02, HCI_USE_DEBUG_KEYS only
 * 0x02. If the "use" state changed while powered with SSP enabled, the
 * controller's SSP debug mode is updated with an HCI command.
 *
 * Returns 0 or a negative errno; settings response is sent to @sk.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Propagate the debug-key usage change to the controller; this
	 * is fire-and-forget, the result is not waited for.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5662 
/* Handle the MGMT_OP_SET_PRIVACY command (0x00 = off, 0x01 = privacy
 * enabled, 0x02 = limited privacy) and install the supplied IRK.
 *
 * Only allowed while the controller is powered off, on LE-capable
 * hardware. Enabling privacy stores the IRK and marks the RPA as
 * expired so a fresh one is generated; disabling wipes the IRK.
 *
 * Returns 0 or a negative errno; settings response is sent to @sk.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Changing the privacy mode is only allowed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5719 
5720 static bool irk_is_valid(struct mgmt_irk_info *irk)
5721 {
5722 	switch (irk->addr.type) {
5723 	case BDADDR_LE_PUBLIC:
5724 		return true;
5725 
5726 	case BDADDR_LE_RANDOM:
5727 		/* Two most significant bits shall be set */
5728 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5729 			return false;
5730 		return true;
5731 	}
5732 
5733 	return false;
5734 }
5735 
/* Handle the MGMT_OP_LOAD_IRKS command: replace the kernel's list of
 * Identity Resolving Keys with the list supplied by user space.
 *
 * The whole payload is validated (count bound, exact length, address
 * types) before any state is touched; only then is the existing IRK
 * list cleared and rebuilt. Keys on the blocked-key list are skipped
 * with a warning rather than failing the whole command.
 *
 * Returns 0 or a negative errno; command complete is sent to @sk.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound so that struct_size() below cannot overflow u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before clearing the existing list so a
	 * bad payload leaves the current keys untouched.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space that loads IRKs is expected to resolve RPAs */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5806 
5807 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5808 {
5809 	if (key->master != 0x00 && key->master != 0x01)
5810 		return false;
5811 
5812 	switch (key->addr.type) {
5813 	case BDADDR_LE_PUBLIC:
5814 		return true;
5815 
5816 	case BDADDR_LE_RANDOM:
5817 		/* Two most significant bits shall be set */
5818 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5819 			return false;
5820 		return true;
5821 	}
5822 
5823 	return false;
5824 }
5825 
/* Handle the MGMT_OP_LOAD_LONG_TERM_KEYS command: replace the kernel's
 * SMP Long Term Key list with the list supplied by user space.
 *
 * The whole payload is validated (count bound, exact length, per-entry
 * validity) before the existing list is cleared and rebuilt. Blocked
 * keys, P256 debug keys and entries with unknown types are skipped
 * rather than failing the whole command.
 *
 * Returns 0 or a negative errno; command complete is sent to @sk.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound so that struct_size() below cannot overflow u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before clearing the existing list so a
	 * bad payload leaves the current keys untouched.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to SMP key type + authentication
		 * level; the master bit selects master vs slave LTK for
		 * legacy (non-P256) pairings.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* fall through - debug keys (and unknown types)
			 * are deliberately not stored
			 */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5921 
/* Send the Get Connection Information response for a pending command.
 *
 * On success the cached RSSI/TX power values from the connection are
 * returned; on any error the "invalid" sentinel values are used. The
 * connection references taken in get_conn_info() (hci_conn_hold +
 * hci_conn_get) are dropped here in both cases.
 */
static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_conn_info rp;
	int err;

	/* The address part of the response was stashed in cmd->param */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
				status, &rp, sizeof(rp));

	hci_conn_drop(conn);
	hci_conn_put(conn);

	return err;
}
5948 
/* Completion callback for the Read RSSI / Read TX Power request built
 * in get_conn_info(). Recovers the connection handle from the last
 * sent command, finds the matching pending command and completes it.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* The pending command was registered keyed on this connection */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6002 
/* Handle the MGMT_OP_GET_CONN_INFO command: report RSSI and TX power
 * for an existing connection.
 *
 * If the cached values are fresh enough they are returned directly;
 * otherwise an HCI request refreshing them is issued and the reply is
 * deferred to conn_info_refresh_complete()/conn_info_cmd_complete().
 *
 * Returns 0 or a negative errno; command complete is sent to @sk.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up the connection on the transport matching the address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Conn Info per connection may be in flight */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* References dropped again in conn_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6123 
/* Send the Get Clock Information response for a pending command.
 *
 * On success the local clock (and, if a connection was involved, the
 * piconet clock and accuracy) are filled in; on error only the address
 * is echoed back. Drops the connection references taken in
 * get_clock_info() when a connection was attached to the command.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* The address part of the response was stashed in cmd->param */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
6159 
/* Completion callback for the Read Clock request built in
 * get_clock_info(). Recovers the connection (if the piconet clock was
 * read), finds the matching pending command and completes it.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection
	 * was requested, so the handle identifies that connection.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6191 
/* Handle the MGMT_OP_GET_CLOCK_INFO command: read the local clock and,
 * when a peer address is given, the piconet clock of that BR/EDR
 * connection. The reply is deferred to get_clock_info_complete() /
 * clock_info_cmd_complete().
 *
 * Returns 0 or a negative errno; command complete is sent to @sk.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is a BR/EDR-only concept */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects a specific connection whose piconet
	 * clock should be read in addition to the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First command reads the local clock (which = 0 after memset) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* References dropped again in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6267 
6268 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6269 {
6270 	struct hci_conn *conn;
6271 
6272 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6273 	if (!conn)
6274 		return false;
6275 
6276 	if (conn->dst_type != type)
6277 		return false;
6278 
6279 	if (conn->state != BT_CONNECTED)
6280 		return false;
6281 
6282 	return true;
6283 }
6284 
/* This function requires the caller holds hdev->lock */
/* Set (creating if necessary) the auto-connect policy for the given
 * address and re-file the params entry on the matching action list
 * (pend_le_conns or pend_le_reports).
 *
 * Returns 0 on success or -EIO if the params entry could not be
 * created.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Remove from the current action list before re-filing below */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6329 
6330 static void device_added(struct sock *sk, struct hci_dev *hdev,
6331 			 bdaddr_t *bdaddr, u8 type, u8 action)
6332 {
6333 	struct mgmt_ev_device_added ev;
6334 
6335 	bacpy(&ev.addr.bdaddr, bdaddr);
6336 	ev.addr.type = type;
6337 	ev.action = action;
6338 
6339 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
6340 }
6341 
/* Handle the MGMT_OP_ADD_DEVICE command (action: 0x00 = background
 * scan/report, 0x01 = allow incoming connection, 0x02 = auto-connect).
 *
 * BR/EDR addresses are added to the whitelist (incoming action only);
 * LE identity addresses get connection parameters with the requested
 * auto-connect policy. A Device Added event is emitted on success.
 *
 * Returns 0 or a negative errno; command complete is sent to @sk.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->whitelist,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action to the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6439 
6440 static void device_removed(struct sock *sk, struct hci_dev *hdev,
6441 			   bdaddr_t *bdaddr, u8 type)
6442 {
6443 	struct mgmt_ev_device_removed ev;
6444 
6445 	bacpy(&ev.addr.bdaddr, bdaddr);
6446 	ev.addr.type = type;
6447 
6448 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
6449 }
6450 
/* Handler for the MGMT Remove Device command.
 *
 * For a specific address this removes the device either from the BR/EDR
 * whitelist or from the stored LE connection parameters.  For BDADDR_ANY
 * (which must come with address type 0) it flushes the whole whitelist
 * and every removable LE connection parameter entry.
 *
 * Returns the result of queuing the command response towards @sk.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			/* A failed delete means the address was not on the
			 * whitelist in the first place.
			 */
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Re-evaluate scanning now that the whitelist
			 * changed.
			 */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries in the DISABLED or EXPLICIT auto-connect states
		 * are rejected here, i.e. treated the same as a missing
		 * entry.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY ("remove all") requires address type 0. */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Entries with a pending explicit connect are kept,
			 * only demoted back to the EXPLICIT state.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6579 
/* Handler for the MGMT Load Connection Parameters command.
 *
 * Clears all connection parameter entries in the DISABLED state, then
 * stores every valid entry from the command payload.  Individual invalid
 * entries are logged and skipped rather than failing the whole command,
 * so the handler always completes with success once the header checks
 * pass.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest entry count that keeps the total payload within U16_MAX,
	 * so struct_size() below cannot wrap the u16 expected_len.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the advertised entry count. */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE public/random address types are accepted. */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* NOTE(review): assumes hci_conn_params_add() returns an
		 * already-existing entry for this address so the assignments
		 * below update it in place — verify against hci_core.
		 */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6664 
6665 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
6666 			       void *data, u16 len)
6667 {
6668 	struct mgmt_cp_set_external_config *cp = data;
6669 	bool changed;
6670 	int err;
6671 
6672 	bt_dev_dbg(hdev, "sock %p", sk);
6673 
6674 	if (hdev_is_powered(hdev))
6675 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6676 				       MGMT_STATUS_REJECTED);
6677 
6678 	if (cp->config != 0x00 && cp->config != 0x01)
6679 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6680 				         MGMT_STATUS_INVALID_PARAMS);
6681 
6682 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6683 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6684 				       MGMT_STATUS_NOT_SUPPORTED);
6685 
6686 	hci_dev_lock(hdev);
6687 
6688 	if (cp->config)
6689 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6690 	else
6691 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6692 
6693 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6694 	if (err < 0)
6695 		goto unlock;
6696 
6697 	if (!changed)
6698 		goto unlock;
6699 
6700 	err = new_options(hdev, sk);
6701 
6702 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6703 		mgmt_index_removed(hdev);
6704 
6705 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6706 			hci_dev_set_flag(hdev, HCI_CONFIG);
6707 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6708 
6709 			queue_work(hdev->req_workqueue, &hdev->power_on);
6710 		} else {
6711 			set_bit(HCI_RAW, &hdev->flags);
6712 			mgmt_index_added(hdev);
6713 		}
6714 	}
6715 
6716 unlock:
6717 	hci_dev_unlock(hdev);
6718 	return err;
6719 }
6720 
6721 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6722 			      void *data, u16 len)
6723 {
6724 	struct mgmt_cp_set_public_address *cp = data;
6725 	bool changed;
6726 	int err;
6727 
6728 	bt_dev_dbg(hdev, "sock %p", sk);
6729 
6730 	if (hdev_is_powered(hdev))
6731 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6732 				       MGMT_STATUS_REJECTED);
6733 
6734 	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6735 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6736 				       MGMT_STATUS_INVALID_PARAMS);
6737 
6738 	if (!hdev->set_bdaddr)
6739 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6740 				       MGMT_STATUS_NOT_SUPPORTED);
6741 
6742 	hci_dev_lock(hdev);
6743 
6744 	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6745 	bacpy(&hdev->public_addr, &cp->bdaddr);
6746 
6747 	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6748 	if (err < 0)
6749 		goto unlock;
6750 
6751 	if (!changed)
6752 		goto unlock;
6753 
6754 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6755 		err = new_options(hdev, sk);
6756 
6757 	if (is_configured(hdev)) {
6758 		mgmt_index_removed(hdev);
6759 
6760 		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6761 
6762 		hci_dev_set_flag(hdev, HCI_CONFIG);
6763 		hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6764 
6765 		queue_work(hdev->req_workqueue, &hdev->power_on);
6766 	}
6767 
6768 unlock:
6769 	hci_dev_unlock(hdev);
6770 	return err;
6771 }
6772 
/* Completion callback for the HCI OOB data request issued by
 * read_local_ssp_oob_req().
 *
 * Builds the Read Local OOB Extended Data response from either the
 * legacy (P-192 only) or the extended (P-192 + P-256) HCI reply and, on
 * success, also emits the Local OOB Data Updated event to sockets that
 * opted in to those events.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	/* On every failure path below the hash/rand pointers are either
	 * NULLed or left unset; that is safe because a non-zero status
	 * jumps to send_rsp before they are ever read.
	 */
	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* Class of Device (5) + C192 (18) + R192 (18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* In Secure Connections Only mode the P-192
			 * values are left out of the response.
			 */
			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
6883 
6884 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6885 				  struct mgmt_cp_read_local_oob_ext_data *cp)
6886 {
6887 	struct mgmt_pending_cmd *cmd;
6888 	struct hci_request req;
6889 	int err;
6890 
6891 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6892 			       cp, sizeof(*cp));
6893 	if (!cmd)
6894 		return -ENOMEM;
6895 
6896 	hci_req_init(&req, hdev);
6897 
6898 	if (bredr_sc_enabled(hdev))
6899 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6900 	else
6901 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6902 
6903 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6904 	if (err < 0) {
6905 		mgmt_pending_remove(cmd);
6906 		return err;
6907 	}
6908 
6909 	return 0;
6910 }
6911 
/* Handler for the MGMT Read Local OOB Extended Data command.
 *
 * For the BR/EDR type with SSP enabled the real OOB values must be
 * fetched from the controller, so the work is handed off to
 * read_local_ssp_oob_req() and completed asynchronously.  All other
 * cases (including every error) are answered synchronously with an
 * EIR-formatted payload built here.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine the status and the worst-case EIR length
	 * so the response buffer can be sized before taking the lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill in the EIR payload, tracking the actual
	 * length as fields are appended.
	 */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] carries the address type: 0x01 for static
		 * random, 0x00 for public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7067 
7068 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7069 {
7070 	u32 flags = 0;
7071 
7072 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7073 	flags |= MGMT_ADV_FLAG_DISCOV;
7074 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7075 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7076 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7077 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7078 
7079 	/* In extended adv TX_POWER returned from Set Adv Param
7080 	 * will be always valid.
7081 	 */
7082 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7083 	    ext_adv_capable(hdev))
7084 		flags |= MGMT_ADV_FLAG_TX_POWER;
7085 
7086 	if (ext_adv_capable(hdev)) {
7087 		flags |= MGMT_ADV_FLAG_SEC_1M;
7088 
7089 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7090 			flags |= MGMT_ADV_FLAG_SEC_2M;
7091 
7092 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7093 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7094 	}
7095 
7096 	return flags;
7097 }
7098 
7099 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7100 			     void *data, u16 data_len)
7101 {
7102 	struct mgmt_rp_read_adv_features *rp;
7103 	size_t rp_len;
7104 	int err;
7105 	struct adv_info *adv_instance;
7106 	u32 supported_flags;
7107 	u8 *instance;
7108 
7109 	bt_dev_dbg(hdev, "sock %p", sk);
7110 
7111 	if (!lmp_le_capable(hdev))
7112 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7113 				       MGMT_STATUS_REJECTED);
7114 
7115 	hci_dev_lock(hdev);
7116 
7117 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7118 	rp = kmalloc(rp_len, GFP_ATOMIC);
7119 	if (!rp) {
7120 		hci_dev_unlock(hdev);
7121 		return -ENOMEM;
7122 	}
7123 
7124 	supported_flags = get_supported_adv_flags(hdev);
7125 
7126 	rp->supported_flags = cpu_to_le32(supported_flags);
7127 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7128 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7129 	rp->max_instances = HCI_MAX_ADV_INSTANCES;
7130 	rp->num_instances = hdev->adv_instance_cnt;
7131 
7132 	instance = rp->instance;
7133 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7134 		*instance = adv_instance->instance;
7135 		instance++;
7136 	}
7137 
7138 	hci_dev_unlock(hdev);
7139 
7140 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7141 				MGMT_STATUS_SUCCESS, rp, rp_len);
7142 
7143 	kfree(rp);
7144 
7145 	return err;
7146 }
7147 
7148 static u8 calculate_name_len(struct hci_dev *hdev)
7149 {
7150 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7151 
7152 	return append_local_name(hdev, buf, 0);
7153 }
7154 
7155 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7156 			   bool is_adv_data)
7157 {
7158 	u8 max_len = HCI_MAX_AD_LENGTH;
7159 
7160 	if (is_adv_data) {
7161 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7162 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7163 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7164 			max_len -= 3;
7165 
7166 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7167 			max_len -= 3;
7168 	} else {
7169 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7170 			max_len -= calculate_name_len(hdev);
7171 
7172 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7173 			max_len -= 4;
7174 	}
7175 
7176 	return max_len;
7177 }
7178 
7179 static bool flags_managed(u32 adv_flags)
7180 {
7181 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7182 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7183 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7184 }
7185 
7186 static bool tx_power_managed(u32 adv_flags)
7187 {
7188 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7189 }
7190 
7191 static bool name_managed(u32 adv_flags)
7192 {
7193 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7194 }
7195 
7196 static bool appearance_managed(u32 adv_flags)
7197 {
7198 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7199 }
7200 
7201 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7202 			      u8 len, bool is_adv_data)
7203 {
7204 	int i, cur_len;
7205 	u8 max_len;
7206 
7207 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7208 
7209 	if (len > max_len)
7210 		return false;
7211 
7212 	/* Make sure that the data is correctly formatted. */
7213 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7214 		cur_len = data[i];
7215 
7216 		if (data[i + 1] == EIR_FLAGS &&
7217 		    (!is_adv_data || flags_managed(adv_flags)))
7218 			return false;
7219 
7220 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7221 			return false;
7222 
7223 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7224 			return false;
7225 
7226 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7227 			return false;
7228 
7229 		if (data[i + 1] == EIR_APPEARANCE &&
7230 		    appearance_managed(adv_flags))
7231 			return false;
7232 
7233 		/* If the current field length would exceed the total data
7234 		 * length, then it's invalid.
7235 		 */
7236 		if (i + cur_len >= len)
7237 			return false;
7238 	}
7239 
7240 	return true;
7241 }
7242 
/* Request callback for add_advertising().
 *
 * On failure, every advertising instance still marked pending is rolled
 * back: it is removed again and announced via Advertising Removed.  On
 * success the pending markers are simply cleared.  The queued Add
 * Advertising command, if still present, is completed accordingly.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the instance timer before removing the instance
		 * that is currently being advertised.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7294 
/* Handler for the MGMT Add Advertising command.
 *
 * Validates the flags and TLV payloads, registers (or replaces) the
 * advertising instance and, when the controller state allows it,
 * schedules the instance via an HCI request whose outcome is handled by
 * add_advertising_complete().  Otherwise the command is completed
 * immediately with success.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags, phy_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must match the two advertised
	 * lengths exactly.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running timer, which requires power. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7440 
7441 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
7442 					u16 opcode)
7443 {
7444 	struct mgmt_pending_cmd *cmd;
7445 	struct mgmt_cp_remove_advertising *cp;
7446 	struct mgmt_rp_remove_advertising rp;
7447 
7448 	bt_dev_dbg(hdev, "status %d", status);
7449 
7450 	hci_dev_lock(hdev);
7451 
7452 	/* A failure status here only means that we failed to disable
7453 	 * advertising. Otherwise, the advertising instance has been removed,
7454 	 * so report success.
7455 	 */
7456 	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
7457 	if (!cmd)
7458 		goto unlock;
7459 
7460 	cp = cmd->param;
7461 	rp.instance = cp->instance;
7462 
7463 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
7464 			  &rp, sizeof(rp));
7465 	mgmt_pending_remove(cmd);
7466 
7467 unlock:
7468 	hci_dev_unlock(hdev);
7469 }
7470 
/* Handler for the MGMT Remove Advertising command.
 *
 * Removes a single advertising instance (or all of them when
 * cp->instance is 0) and, when HCI communication is required, runs a
 * request whose outcome is handled by remove_advertising_complete();
 * otherwise the command is completed immediately.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must exist; instance 0 means "all". */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	/* Turn advertising off entirely once no instances remain. */
	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7543 
7544 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
7545 			     void *data, u16 data_len)
7546 {
7547 	struct mgmt_cp_get_adv_size_info *cp = data;
7548 	struct mgmt_rp_get_adv_size_info rp;
7549 	u32 flags, supported_flags;
7550 	int err;
7551 
7552 	bt_dev_dbg(hdev, "sock %p", sk);
7553 
7554 	if (!lmp_le_capable(hdev))
7555 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7556 				       MGMT_STATUS_REJECTED);
7557 
7558 	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
7559 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7560 				       MGMT_STATUS_INVALID_PARAMS);
7561 
7562 	flags = __le32_to_cpu(cp->flags);
7563 
7564 	/* The current implementation only supports a subset of the specified
7565 	 * flags.
7566 	 */
7567 	supported_flags = get_supported_adv_flags(hdev);
7568 	if (flags & ~supported_flags)
7569 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7570 				       MGMT_STATUS_INVALID_PARAMS);
7571 
7572 	rp.instance = cp->instance;
7573 	rp.flags = cp->flags;
7574 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7575 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7576 
7577 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7578 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7579 
7580 	return err;
7581 }
7582 
/* Dispatch table for incoming mgmt commands, indexed by opcode (entry 0
 * is a placeholder since opcode 0x0000 does not exist).  Each entry
 * gives the handler, the expected parameter size (a minimum when the
 * HCI_MGMT_VAR_LEN flag is set) and flags describing whether a
 * controller index is required/optional and whether untrusted sockets
 * may issue the command.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_security_info,      MGMT_READ_SECURITY_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
};
7703 
/* Announce a newly registered controller index on the mgmt interface.
 *
 * Primary controllers additionally get the legacy Index Added (or
 * Unconfigured Index Added) event; all supported types get the extended
 * event carrying a type (0x00 = configured primary, 0x01 = unconfigured
 * primary, 0x02 = AMP) and the transport bus.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw-only devices are not exposed via mgmt. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		/* Unknown device types are not announced at all. */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7735 
/* Announce that a controller index went away.
 *
 * For primary controllers all still-pending mgmt commands are failed
 * with MGMT_STATUS_INVALID_INDEX first, then the legacy Index Removed
 * (or Unconfigured Index Removed) event is sent.  All supported types
 * also get the extended event (type 0x00/0x01/0x02 as in
 * mgmt_index_added()).
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw-only devices were never announced, so nothing to remove. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 matches every pending command. */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7770 
7771 /* This function requires the caller holds hdev->lock */
7772 static void restart_le_actions(struct hci_dev *hdev)
7773 {
7774 	struct hci_conn_params *p;
7775 
7776 	list_for_each_entry(p, &hdev->le_conn_params, list) {
7777 		/* Needed for AUTO_OFF case where might not "really"
7778 		 * have been powered off.
7779 		 */
7780 		list_del_init(&p->action);
7781 
7782 		switch (p->auto_connect) {
7783 		case HCI_AUTO_CONN_DIRECT:
7784 		case HCI_AUTO_CONN_ALWAYS:
7785 			list_add(&p->action, &hdev->pend_le_conns);
7786 			break;
7787 		case HCI_AUTO_CONN_REPORT:
7788 			list_add(&p->action, &hdev->pend_le_reports);
7789 			break;
7790 		default:
7791 			break;
7792 		}
7793 	}
7794 }
7795 
/* Called once a power-on attempt finished; err is 0 on success.
 *
 * On success, re-arms the LE auto-connect/report action lists and kicks
 * background scanning, then answers any pending Set Powered commands
 * and broadcasts the new settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* Broadcast the updated settings to all mgmt listeners. */
	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first matching socket. */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
7818 
/* Clean up mgmt state after the controller has powered off: answer
 * pending Set Powered commands, fail all other pending commands, clear
 * the published class of device and broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* A powered-off controller has no class of device; announce a
	 * zeroed value if one was previously published.
	 */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first matching socket. */
	if (match.sk)
		sock_put(match.sk);
}
7852 
7853 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7854 {
7855 	struct mgmt_pending_cmd *cmd;
7856 	u8 status;
7857 
7858 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7859 	if (!cmd)
7860 		return;
7861 
7862 	if (err == -ERFKILL)
7863 		status = MGMT_STATUS_RFKILLED;
7864 	else
7865 		status = MGMT_STATUS_FAILED;
7866 
7867 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7868 
7869 	mgmt_pending_remove(cmd);
7870 }
7871 
7872 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7873 		       bool persistent)
7874 {
7875 	struct mgmt_ev_new_link_key ev;
7876 
7877 	memset(&ev, 0, sizeof(ev));
7878 
7879 	ev.store_hint = persistent;
7880 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7881 	ev.key.addr.type = BDADDR_BREDR;
7882 	ev.key.type = key->type;
7883 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7884 	ev.key.pin_len = key->pin_len;
7885 
7886 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
7887 }
7888 
7889 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7890 {
7891 	switch (ltk->type) {
7892 	case SMP_LTK:
7893 	case SMP_LTK_SLAVE:
7894 		if (ltk->authenticated)
7895 			return MGMT_LTK_AUTHENTICATED;
7896 		return MGMT_LTK_UNAUTHENTICATED;
7897 	case SMP_LTK_P256:
7898 		if (ltk->authenticated)
7899 			return MGMT_LTK_P256_AUTH;
7900 		return MGMT_LTK_P256_UNAUTH;
7901 	case SMP_LTK_P256_DEBUG:
7902 		return MGMT_LTK_P256_DEBUG;
7903 	}
7904 
7905 	return MGMT_LTK_UNAUTHENTICATED;
7906 }
7907 
/* Emit MGMT_EV_NEW_LONG_TERM_KEY so userspace can decide whether to
 * persist the LE long term key.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Keys of type SMP_LTK are flagged as master keys. */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7950 
7951 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
7952 {
7953 	struct mgmt_ev_new_irk ev;
7954 
7955 	memset(&ev, 0, sizeof(ev));
7956 
7957 	ev.store_hint = persistent;
7958 
7959 	bacpy(&ev.rpa, &irk->rpa);
7960 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7961 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7962 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7963 
7964 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7965 }
7966 
7967 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7968 		   bool persistent)
7969 {
7970 	struct mgmt_ev_new_csrk ev;
7971 
7972 	memset(&ev, 0, sizeof(ev));
7973 
7974 	/* Devices using resolvable or non-resolvable random addresses
7975 	 * without providing an identity resolving key don't require
7976 	 * to store signature resolving keys. Their addresses will change
7977 	 * the next time around.
7978 	 *
7979 	 * Only when a remote device provides an identity address
7980 	 * make sure the signature resolving key is stored. So allow
7981 	 * static random and public addresses here.
7982 	 */
7983 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7984 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7985 		ev.store_hint = 0x00;
7986 	else
7987 		ev.store_hint = persistent;
7988 
7989 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7990 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7991 	ev.key.type = csrk->type;
7992 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7993 
7994 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
7995 }
7996 
7997 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7998 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7999 			 u16 max_interval, u16 latency, u16 timeout)
8000 {
8001 	struct mgmt_ev_new_conn_param ev;
8002 
8003 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
8004 		return;
8005 
8006 	memset(&ev, 0, sizeof(ev));
8007 	bacpy(&ev.addr.bdaddr, bdaddr);
8008 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8009 	ev.store_hint = store_hint;
8010 	ev.min_interval = cpu_to_le16(min_interval);
8011 	ev.max_interval = cpu_to_le16(max_interval);
8012 	ev.latency = cpu_to_le16(latency);
8013 	ev.timeout = cpu_to_le16(timeout);
8014 
8015 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
8016 }
8017 
/* Emit MGMT_EV_DEVICE_CONNECTED for a newly established connection.
 *
 * The event's variable-length EIR blob carries either the LE
 * advertising data received from the remote (if any) or, for BR/EDR,
 * an EIR-encoded complete name and class of device.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* Stack buffer for the fixed event header plus the appended
	 * variable-length EIR data.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only include the class of device if it is non-zero. */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	/* Send only the header plus the EIR bytes actually used. */
	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
8054 
8055 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8056 {
8057 	struct sock **sk = data;
8058 
8059 	cmd->cmd_complete(cmd, 0);
8060 
8061 	*sk = cmd->sk;
8062 	sock_hold(*sk);
8063 
8064 	mgmt_pending_remove(cmd);
8065 }
8066 
8067 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
8068 {
8069 	struct hci_dev *hdev = data;
8070 	struct mgmt_cp_unpair_device *cp = cmd->param;
8071 
8072 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
8073 
8074 	cmd->cmd_complete(cmd, 0);
8075 	mgmt_pending_remove(cmd);
8076 }
8077 
8078 bool mgmt_powering_down(struct hci_dev *hdev)
8079 {
8080 	struct mgmt_pending_cmd *cmd;
8081 	struct mgmt_mode *cp;
8082 
8083 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8084 	if (!cmd)
8085 		return false;
8086 
8087 	cp = cmd->param;
8088 	if (!cp->val)
8089 		return true;
8090 
8091 	return false;
8092 }
8093 
/* Emit MGMT_EV_DEVICE_DISCONNECTED and complete pending Disconnect and
 * Unpair Device commands affected by this disconnection.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections previously announced to mgmt get an event. */
	if (!mgmt_connected)
		return;

	/* Only ACL and LE links are visible on the mgmt interface. */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Answer any pending Disconnect command; disconnect_rsp() stores
	 * (and holds a reference on) the originating socket in sk so the
	 * event below can skip it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
8129 
8130 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
8131 			    u8 link_type, u8 addr_type, u8 status)
8132 {
8133 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
8134 	struct mgmt_cp_disconnect *cp;
8135 	struct mgmt_pending_cmd *cmd;
8136 
8137 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
8138 			     hdev);
8139 
8140 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
8141 	if (!cmd)
8142 		return;
8143 
8144 	cp = cmd->param;
8145 
8146 	if (bacmp(bdaddr, &cp->addr.bdaddr))
8147 		return;
8148 
8149 	if (cp->addr.type != bdaddr_type)
8150 		return;
8151 
8152 	cmd->cmd_complete(cmd, mgmt_status(status));
8153 	mgmt_pending_remove(cmd);
8154 }
8155 
8156 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8157 			 u8 addr_type, u8 status)
8158 {
8159 	struct mgmt_ev_connect_failed ev;
8160 
8161 	/* The connection is still in hci_conn_hash so test for 1
8162 	 * instead of 0 to know if this is the last one.
8163 	 */
8164 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
8165 		cancel_delayed_work(&hdev->power_off);
8166 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
8167 	}
8168 
8169 	bacpy(&ev.addr.bdaddr, bdaddr);
8170 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8171 	ev.status = mgmt_status(status);
8172 
8173 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
8174 }
8175 
8176 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
8177 {
8178 	struct mgmt_ev_pin_code_request ev;
8179 
8180 	bacpy(&ev.addr.bdaddr, bdaddr);
8181 	ev.addr.type = BDADDR_BREDR;
8182 	ev.secure = secure;
8183 
8184 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
8185 }
8186 
8187 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8188 				  u8 status)
8189 {
8190 	struct mgmt_pending_cmd *cmd;
8191 
8192 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
8193 	if (!cmd)
8194 		return;
8195 
8196 	cmd->cmd_complete(cmd, mgmt_status(status));
8197 	mgmt_pending_remove(cmd);
8198 }
8199 
8200 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8201 				      u8 status)
8202 {
8203 	struct mgmt_pending_cmd *cmd;
8204 
8205 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
8206 	if (!cmd)
8207 		return;
8208 
8209 	cmd->cmd_complete(cmd, mgmt_status(status));
8210 	mgmt_pending_remove(cmd);
8211 }
8212 
8213 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8214 			      u8 link_type, u8 addr_type, u32 value,
8215 			      u8 confirm_hint)
8216 {
8217 	struct mgmt_ev_user_confirm_request ev;
8218 
8219 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8220 
8221 	bacpy(&ev.addr.bdaddr, bdaddr);
8222 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8223 	ev.confirm_hint = confirm_hint;
8224 	ev.value = cpu_to_le32(value);
8225 
8226 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
8227 			  NULL);
8228 }
8229 
8230 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
8231 			      u8 link_type, u8 addr_type)
8232 {
8233 	struct mgmt_ev_user_passkey_request ev;
8234 
8235 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8236 
8237 	bacpy(&ev.addr.bdaddr, bdaddr);
8238 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8239 
8240 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
8241 			  NULL);
8242 }
8243 
8244 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8245 				      u8 link_type, u8 addr_type, u8 status,
8246 				      u8 opcode)
8247 {
8248 	struct mgmt_pending_cmd *cmd;
8249 
8250 	cmd = pending_find(opcode, hdev);
8251 	if (!cmd)
8252 		return -ENOENT;
8253 
8254 	cmd->cmd_complete(cmd, mgmt_status(status));
8255 	mgmt_pending_remove(cmd);
8256 
8257 	return 0;
8258 }
8259 
8260 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8261 				     u8 link_type, u8 addr_type, u8 status)
8262 {
8263 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8264 					  status, MGMT_OP_USER_CONFIRM_REPLY);
8265 }
8266 
8267 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8268 					 u8 link_type, u8 addr_type, u8 status)
8269 {
8270 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8271 					  status,
8272 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
8273 }
8274 
8275 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8276 				     u8 link_type, u8 addr_type, u8 status)
8277 {
8278 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8279 					  status, MGMT_OP_USER_PASSKEY_REPLY);
8280 }
8281 
8282 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
8283 					 u8 link_type, u8 addr_type, u8 status)
8284 {
8285 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
8286 					  status,
8287 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
8288 }
8289 
8290 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
8291 			     u8 link_type, u8 addr_type, u32 passkey,
8292 			     u8 entered)
8293 {
8294 	struct mgmt_ev_passkey_notify ev;
8295 
8296 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
8297 
8298 	bacpy(&ev.addr.bdaddr, bdaddr);
8299 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
8300 	ev.passkey = __cpu_to_le32(passkey);
8301 	ev.entered = entered;
8302 
8303 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
8304 }
8305 
/* Report a failed authentication: emit MGMT_EV_AUTH_FAILED and complete
 * any pending pairing command for this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the initiator's socket (if any); it is answered with a
	 * command response below instead.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
8326 
/* Completion handler for an authentication-enable change: on failure
 * fail pending Set Link Security commands; on success sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state, answer
 * pending commands and broadcast new settings if anything changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH flag into HCI_LINK_SECURITY
	 * and note whether that actually changed anything.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first matching socket. */
	if (match.sk)
		sock_put(match.sk);
}
8353 
8354 static void clear_eir(struct hci_request *req)
8355 {
8356 	struct hci_dev *hdev = req->hdev;
8357 	struct hci_cp_write_eir cp;
8358 
8359 	if (!lmp_ext_inq_capable(hdev))
8360 		return;
8361 
8362 	memset(hdev->eir, 0, sizeof(hdev->eir));
8363 
8364 	memset(&cp, 0, sizeof(cp));
8365 
8366 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
8367 }
8368 
/* Completion handler for an SSP mode change.
 *
 * On failure, clears HCI_SSP_ENABLED (and the dependent HCI_HS_ENABLED)
 * again if it had been set for an enable attempt, and fails all pending
 * Set SSP commands.  On success, syncs the flags, answers pending
 * commands, broadcasts new settings when anything changed and finally
 * refreshes or clears the EIR data, which depends on the SSP state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Enable failed: clear the flag again and announce the
		 * reverted settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also takes High Speed support down. */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first matching socket. */
	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	/* The EIR content depends on the SSP state: refresh it while SSP
	 * is enabled (also toggling debug-key mode), clear it otherwise.
	 */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
8421 
8422 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
8423 {
8424 	struct cmd_lookup *match = data;
8425 
8426 	if (match->sk == NULL) {
8427 		match->sk = cmd->sk;
8428 		sock_hold(match->sk);
8429 	}
8430 }
8431 
/* Completion handler for a class-of-device update, which can stem from
 * Set Dev Class, Add UUID or Remove UUID.  On success the new class is
 * broadcast to interested listeners.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Pick up (and hold) the first socket with a matching pending
	 * command so it can be skipped in the broadcast below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
8450 
/* Handle completion of a local name update.  On success, broadcast
 * MGMT_EV_LOCAL_NAME_CHANGED unless the write was part of powering the
 * controller on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	/* Nothing to announce if the name update failed. */
	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* The change was not initiated via mgmt; record the new
		 * name locally.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
8478 
8479 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
8480 {
8481 	int i;
8482 
8483 	for (i = 0; i < uuid_count; i++) {
8484 		if (!memcmp(uuid, uuids[i], 16))
8485 			return true;
8486 	}
8487 
8488 	return false;
8489 }
8490 
/* Walk the EIR/advertising data in @eir and return true if any UUID
 * advertised there (16-, 32- or 128-bit, complete or incomplete list)
 * matches one of the @uuid_count 128-bit UUIDs in @uuids.  Shorter
 * UUIDs are expanded to 128-bit form on top of the Bluetooth base
 * UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop if the field would run past the end of the data */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* 16-bit UUID fills bytes 12-13 of the
				 * base UUID (little endian on the wire)
				 */
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* 32-bit UUID fills bytes 12-15 */
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared as-is */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past the length byte plus the field itself */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8545 
/* Queue a delayed restart of the ongoing LE scan so that duplicate
 * filtering on the controller is reset and updated results get
 * reported again.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Don't bother restarting when the scan would end before the
	 * restart delay has elapsed anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
8560 
/* Apply the active service discovery filter (RSSI threshold and UUID
 * list) to a single result.  Returns false when the result must be
 * dropped.  May queue an LE scan restart as a side effect when the
 * controller's duplicate filtering hides RSSI updates.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are checked.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
8605 
/* Forward an inquiry result or advertising report to userspace as a
 * Device Found event.  The active discovery filters (RSSI threshold,
 * service UUIDs, limited discoverable mode) are applied first; EIR
 * data, an optional Class of Device field and the scan response are
 * packed together into the event's eir buffer.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the Class of Device only when the EIR data does not
	 * already carry one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8690 
8691 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8692 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8693 {
8694 	struct mgmt_ev_device_found *ev;
8695 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8696 	u16 eir_len;
8697 
8698 	ev = (struct mgmt_ev_device_found *) buf;
8699 
8700 	memset(buf, 0, sizeof(buf));
8701 
8702 	bacpy(&ev->addr.bdaddr, bdaddr);
8703 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
8704 	ev->rssi = rssi;
8705 
8706 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8707 				  name_len);
8708 
8709 	ev->eir_len = cpu_to_le16(eir_len);
8710 
8711 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8712 }
8713 
8714 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8715 {
8716 	struct mgmt_ev_discovering ev;
8717 
8718 	bt_dev_dbg(hdev, "discovering %u", discovering);
8719 
8720 	memset(&ev, 0, sizeof(ev));
8721 	ev.type = hdev->discovery.type;
8722 	ev.discovering = discovering;
8723 
8724 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8725 }
8726 
/* Descriptor for the management control channel; the command table
 * (mgmt_handlers) and per-device init hook are defined earlier in
 * this file.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8733 
/* Register the management control channel with the HCI socket layer.
 * Returns 0 on success or a negative error code.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8738 
/* Unregister the management control channel on module exit. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8743