xref: /openbmc/linux/net/bluetooth/mgmt.c (revision dd21bfa4)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	21
46 
/* Opcodes accepted from trusted (privileged) management sockets. This
 * list is also what Read Commands reports back to a trusted socket.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
133 
/* Events delivered to trusted management sockets; reported by Read
 * Commands for trusted sockets.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};
178 
/* Subset of opcodes (read-only operations) that untrusted sockets may
 * issue; everything else requires HCI_SOCK_TRUSTED.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
191 
/* Subset of events that untrusted sockets are allowed to receive. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
206 
207 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
208 
209 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
210 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
211 
/* HCI to MGMT error code conversion table, indexed directly by the HCI
 * status code (see mgmt_status()). HCI status codes beyond the end of
 * the table fall back to MGMT_STATUS_FAILED.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
279 
280 static u8 mgmt_errno_status(int err)
281 {
282 	switch (err) {
283 	case 0:
284 		return MGMT_STATUS_SUCCESS;
285 	case -EPERM:
286 		return MGMT_STATUS_REJECTED;
287 	case -EINVAL:
288 		return MGMT_STATUS_INVALID_PARAMS;
289 	case -EOPNOTSUPP:
290 		return MGMT_STATUS_NOT_SUPPORTED;
291 	case -EBUSY:
292 		return MGMT_STATUS_BUSY;
293 	case -ETIMEDOUT:
294 		return MGMT_STATUS_AUTH_FAILED;
295 	case -ENOMEM:
296 		return MGMT_STATUS_NO_RESOURCES;
297 	case -EISCONN:
298 		return MGMT_STATUS_ALREADY_CONNECTED;
299 	case -ENOTCONN:
300 		return MGMT_STATUS_DISCONNECTED;
301 	}
302 
303 	return MGMT_STATUS_FAILED;
304 }
305 
306 static u8 mgmt_status(int err)
307 {
308 	if (err < 0)
309 		return mgmt_errno_status(err);
310 
311 	if (err < ARRAY_SIZE(mgmt_status_table))
312 		return mgmt_status_table[err];
313 
314 	return MGMT_STATUS_FAILED;
315 }
316 
/* Broadcast an index-related event on the control channel to all
 * sockets matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
323 
/* Broadcast an event on the control channel to sockets matching @flag,
 * optionally skipping @skip_sk (typically the command's originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
330 
/* Broadcast an event on the control channel to trusted sockets only,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
337 
/* Broadcast a pre-built event skb on the control channel to trusted
 * sockets, optionally skipping @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
343 
344 static u8 le_addr_type(u8 mgmt_addr_type)
345 {
346 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
347 		return ADDR_LE_DEV_PUBLIC;
348 	else
349 		return ADDR_LE_DEV_RANDOM;
350 }
351 
352 void mgmt_fill_version_info(void *ver)
353 {
354 	struct mgmt_rp_read_version *rp = ver;
355 
356 	rp->version = MGMT_VERSION;
357 	rp->revision = cpu_to_le16(MGMT_REVISION);
358 }
359 
360 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
361 			u16 data_len)
362 {
363 	struct mgmt_rp_read_version rp;
364 
365 	bt_dev_dbg(hdev, "sock %p", sk);
366 
367 	mgmt_fill_version_info(&rp);
368 
369 	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
370 				 &rp, sizeof(rp));
371 }
372 
373 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
374 			 u16 data_len)
375 {
376 	struct mgmt_rp_read_commands *rp;
377 	u16 num_commands, num_events;
378 	size_t rp_size;
379 	int i, err;
380 
381 	bt_dev_dbg(hdev, "sock %p", sk);
382 
383 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
384 		num_commands = ARRAY_SIZE(mgmt_commands);
385 		num_events = ARRAY_SIZE(mgmt_events);
386 	} else {
387 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
388 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
389 	}
390 
391 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
392 
393 	rp = kmalloc(rp_size, GFP_KERNEL);
394 	if (!rp)
395 		return -ENOMEM;
396 
397 	rp->num_commands = cpu_to_le16(num_commands);
398 	rp->num_events = cpu_to_le16(num_events);
399 
400 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
401 		__le16 *opcode = rp->opcodes;
402 
403 		for (i = 0; i < num_commands; i++, opcode++)
404 			put_unaligned_le16(mgmt_commands[i], opcode);
405 
406 		for (i = 0; i < num_events; i++, opcode++)
407 			put_unaligned_le16(mgmt_events[i], opcode);
408 	} else {
409 		__le16 *opcode = rp->opcodes;
410 
411 		for (i = 0; i < num_commands; i++, opcode++)
412 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
413 
414 		for (i = 0; i < num_events; i++, opcode++)
415 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
416 	}
417 
418 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
419 				rp, rp_size);
420 	kfree(rp);
421 
422 	return err;
423 }
424 
/* Handle MGMT_OP_READ_INDEX_LIST: reply with the indexes of all
 * configured primary controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count configured primary controllers so the reply
	 * buffer can be sized (two bytes per index).
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because the allocation happens under the rwlock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, skipping devices that are
	 * still in setup/config or are claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the length: the second pass may have skipped devices
	 * that the first pass counted.
	 */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
484 
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list() but
 * reporting only primary controllers that are still unconfigured.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count unconfigured primary controllers to size the
	 * reply buffer.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because the allocation happens under the rwlock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, skipping devices in
	 * setup/config or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the length: the second pass may have skipped devices */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
544 
/* Handle MGMT_OP_READ_EXT_INDEX_LIST: report all controllers (primary
 * and AMP) with a per-entry type and bus. Entry types: 0x00 configured
 * primary, 0x01 unconfigured primary, 0x02 AMP.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count all primary and AMP controllers to size the
	 * reply buffer.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC because the allocation happens under the rwlock */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries, skipping devices in
	 * setup/config or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
618 
619 static bool is_configured(struct hci_dev *hdev)
620 {
621 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
622 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
623 		return false;
624 
625 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
626 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
627 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
628 		return false;
629 
630 	return true;
631 }
632 
633 static __le32 get_missing_options(struct hci_dev *hdev)
634 {
635 	u32 options = 0;
636 
637 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
638 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
639 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
640 
641 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
642 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
643 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
644 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
645 
646 	return cpu_to_le32(options);
647 }
648 
649 static int new_options(struct hci_dev *hdev, struct sock *skip)
650 {
651 	__le32 options = get_missing_options(hdev);
652 
653 	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
654 				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
655 }
656 
657 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
658 {
659 	__le32 options = get_missing_options(hdev);
660 
661 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
662 				 sizeof(options));
663 }
664 
665 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
666 			    void *data, u16 data_len)
667 {
668 	struct mgmt_rp_read_config_info rp;
669 	u32 options = 0;
670 
671 	bt_dev_dbg(hdev, "sock %p", sk);
672 
673 	hci_dev_lock(hdev);
674 
675 	memset(&rp, 0, sizeof(rp));
676 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
677 
678 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
679 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
680 
681 	if (hdev->set_bdaddr)
682 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
683 
684 	rp.supported_options = cpu_to_le32(options);
685 	rp.missing_options = get_missing_options(hdev);
686 
687 	hci_dev_unlock(hdev);
688 
689 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
690 				 &rp, sizeof(rp));
691 }
692 
/* Build the bitmask of PHYs the controller supports, derived from the
 * LMP feature bits (BR/EDR) and the LE feature bits (LE PHYs).
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for any BR/EDR controller */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 3M and multi-slot EDR depend on EDR 2M support */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
744 
/* Build the bitmask of PHYs currently selected. For BR/EDR this is
 * derived from hdev->pkt_type; note the polarity difference: the basic
 * rate DM/DH bits are set when the packet type is enabled, while the
 * EDR 2DH/3DH bits are exclusion bits — set means the packet type is
 * disabled, hence the negated tests below.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits are "shall not use" bits: clear = selected */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	/* LE selection comes from the default TX/RX PHY preferences */
	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
807 
808 static u32 get_configurable_phys(struct hci_dev *hdev)
809 {
810 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
811 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
812 }
813 
/* Build the bitmask of settings this controller can support, based on
 * its capabilities (BR/EDR, LE, SSP, SC, quirks, driver hooks).
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of controller capabilities */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs CONFIG_BT_HS */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration requires either the external-config quirk or a
	 * driver hook for setting the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
860 
/* Build the bitmask of settings currently in effect, derived from the
 * device's runtime flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
931 
/* Look up a pending management command for @opcode on the control
 * channel for @hdev; returns NULL when none is pending.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
936 
937 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
938 {
939 	struct mgmt_pending_cmd *cmd;
940 
941 	/* If there's a pending mgmt command the flags will not yet have
942 	 * their final values, so check for this first.
943 	 */
944 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
945 	if (cmd) {
946 		struct mgmt_mode *cp = cmd->param;
947 		if (cp->val == 0x01)
948 			return LE_AD_GENERAL;
949 		else if (cp->val == 0x02)
950 			return LE_AD_LIMITED;
951 	} else {
952 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
953 			return LE_AD_LIMITED;
954 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
955 			return LE_AD_GENERAL;
956 	}
957 
958 	return 0;
959 }
960 
961 bool mgmt_get_connectable(struct hci_dev *hdev)
962 {
963 	struct mgmt_pending_cmd *cmd;
964 
965 	/* If there's a pending mgmt command the flag will not yet have
966 	 * it's final value, so check for this first.
967 	 */
968 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
969 	if (cmd) {
970 		struct mgmt_mode *cp = cmd->param;
971 
972 		return cp->val;
973 	}
974 
975 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
976 }
977 
/* hci_cmd_sync callback: refresh the EIR data and class of device
 * after the service cache period has expired. Always succeeds.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
985 
/* Delayed work: when the service cache timeout fires, clear the cache
 * flag (bailing out if it was already clear) and queue the synchronous
 * EIR/class update.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
996 
997 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
998 {
999 	/* The generation of a new RPA and programming it into the
1000 	 * controller happens in the hci_req_enable_advertising()
1001 	 * function.
1002 	 */
1003 	if (ext_adv_capable(hdev))
1004 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1005 	else
1006 		return hci_enable_advertising_sync(hdev);
1007 }
1008 
/* Delayed work: mark the resolvable private address as expired and, if
 * advertising is active, queue the synchronous re-advertise (which
 * regenerates the RPA).
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh if we are not advertising */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1023 
1024 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1025 {
1026 	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1027 		return;
1028 
1029 	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1030 	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1031 
1032 	/* Non-mgmt controlled devices get this bit set
1033 	 * implicitly so that pairing works for them, however
1034 	 * for mgmt we require user-space to explicitly enable
1035 	 * it
1036 	 */
1037 	hci_dev_clear_flag(hdev, HCI_BONDABLE);
1038 }
1039 
1040 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1041 				void *data, u16 data_len)
1042 {
1043 	struct mgmt_rp_read_info rp;
1044 
1045 	bt_dev_dbg(hdev, "sock %p", sk);
1046 
1047 	hci_dev_lock(hdev);
1048 
1049 	memset(&rp, 0, sizeof(rp));
1050 
1051 	bacpy(&rp.bdaddr, &hdev->bdaddr);
1052 
1053 	rp.version = hdev->hci_ver;
1054 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1055 
1056 	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1057 	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1058 
1059 	memcpy(rp.dev_class, hdev->dev_class, 3);
1060 
1061 	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1062 	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1063 
1064 	hci_dev_unlock(hdev);
1065 
1066 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1067 				 sizeof(rp));
1068 }
1069 
1070 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1071 {
1072 	u16 eir_len = 0;
1073 	size_t name_len;
1074 
1075 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1076 		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1077 					  hdev->dev_class, 3);
1078 
1079 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1080 		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1081 					  hdev->appearance);
1082 
1083 	name_len = strlen(hdev->dev_name);
1084 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1085 				  hdev->dev_name, name_len);
1086 
1087 	name_len = strlen(hdev->short_name);
1088 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1089 				  hdev->short_name, name_len);
1090 
1091 	return eir_len;
1092 }
1093 
/* Handler for MGMT_OP_READ_EXT_INFO: returns the extended controller
 * information reply, with class of device, appearance and names
 * encoded as a variable-length EIR blob after the fixed header.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Scratch buffer holding the fixed reply header (rp) followed by
	 * the EIR data; 512 bytes is large enough for all fields
	 * appended by append_eir_data_to_buf().
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	/* Device class, appearance and names go out as EIR data */
	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1133 
/* Broadcast an Extended Controller Information Changed event to all
 * mgmt sockets that opted in (HCI_MGMT_EXT_INFO_EVENTS), except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	/* Same header-plus-EIR layout as the READ_EXT_INFO reply */
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1149 
1150 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1151 {
1152 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1153 
1154 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1155 				 sizeof(settings));
1156 }
1157 
1158 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1159 {
1160 	struct mgmt_ev_advertising_added ev;
1161 
1162 	ev.instance = instance;
1163 
1164 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1165 }
1166 
1167 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1168 			      u8 instance)
1169 {
1170 	struct mgmt_ev_advertising_removed ev;
1171 
1172 	ev.instance = instance;
1173 
1174 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1175 }
1176 
1177 static void cancel_adv_timeout(struct hci_dev *hdev)
1178 {
1179 	if (hdev->adv_instance_timeout) {
1180 		hdev->adv_instance_timeout = 0;
1181 		cancel_delayed_work(&hdev->adv_instance_expire);
1182 	}
1183 }
1184 
1185 /* This function requires the caller holds hdev->lock */
1186 static void restart_le_actions(struct hci_dev *hdev)
1187 {
1188 	struct hci_conn_params *p;
1189 
1190 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1191 		/* Needed for AUTO_OFF case where might not "really"
1192 		 * have been powered off.
1193 		 */
1194 		list_del_init(&p->action);
1195 
1196 		switch (p->auto_connect) {
1197 		case HCI_AUTO_CONN_DIRECT:
1198 		case HCI_AUTO_CONN_ALWAYS:
1199 			list_add(&p->action, &hdev->pend_le_conns);
1200 			break;
1201 		case HCI_AUTO_CONN_REPORT:
1202 			list_add(&p->action, &hdev->pend_le_reports);
1203 			break;
1204 		default:
1205 			break;
1206 		}
1207 	}
1208 }
1209 
1210 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1211 {
1212 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1213 
1214 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1215 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1216 }
1217 
/* Completion handler for MGMT_OP_SET_POWERED queued via
 * hci_cmd_sync_queue(); runs after set_powered_sync() finishes.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power on: re-arm LE auto-connect actions and
			 * passive scanning under hdev->lock.
			 */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1253 
1254 static int set_powered_sync(struct hci_dev *hdev, void *data)
1255 {
1256 	struct mgmt_pending_cmd *cmd = data;
1257 	struct mgmt_mode *cp = cmd->param;
1258 
1259 	BT_DBG("%s", hdev->name);
1260 
1261 	return hci_set_powered_sync(hdev, cp->val);
1262 }
1263 
/* Handler for MGMT_OP_SET_POWERED: validates the request and queues
 * set_powered_sync(); the reply is sent from
 * mgmt_set_powered_complete().
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: answer immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1306 
1307 int mgmt_new_settings(struct hci_dev *hdev)
1308 {
1309 	return new_settings(hdev, NULL);
1310 }
1311 
/* Context passed to mgmt_pending_foreach() callbacks that respond to
 * all pending commands of a given opcode; remembers the first socket
 * seen (held) so the caller can skip it when broadcasting events.
 */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket, or NULL */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1317 
1318 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1319 {
1320 	struct cmd_lookup *match = data;
1321 
1322 	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1323 
1324 	list_del(&cmd->list);
1325 
1326 	if (match->sk == NULL) {
1327 		match->sk = cmd->sk;
1328 		sock_hold(match->sk);
1329 	}
1330 
1331 	mgmt_pending_free(cmd);
1332 }
1333 
1334 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1335 {
1336 	u8 *status = data;
1337 
1338 	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1339 	mgmt_pending_remove(cmd);
1340 }
1341 
1342 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1343 {
1344 	if (cmd->cmd_complete) {
1345 		u8 *status = data;
1346 
1347 		cmd->cmd_complete(cmd, *status);
1348 		mgmt_pending_remove(cmd);
1349 
1350 		return;
1351 	}
1352 
1353 	cmd_status_rsp(cmd, data);
1354 }
1355 
1356 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1357 {
1358 	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1359 				 cmd->param, cmd->param_len);
1360 }
1361 
1362 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1363 {
1364 	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1365 				 cmd->param, sizeof(struct mgmt_addr_info));
1366 }
1367 
1368 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1369 {
1370 	if (!lmp_bredr_capable(hdev))
1371 		return MGMT_STATUS_NOT_SUPPORTED;
1372 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1373 		return MGMT_STATUS_REJECTED;
1374 	else
1375 		return MGMT_STATUS_SUCCESS;
1376 }
1377 
1378 static u8 mgmt_le_support(struct hci_dev *hdev)
1379 {
1380 	if (!lmp_le_capable(hdev))
1381 		return MGMT_STATUS_NOT_SUPPORTED;
1382 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1383 		return MGMT_STATUS_REJECTED;
1384 	else
1385 		return MGMT_STATUS_SUCCESS;
1386 }
1387 
/* Completion handler for MGMT_OP_SET_DISCOVERABLE queued via
 * hci_cmd_sync_queue(); arms the discoverable timeout on success.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		/* Roll back the limited-discoverable flag that
		 * set_discoverable() set optimistically.
		 */
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the timer that turns discoverable back off after the
	 * requested timeout (stored in hdev->discov_timeout seconds).
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1421 
1422 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1423 {
1424 	BT_DBG("%s", hdev->name);
1425 
1426 	return hci_update_discoverable_sync(hdev);
1427 }
1428 
/* Handler for MGMT_OP_SET_DISCOVERABLE.
 *
 * cp->val: 0x00 = off, 0x01 = general discoverable,
 * 0x02 = limited discoverable (requires a timeout).
 * Updates the discoverable flags and timeout and, when an HCI update
 * is needed, queues set_discoverable_sync(); the reply then comes from
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Discoverable requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: only toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1561 
1562 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1563 					  int err)
1564 {
1565 	struct mgmt_pending_cmd *cmd = data;
1566 
1567 	bt_dev_dbg(hdev, "err %d", err);
1568 
1569 	/* Make sure cmd still outstanding. */
1570 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1571 		return;
1572 
1573 	hci_dev_lock(hdev);
1574 
1575 	if (err) {
1576 		u8 mgmt_err = mgmt_status(err);
1577 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1578 		goto done;
1579 	}
1580 
1581 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1582 	new_settings(hdev, cmd->sk);
1583 
1584 done:
1585 	if (cmd)
1586 		mgmt_pending_remove(cmd);
1587 
1588 	hci_dev_unlock(hdev);
1589 }
1590 
1591 static int set_connectable_update_settings(struct hci_dev *hdev,
1592 					   struct sock *sk, u8 val)
1593 {
1594 	bool changed = false;
1595 	int err;
1596 
1597 	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1598 		changed = true;
1599 
1600 	if (val) {
1601 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1602 	} else {
1603 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1604 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1605 	}
1606 
1607 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1608 	if (err < 0)
1609 		return err;
1610 
1611 	if (changed) {
1612 		hci_req_update_scan(hdev);
1613 		hci_update_passive_scan(hdev);
1614 		return new_settings(hdev, sk);
1615 	}
1616 
1617 	return 0;
1618 }
1619 
1620 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1621 {
1622 	BT_DBG("%s", hdev->name);
1623 
1624 	return hci_update_connectable_sync(hdev);
1625 }
1626 
/* Handler for MGMT_OP_SET_CONNECTABLE: updates the connectable flags
 * and queues set_connectable_sync(); the reply comes from
 * mgmt_set_connectable_complete(). When powered off only the flags
 * change (set_connectable_update_settings()).
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also cancels discoverable
		 * mode and its timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1686 
1687 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1688 			u16 len)
1689 {
1690 	struct mgmt_mode *cp = data;
1691 	bool changed;
1692 	int err;
1693 
1694 	bt_dev_dbg(hdev, "sock %p", sk);
1695 
1696 	if (cp->val != 0x00 && cp->val != 0x01)
1697 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1698 				       MGMT_STATUS_INVALID_PARAMS);
1699 
1700 	hci_dev_lock(hdev);
1701 
1702 	if (cp->val)
1703 		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1704 	else
1705 		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1706 
1707 	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1708 	if (err < 0)
1709 		goto unlock;
1710 
1711 	if (changed) {
1712 		/* In limited privacy mode the change of bondable mode
1713 		 * may affect the local advertising address.
1714 		 */
1715 		hci_update_discoverable(hdev);
1716 
1717 		err = new_settings(hdev, sk);
1718 	}
1719 
1720 unlock:
1721 	hci_dev_unlock(hdev);
1722 	return err;
1723 }
1724 
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggles BR/EDR authentication
 * via HCI_OP_WRITE_AUTH_ENABLE. While powered off only the flag is
 * changed; the pending command is completed from the HCI event path.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Link security is a BR/EDR-only setting */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only toggle the flag, no HCI command needed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: answer immediately */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1793 
/* Completion handler for MGMT_OP_SET_SSP: settles the SSP (and the
 * dependent HS) flags based on the result and answers all pending
 * SET_SSP commands.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the flag set optimistically in
		 * set_ssp_sync() and broadcast the reverted settings.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* Disabling SSP also disables High Speed */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1842 
/* cmd_sync work for MGMT_OP_SET_SSP.
 *
 * When enabling, the SSP flag is set before issuing the HCI command
 * and reverted again afterwards; the final flag state is settled in
 * set_ssp_complete(). NOTE(review): the revert being gated on !err
 * (success) looks surprising — confirm against hci_write_ssp_mode_sync()
 * and set_ssp_complete(), which re-applies the flag on success.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1860 
/* Handler for MGMT_OP_SET_SSP (Secure Simple Pairing). While powered
 * off only the flags change; otherwise set_ssp_sync() is queued and
 * the reply is sent from set_ssp_complete().
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SSP is a BR/EDR feature and needs controller support */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag changes only (disabling SSP also clears HS) */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: answer immediately */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1940 
/* Handler for MGMT_OP_SET_HS (High Speed / AMP). Purely a flag change:
 * requires CONFIG_BT_HS, BR/EDR, SSP capability and SSP enabled, and
 * disabling is only permitted while powered off.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An in-flight SET_SSP could still turn SSP off underneath us */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2001 
/* Completion handler for MGMT_OP_SET_LE: answers all pending SET_LE
 * commands with either a status (on failure) or the current settings.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first socket it saw */
	if (match.sk)
		sock_put(match.sk);
}
2022 
/* cmd_sync work for MGMT_OP_SET_LE: updates the LE host support
 * setting in the controller and tears down or refreshes advertising
 * accordingly.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: stop and remove any advertising first */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2064 
/* Handler for MGMT_OP_SET_LE: enables/disables LE support. Flag-only
 * changes happen directly; otherwise set_le_sync() is queued and the
 * reply comes from set_le_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE drops all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off, or host LE support already matches: flag-only */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2156 
2157 /* This is a helper function to test for pending mgmt commands that can
2158  * cause CoD or EIR HCI commands. We can only allow one such pending
2159  * mgmt command at a time since otherwise we cannot easily track what
2160  * the current values are, will be, and based on that calculate if a new
2161  * HCI command needs to be sent and if yes with what value.
2162  */
2163 static bool pending_eir_or_class(struct hci_dev *hdev)
2164 {
2165 	struct mgmt_pending_cmd *cmd;
2166 
2167 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2168 		switch (cmd->opcode) {
2169 		case MGMT_OP_ADD_UUID:
2170 		case MGMT_OP_REMOVE_UUID:
2171 		case MGMT_OP_SET_DEV_CLASS:
2172 		case MGMT_OP_SET_POWERED:
2173 			return true;
2174 		}
2175 	}
2176 
2177 	return false;
2178 }
2179 
2180 static const u8 bluetooth_base_uuid[] = {
2181 			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2182 			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2183 };
2184 
2185 static u8 get_uuid_size(const u8 *uuid)
2186 {
2187 	u32 val;
2188 
2189 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2190 		return 128;
2191 
2192 	val = get_unaligned_le32(&uuid[12]);
2193 	if (val > 0xffff)
2194 		return 32;
2195 
2196 	return 16;
2197 }
2198 
2199 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2200 {
2201 	struct mgmt_pending_cmd *cmd = data;
2202 
2203 	bt_dev_dbg(hdev, "err %d", err);
2204 
2205 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2206 			  mgmt_status(err), hdev->dev_class, 3);
2207 
2208 	mgmt_pending_free(cmd);
2209 }
2210 
/* hci_sync work for MGMT_OP_ADD_UUID: refresh class of device, then EIR. */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}
2221 
/* MGMT_OP_ADD_UUID handler: register a service UUID and queue the
 * resulting class-of-device and EIR updates.
 *
 * Note that the UUID stays on hdev->uuids even if queuing the update
 * fails; the reply to userspace is sent from mgmt_class_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* On success mgmt_class_complete() replies and frees cmd. */
	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2267 
2268 static bool enable_service_cache(struct hci_dev *hdev)
2269 {
2270 	if (!hdev_is_powered(hdev))
2271 		return false;
2272 
2273 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2274 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2275 				   CACHE_TIMEOUT);
2276 		return true;
2277 	}
2278 
2279 	return false;
2280 }
2281 
/* hci_sync work for MGMT_OP_REMOVE_UUID: refresh class of device first,
 * then the EIR data if that succeeded.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2292 
/* MGMT_OP_REMOVE_UUID handler: remove one UUID (or all, when the
 * all-zero wildcard address is given) and queue the CoD/EIR update.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything". */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the flush could be deferred via the service cache,
		 * reply immediately - the update happens on the timer.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every entry matching the given UUID. */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* On success mgmt_class_complete() replies and frees cmd. */
	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2358 
2359 static int set_class_sync(struct hci_dev *hdev, void *data)
2360 {
2361 	int err = 0;
2362 
2363 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2364 		cancel_delayed_work_sync(&hdev->service_cache);
2365 		err = hci_update_eir_sync(hdev);
2366 	}
2367 
2368 	if (err)
2369 		return err;
2370 
2371 	return hci_update_class_sync(hdev);
2372 }
2373 
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor class of device.
 *
 * BR/EDR only. The reply carries the current 3-octet class of device.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time. */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two minor bits and high three major bits are reserved
	 * in the Class of Device format and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* While powered off just store the values; they are programmed
	 * into the controller on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* On success mgmt_class_complete() replies and frees cmd. */
	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2425 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the entire set of stored
 * BR/EDR link keys with the ones supplied by userspace.
 *
 * All keys are validated before any state is touched; the existing key
 * store is cleared unconditionally once validation passes.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Cap key_count so that the computed payload size cannot wrap. */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The packet length must exactly match the declared key count. */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry up front so the store is never half-loaded.
	 * 0x08 is the highest defined link key type here - TODO confirm
	 * against the HCI key type definitions when these grow.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Administratively blocked keys are never loaded. */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2514 
2515 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2516 			   u8 addr_type, struct sock *skip_sk)
2517 {
2518 	struct mgmt_ev_device_unpaired ev;
2519 
2520 	bacpy(&ev.addr.bdaddr, bdaddr);
2521 	ev.addr.type = addr_type;
2522 
2523 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2524 			  skip_sk);
2525 }
2526 
/* MGMT_OP_UNPAIR_DEVICE handler: remove all pairing material (link key
 * for BR/EDR; SMP LTK/IRK for LE) and optionally disconnect the device.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean flag: 0x00 or 0x01 only. */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Without an active connection the parameters can go right away. */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Keep the command pending until the disconnect completes. */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2654 
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE connection to
 * the given address. The command stays pending until the disconnect
 * completes; the reply echoes the address.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time. */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no link to tear down. */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2720 
2721 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2722 {
2723 	switch (link_type) {
2724 	case LE_LINK:
2725 		switch (addr_type) {
2726 		case ADDR_LE_DEV_PUBLIC:
2727 			return BDADDR_LE_PUBLIC;
2728 
2729 		default:
2730 			/* Fallback to LE Random address type */
2731 			return BDADDR_LE_RANDOM;
2732 		}
2733 
2734 	default:
2735 		/* Fallback to BR/EDR type */
2736 		return BDADDR_BREDR;
2737 	}
2738 }
2739 
2740 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2741 			   u16 data_len)
2742 {
2743 	struct mgmt_rp_get_connections *rp;
2744 	struct hci_conn *c;
2745 	int err;
2746 	u16 i;
2747 
2748 	bt_dev_dbg(hdev, "sock %p", sk);
2749 
2750 	hci_dev_lock(hdev);
2751 
2752 	if (!hdev_is_powered(hdev)) {
2753 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2754 				      MGMT_STATUS_NOT_POWERED);
2755 		goto unlock;
2756 	}
2757 
2758 	i = 0;
2759 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2760 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2761 			i++;
2762 	}
2763 
2764 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2765 	if (!rp) {
2766 		err = -ENOMEM;
2767 		goto unlock;
2768 	}
2769 
2770 	i = 0;
2771 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2772 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2773 			continue;
2774 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2775 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2776 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2777 			continue;
2778 		i++;
2779 	}
2780 
2781 	rp->conn_count = cpu_to_le16(i);
2782 
2783 	/* Recalculate length in case of filtered SCO connections, etc */
2784 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2785 				struct_size(rp, addr, i));
2786 
2787 	kfree(rp);
2788 
2789 unlock:
2790 	hci_dev_unlock(hdev);
2791 	return err;
2792 }
2793 
2794 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2795 				   struct mgmt_cp_pin_code_neg_reply *cp)
2796 {
2797 	struct mgmt_pending_cmd *cmd;
2798 	int err;
2799 
2800 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2801 			       sizeof(*cp));
2802 	if (!cmd)
2803 		return -ENOMEM;
2804 
2805 	cmd->cmd_complete = addr_cmd_complete;
2806 
2807 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2808 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2809 	if (err < 0)
2810 		mgmt_pending_remove(cmd);
2811 
2812 	return err;
2813 }
2814 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; anything shorter is
	 * answered with a negative reply towards the controller and an
	 * INVALID_PARAMS status towards userspace.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2876 
2877 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2878 			     u16 len)
2879 {
2880 	struct mgmt_cp_set_io_capability *cp = data;
2881 
2882 	bt_dev_dbg(hdev, "sock %p", sk);
2883 
2884 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2885 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2886 				       MGMT_STATUS_INVALID_PARAMS);
2887 
2888 	hci_dev_lock(hdev);
2889 
2890 	hdev->io_capability = cp->io_capability;
2891 
2892 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2893 
2894 	hci_dev_unlock(hdev);
2895 
2896 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2897 				 NULL, 0);
2898 }
2899 
2900 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2901 {
2902 	struct hci_dev *hdev = conn->hdev;
2903 	struct mgmt_pending_cmd *cmd;
2904 
2905 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2906 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2907 			continue;
2908 
2909 		if (cmd->user_data != conn)
2910 			continue;
2911 
2912 		return cmd;
2913 	}
2914 
2915 	return NULL;
2916 }
2917 
/* Finish a pending PAIR_DEVICE command: reply with the peer address and
 * @status, detach our callbacks from the connection and drop the
 * references taken when the command was created.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Releases the reference taken via hci_conn_get() in pair_device(). */
	hci_conn_put(conn);

	return err;
}
2946 
2947 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2948 {
2949 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2950 	struct mgmt_pending_cmd *cmd;
2951 
2952 	cmd = find_pairing(conn);
2953 	if (cmd) {
2954 		cmd->cmd_complete(cmd, status);
2955 		mgmt_pending_remove(cmd);
2956 	}
2957 }
2958 
2959 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2960 {
2961 	struct mgmt_pending_cmd *cmd;
2962 
2963 	BT_DBG("status %u", status);
2964 
2965 	cmd = find_pairing(conn);
2966 	if (!cmd) {
2967 		BT_DBG("Unable to find a pending command");
2968 		return;
2969 	}
2970 
2971 	cmd->cmd_complete(cmd, mgmt_status(status));
2972 	mgmt_pending_remove(cmd);
2973 }
2974 
2975 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2976 {
2977 	struct mgmt_pending_cmd *cmd;
2978 
2979 	BT_DBG("status %u", status);
2980 
2981 	if (!status)
2982 		return;
2983 
2984 	cmd = find_pairing(conn);
2985 	if (!cmd) {
2986 		BT_DBG("Unable to find a pending command");
2987 		return;
2988 	}
2989 
2990 	cmd->cmd_complete(cmd, mgmt_status(status));
2991 	mgmt_pending_remove(cmd);
2992 }
2993 
2994 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2995 		       u16 len)
2996 {
2997 	struct mgmt_cp_pair_device *cp = data;
2998 	struct mgmt_rp_pair_device rp;
2999 	struct mgmt_pending_cmd *cmd;
3000 	u8 sec_level, auth_type;
3001 	struct hci_conn *conn;
3002 	int err;
3003 
3004 	bt_dev_dbg(hdev, "sock %p", sk);
3005 
3006 	memset(&rp, 0, sizeof(rp));
3007 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3008 	rp.addr.type = cp->addr.type;
3009 
3010 	if (!bdaddr_type_is_valid(cp->addr.type))
3011 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3012 					 MGMT_STATUS_INVALID_PARAMS,
3013 					 &rp, sizeof(rp));
3014 
3015 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3016 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3017 					 MGMT_STATUS_INVALID_PARAMS,
3018 					 &rp, sizeof(rp));
3019 
3020 	hci_dev_lock(hdev);
3021 
3022 	if (!hdev_is_powered(hdev)) {
3023 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3024 					MGMT_STATUS_NOT_POWERED, &rp,
3025 					sizeof(rp));
3026 		goto unlock;
3027 	}
3028 
3029 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3030 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3031 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3032 					sizeof(rp));
3033 		goto unlock;
3034 	}
3035 
3036 	sec_level = BT_SECURITY_MEDIUM;
3037 	auth_type = HCI_AT_DEDICATED_BONDING;
3038 
3039 	if (cp->addr.type == BDADDR_BREDR) {
3040 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3041 				       auth_type, CONN_REASON_PAIR_DEVICE);
3042 	} else {
3043 		u8 addr_type = le_addr_type(cp->addr.type);
3044 		struct hci_conn_params *p;
3045 
3046 		/* When pairing a new device, it is expected to remember
3047 		 * this device for future connections. Adding the connection
3048 		 * parameter information ahead of time allows tracking
3049 		 * of the peripheral preferred values and will speed up any
3050 		 * further connection establishment.
3051 		 *
3052 		 * If connection parameters already exist, then they
3053 		 * will be kept and this function does nothing.
3054 		 */
3055 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3056 
3057 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3058 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3059 
3060 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3061 					   sec_level, HCI_LE_CONN_TIMEOUT,
3062 					   CONN_REASON_PAIR_DEVICE);
3063 	}
3064 
3065 	if (IS_ERR(conn)) {
3066 		int status;
3067 
3068 		if (PTR_ERR(conn) == -EBUSY)
3069 			status = MGMT_STATUS_BUSY;
3070 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3071 			status = MGMT_STATUS_NOT_SUPPORTED;
3072 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3073 			status = MGMT_STATUS_REJECTED;
3074 		else
3075 			status = MGMT_STATUS_CONNECT_FAILED;
3076 
3077 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3078 					status, &rp, sizeof(rp));
3079 		goto unlock;
3080 	}
3081 
3082 	if (conn->connect_cfm_cb) {
3083 		hci_conn_drop(conn);
3084 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3085 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3086 		goto unlock;
3087 	}
3088 
3089 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3090 	if (!cmd) {
3091 		err = -ENOMEM;
3092 		hci_conn_drop(conn);
3093 		goto unlock;
3094 	}
3095 
3096 	cmd->cmd_complete = pairing_complete;
3097 
3098 	/* For LE, just connecting isn't a proof that the pairing finished */
3099 	if (cp->addr.type == BDADDR_BREDR) {
3100 		conn->connect_cfm_cb = pairing_complete_cb;
3101 		conn->security_cfm_cb = pairing_complete_cb;
3102 		conn->disconn_cfm_cb = pairing_complete_cb;
3103 	} else {
3104 		conn->connect_cfm_cb = le_pairing_complete_cb;
3105 		conn->security_cfm_cb = le_pairing_complete_cb;
3106 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3107 	}
3108 
3109 	conn->io_capability = cp->io_cap;
3110 	cmd->user_data = hci_conn_get(conn);
3111 
3112 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3113 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3114 		cmd->cmd_complete(cmd, 0);
3115 		mgmt_pending_remove(cmd);
3116 	}
3117 
3118 	err = 0;
3119 
3120 unlock:
3121 	hci_dev_unlock(hdev);
3122 	return err;
3123 }
3124 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending PAIR_DEVICE
 * command matching the given address, remove any partial pairing state
 * and tear down a link created solely for the pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The cancel must target the device currently being paired. */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3181 
/* Common handler for all user pairing responses (PIN neg reply, user
 * confirm/passkey replies and their negatives).
 *
 * @mgmt_op: mgmt opcode used in the replies to @sk.
 * @hci_op:  HCI command to forward for BR/EDR connections.
 * @passkey: only used when @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 *
 * LE responses are handed to SMP directly; BR/EDR responses are queued
 * as a pending command and forwarded over HCI.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go to the SMP layer, not over HCI. */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3252 
3253 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3254 			      void *data, u16 len)
3255 {
3256 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3257 
3258 	bt_dev_dbg(hdev, "sock %p", sk);
3259 
3260 	return user_pairing_resp(sk, hdev, &cp->addr,
3261 				MGMT_OP_PIN_CODE_NEG_REPLY,
3262 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3263 }
3264 
3265 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3266 			      u16 len)
3267 {
3268 	struct mgmt_cp_user_confirm_reply *cp = data;
3269 
3270 	bt_dev_dbg(hdev, "sock %p", sk);
3271 
3272 	if (len != sizeof(*cp))
3273 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3274 				       MGMT_STATUS_INVALID_PARAMS);
3275 
3276 	return user_pairing_resp(sk, hdev, &cp->addr,
3277 				 MGMT_OP_USER_CONFIRM_REPLY,
3278 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3279 }
3280 
3281 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3282 				  void *data, u16 len)
3283 {
3284 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3285 
3286 	bt_dev_dbg(hdev, "sock %p", sk);
3287 
3288 	return user_pairing_resp(sk, hdev, &cp->addr,
3289 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3290 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3291 }
3292 
3293 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3294 			      u16 len)
3295 {
3296 	struct mgmt_cp_user_passkey_reply *cp = data;
3297 
3298 	bt_dev_dbg(hdev, "sock %p", sk);
3299 
3300 	return user_pairing_resp(sk, hdev, &cp->addr,
3301 				 MGMT_OP_USER_PASSKEY_REPLY,
3302 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3303 }
3304 
3305 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3306 				  void *data, u16 len)
3307 {
3308 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3309 
3310 	bt_dev_dbg(hdev, "sock %p", sk);
3311 
3312 	return user_pairing_resp(sk, hdev, &cp->addr,
3313 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3314 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3315 }
3316 
3317 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3318 {
3319 	struct adv_info *adv_instance;
3320 
3321 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3322 	if (!adv_instance)
3323 		return 0;
3324 
3325 	/* stop if current instance doesn't need to be changed */
3326 	if (!(adv_instance->flags & flags))
3327 		return 0;
3328 
3329 	cancel_adv_timeout(hdev);
3330 
3331 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3332 	if (!adv_instance)
3333 		return 0;
3334 
3335 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3336 
3337 	return 0;
3338 }
3339 
/* hci_cmd_sync callback: expire advertising instances that include the
 * local name after it has been changed.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3344 
/* Completion callback for the Set Local Name command: report the
 * result to the requesting socket and, on success, refresh any
 * advertising data that carries the name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the command was already taken off the pending list
	 * (e.g. cancelled) before this callback ran.
	 */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* If advertising is active the name may be part of the
		 * advertising instance, so expire/reschedule it.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3369 
/* hci_cmd_sync callback for Set Local Name: push the new name to the
 * controller (BR/EDR name + EIR) and to the LE scan response data.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3385 
/* Handle the Set Local Name mgmt command. While powered off the name
 * is only stored locally and a name-changed event is emitted; while
 * powered on the update is queued to be pushed to the controller and
 * completion is reported from set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is never sent to the controller, so it can be
	 * stored unconditionally up front.
	 */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		/* NOTE: the "failed" label is also the normal exit path */
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Store the new name immediately; set_name_sync() reads it from
	 * hdev when pushing the update to the controller.
	 */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3448 
/* hci_cmd_sync callback: expire advertising instances that include the
 * appearance value after it has been changed.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3453 
3454 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3455 			  u16 len)
3456 {
3457 	struct mgmt_cp_set_appearance *cp = data;
3458 	u16 appearance;
3459 	int err;
3460 
3461 	bt_dev_dbg(hdev, "sock %p", sk);
3462 
3463 	if (!lmp_le_capable(hdev))
3464 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3465 				       MGMT_STATUS_NOT_SUPPORTED);
3466 
3467 	appearance = le16_to_cpu(cp->appearance);
3468 
3469 	hci_dev_lock(hdev);
3470 
3471 	if (hdev->appearance != appearance) {
3472 		hdev->appearance = appearance;
3473 
3474 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3475 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3476 					   NULL);
3477 
3478 		ext_info_changed(hdev, sk);
3479 	}
3480 
3481 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3482 				0);
3483 
3484 	hci_dev_unlock(hdev);
3485 
3486 	return err;
3487 }
3488 
3489 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3490 				 void *data, u16 len)
3491 {
3492 	struct mgmt_rp_get_phy_configuration rp;
3493 
3494 	bt_dev_dbg(hdev, "sock %p", sk);
3495 
3496 	hci_dev_lock(hdev);
3497 
3498 	memset(&rp, 0, sizeof(rp));
3499 
3500 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3501 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3502 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3503 
3504 	hci_dev_unlock(hdev);
3505 
3506 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3507 				 &rp, sizeof(rp));
3508 }
3509 
3510 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3511 {
3512 	struct mgmt_ev_phy_configuration_changed ev;
3513 
3514 	memset(&ev, 0, sizeof(ev));
3515 
3516 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3517 
3518 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3519 			  sizeof(ev), skip);
3520 }
3521 
/* Completion callback for Set PHY Configuration: derive the final
 * status from the HCI command response skb and notify the requester
 * plus all other mgmt sockets on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if the command was already removed from the pending
	 * list before this callback ran.
	 */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* The sync request itself succeeded; now inspect the skb that
	 * set_default_phy_sync() stored: it may be missing, an ERR_PTR,
	 * or contain the controller's status in its first byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	/* Only free a real skb; ERR_PTR values must not be passed on */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3558 
/* hci_cmd_sync callback for Set PHY Configuration: translate the mgmt
 * LE PHY selection bits into HCI_OP_LE_SET_DEFAULT_PHY parameters and
 * issue the command, storing the response skb for the completion
 * callback to inspect.
 */
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bit 0/1 tell the controller the host has no TX/RX
	 * PHY preference, in which case tx_phys/rx_phys are ignored.
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}
3597 
/* Handle the Set PHY Configuration mgmt command. BR/EDR PHY selection
 * is applied locally by adjusting hdev->pkt_type, while LE PHY changes
 * are forwarded to the controller via set_default_phy_sync().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject selections that include PHYs the controller lacks */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Non-configurable PHYs must always remain selected */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selection onto ACL packet type bits. Note
	 * that for the EDR (2M/3M) entries below the logic is inverted:
	 * a set HCI_2DHx/HCI_3DHx bit disables that packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only BR/EDR PHYs changed there is nothing to send to the
	 * controller; report the change (if any) and complete now.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3726 
3727 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3728 			    u16 len)
3729 {
3730 	int err = MGMT_STATUS_SUCCESS;
3731 	struct mgmt_cp_set_blocked_keys *keys = data;
3732 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3733 				   sizeof(struct mgmt_blocked_key_info));
3734 	u16 key_count, expected_len;
3735 	int i;
3736 
3737 	bt_dev_dbg(hdev, "sock %p", sk);
3738 
3739 	key_count = __le16_to_cpu(keys->key_count);
3740 	if (key_count > max_key_count) {
3741 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3742 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3743 				       MGMT_STATUS_INVALID_PARAMS);
3744 	}
3745 
3746 	expected_len = struct_size(keys, keys, key_count);
3747 	if (expected_len != len) {
3748 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3749 			   expected_len, len);
3750 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3751 				       MGMT_STATUS_INVALID_PARAMS);
3752 	}
3753 
3754 	hci_dev_lock(hdev);
3755 
3756 	hci_blocked_keys_clear(hdev);
3757 
3758 	for (i = 0; i < keys->key_count; ++i) {
3759 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3760 
3761 		if (!b) {
3762 			err = MGMT_STATUS_NO_RESOURCES;
3763 			break;
3764 		}
3765 
3766 		b->type = keys->keys[i].type;
3767 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3768 		list_add_rcu(&b->list, &hdev->blocked_keys);
3769 	}
3770 	hci_dev_unlock(hdev);
3771 
3772 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3773 				err, NULL, 0);
3774 }
3775 
3776 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3777 			       void *data, u16 len)
3778 {
3779 	struct mgmt_mode *cp = data;
3780 	int err;
3781 	bool changed = false;
3782 
3783 	bt_dev_dbg(hdev, "sock %p", sk);
3784 
3785 	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3786 		return mgmt_cmd_status(sk, hdev->id,
3787 				       MGMT_OP_SET_WIDEBAND_SPEECH,
3788 				       MGMT_STATUS_NOT_SUPPORTED);
3789 
3790 	if (cp->val != 0x00 && cp->val != 0x01)
3791 		return mgmt_cmd_status(sk, hdev->id,
3792 				       MGMT_OP_SET_WIDEBAND_SPEECH,
3793 				       MGMT_STATUS_INVALID_PARAMS);
3794 
3795 	hci_dev_lock(hdev);
3796 
3797 	if (hdev_is_powered(hdev) &&
3798 	    !!cp->val != hci_dev_test_flag(hdev,
3799 					   HCI_WIDEBAND_SPEECH_ENABLED)) {
3800 		err = mgmt_cmd_status(sk, hdev->id,
3801 				      MGMT_OP_SET_WIDEBAND_SPEECH,
3802 				      MGMT_STATUS_REJECTED);
3803 		goto unlock;
3804 	}
3805 
3806 	if (cp->val)
3807 		changed = !hci_dev_test_and_set_flag(hdev,
3808 						   HCI_WIDEBAND_SPEECH_ENABLED);
3809 	else
3810 		changed = hci_dev_test_and_clear_flag(hdev,
3811 						   HCI_WIDEBAND_SPEECH_ENABLED);
3812 
3813 	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3814 	if (err < 0)
3815 		goto unlock;
3816 
3817 	if (changed)
3818 		err = new_settings(hdev, sk);
3819 
3820 unlock:
3821 	hci_dev_unlock(hdev);
3822 	return err;
3823 }
3824 
/* Handle the Read Controller Capabilities mgmt command: build an
 * EIR-encoded list of security capability entries (flags, key sizes,
 * LE tx power range) and return it to the requester.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* buf is sized to hold the response header plus all capability
	 * entries appended below.
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3891 
/* Experimental feature UUIDs. The arrays store the UUID bytes in
 * reversed (little-endian) order relative to the string form given in
 * the comment above each one.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3923 
/* Handle the Read Experimental Features Information mgmt command.
 * @hdev may be NULL when the command was sent to the non-controller
 * index, in which case only global (debug) features are reported.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[102];   /* Enough space for 5 features: 2 + 20 * 5 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* The debug feature is global and only listed on the
	 * non-controller index.
	 */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	/* For each per-controller feature, BIT(0) in flags indicates
	 * whether the feature is currently enabled.
	 */
	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
4002 
4003 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4004 					  struct sock *skip)
4005 {
4006 	struct mgmt_ev_exp_feature_changed ev;
4007 
4008 	memset(&ev, 0, sizeof(ev));
4009 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
4010 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4011 
4012 	if (enabled && privacy_mode_capable(hdev))
4013 		set_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
4014 	else
4015 		clear_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
4016 
4017 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4018 				  &ev, sizeof(ev),
4019 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4020 
4021 }
4022 
4023 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4024 			       bool enabled, struct sock *skip)
4025 {
4026 	struct mgmt_ev_exp_feature_changed ev;
4027 
4028 	memset(&ev, 0, sizeof(ev));
4029 	memcpy(ev.uuid, uuid, 16);
4030 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4031 
4032 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4033 				  &ev, sizeof(ev),
4034 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4035 }
4036 
/* Initializer for an experimental-features table entry, pairing a
 * feature UUID with its set handler.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4042 
/* The zero key uuid is special. Multiple exp features are set through it.
 * Setting it disables all experimental features: the debug feature on
 * the non-controller index, and LL privacy on a powered-off controller.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be toggled while the controller is off */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4079 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Set handler for the debug experimental feature: toggles the global
 * Bluetooth debug logging state via the non-controller index.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other listeners only if the state actually changed */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4126 
/* Set handler for the LL privacy (RPA resolution) experimental
 * feature. Changes are only permitted while the controller is powered
 * off; enabling it also clears the advertising flag.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other listeners only if the state actually changed */
	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4191 
/* Set handler for the quality report experimental feature. The toggle
 * is applied through the driver's set_quality_report callback when one
 * is provided, otherwise through the AOSP vendor extension.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The request sync lock serializes this with other HCI request
	 * processing while the driver/AOSP callbacks run.
	 */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only record the new state after the callback succeeded */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4265 
4266 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4267 				  struct mgmt_cp_set_exp_feature *cp,
4268 				  u16 data_len)
4269 {
4270 	bool val, changed;
4271 	int err;
4272 	struct mgmt_rp_set_exp_feature rp;
4273 
4274 	/* Command requires to use a valid controller index */
4275 	if (!hdev)
4276 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4277 				       MGMT_OP_SET_EXP_FEATURE,
4278 				       MGMT_STATUS_INVALID_INDEX);
4279 
4280 	/* Parameters are limited to a single octet */
4281 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4282 		return mgmt_cmd_status(sk, hdev->id,
4283 				       MGMT_OP_SET_EXP_FEATURE,
4284 				       MGMT_STATUS_INVALID_PARAMS);
4285 
4286 	/* Only boolean on/off is supported */
4287 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4288 		return mgmt_cmd_status(sk, hdev->id,
4289 				       MGMT_OP_SET_EXP_FEATURE,
4290 				       MGMT_STATUS_INVALID_PARAMS);
4291 
4292 	val = !!cp->param[0];
4293 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4294 
4295 	if (!hdev->get_data_path_id) {
4296 		return mgmt_cmd_status(sk, hdev->id,
4297 				       MGMT_OP_SET_EXP_FEATURE,
4298 				       MGMT_STATUS_NOT_SUPPORTED);
4299 	}
4300 
4301 	if (changed) {
4302 		if (val)
4303 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4304 		else
4305 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4306 	}
4307 
4308 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4309 		    val, changed);
4310 
4311 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4312 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4313 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4314 	err = mgmt_cmd_complete(sk, hdev->id,
4315 				MGMT_OP_SET_EXP_FEATURE, 0,
4316 				&rp, sizeof(rp));
4317 
4318 	if (changed)
4319 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4320 
4321 	return err;
4322 }
4323 
4324 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4325 					  struct mgmt_cp_set_exp_feature *cp,
4326 					  u16 data_len)
4327 {
4328 	bool val, changed;
4329 	int err;
4330 	struct mgmt_rp_set_exp_feature rp;
4331 
4332 	/* Command requires to use a valid controller index */
4333 	if (!hdev)
4334 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4335 				       MGMT_OP_SET_EXP_FEATURE,
4336 				       MGMT_STATUS_INVALID_INDEX);
4337 
4338 	/* Parameters are limited to a single octet */
4339 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4340 		return mgmt_cmd_status(sk, hdev->id,
4341 				       MGMT_OP_SET_EXP_FEATURE,
4342 				       MGMT_STATUS_INVALID_PARAMS);
4343 
4344 	/* Only boolean on/off is supported */
4345 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4346 		return mgmt_cmd_status(sk, hdev->id,
4347 				       MGMT_OP_SET_EXP_FEATURE,
4348 				       MGMT_STATUS_INVALID_PARAMS);
4349 
4350 	val = !!cp->param[0];
4351 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4352 
4353 	if (!hci_dev_le_state_simultaneous(hdev)) {
4354 		return mgmt_cmd_status(sk, hdev->id,
4355 				       MGMT_OP_SET_EXP_FEATURE,
4356 				       MGMT_STATUS_NOT_SUPPORTED);
4357 	}
4358 
4359 	if (changed) {
4360 		if (val)
4361 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4362 		else
4363 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4364 	}
4365 
4366 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4367 		    val, changed);
4368 
4369 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4370 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4371 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4372 	err = mgmt_cmd_complete(sk, hdev->id,
4373 				MGMT_OP_SET_EXP_FEATURE, 0,
4374 				&rp, sizeof(rp));
4375 
4376 	if (changed)
4377 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4378 
4379 	return err;
4380 }
4381 
/* Dispatch table for MGMT_OP_SET_EXP_FEATURE: each entry pairs a 16-byte
 * feature UUID with the handler that toggles it.  set_exp_feature() walks
 * this table and calls the first handler whose UUID matches the request.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	/* The all-zero UUID entry is handled by set_zero_key_func
	 * (defined earlier in this file).
	 */
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4399 
4400 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4401 			   void *data, u16 data_len)
4402 {
4403 	struct mgmt_cp_set_exp_feature *cp = data;
4404 	size_t i = 0;
4405 
4406 	bt_dev_dbg(hdev, "sock %p", sk);
4407 
4408 	for (i = 0; exp_features[i].uuid; i++) {
4409 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4410 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4411 	}
4412 
4413 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4414 			       MGMT_OP_SET_EXP_FEATURE,
4415 			       MGMT_STATUS_NOT_SUPPORTED);
4416 }
4417 
4418 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4419 			    u16 data_len)
4420 {
4421 	struct mgmt_cp_get_device_flags *cp = data;
4422 	struct mgmt_rp_get_device_flags rp;
4423 	struct bdaddr_list_with_flags *br_params;
4424 	struct hci_conn_params *params;
4425 	u32 supported_flags;
4426 	u32 current_flags = 0;
4427 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4428 
4429 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4430 		   &cp->addr.bdaddr, cp->addr.type);
4431 
4432 	hci_dev_lock(hdev);
4433 
4434 	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
4435 			__HCI_CONN_NUM_FLAGS);
4436 
4437 	memset(&rp, 0, sizeof(rp));
4438 
4439 	if (cp->addr.type == BDADDR_BREDR) {
4440 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4441 							      &cp->addr.bdaddr,
4442 							      cp->addr.type);
4443 		if (!br_params)
4444 			goto done;
4445 
4446 		bitmap_to_arr32(&current_flags, br_params->flags,
4447 				__HCI_CONN_NUM_FLAGS);
4448 	} else {
4449 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4450 						le_addr_type(cp->addr.type));
4451 
4452 		if (!params)
4453 			goto done;
4454 
4455 		bitmap_to_arr32(&current_flags, params->flags,
4456 				__HCI_CONN_NUM_FLAGS);
4457 	}
4458 
4459 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4460 	rp.addr.type = cp->addr.type;
4461 	rp.supported_flags = cpu_to_le32(supported_flags);
4462 	rp.current_flags = cpu_to_le32(current_flags);
4463 
4464 	status = MGMT_STATUS_SUCCESS;
4465 
4466 done:
4467 	hci_dev_unlock(hdev);
4468 
4469 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4470 				&rp, sizeof(rp));
4471 }
4472 
4473 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4474 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4475 				 u32 supported_flags, u32 current_flags)
4476 {
4477 	struct mgmt_ev_device_flags_changed ev;
4478 
4479 	bacpy(&ev.addr.bdaddr, bdaddr);
4480 	ev.addr.type = bdaddr_type;
4481 	ev.supported_flags = cpu_to_le32(supported_flags);
4482 	ev.current_flags = cpu_to_le32(current_flags);
4483 
4484 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4485 }
4486 
4487 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4488 			    u16 len)
4489 {
4490 	struct mgmt_cp_set_device_flags *cp = data;
4491 	struct bdaddr_list_with_flags *br_params;
4492 	struct hci_conn_params *params;
4493 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4494 	u32 supported_flags;
4495 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4496 
4497 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4498 		   &cp->addr.bdaddr, cp->addr.type,
4499 		   __le32_to_cpu(current_flags));
4500 
4501 	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
4502 			__HCI_CONN_NUM_FLAGS);
4503 
4504 	if ((supported_flags | current_flags) != supported_flags) {
4505 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4506 			    current_flags, supported_flags);
4507 		goto done;
4508 	}
4509 
4510 	hci_dev_lock(hdev);
4511 
4512 	if (cp->addr.type == BDADDR_BREDR) {
4513 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4514 							      &cp->addr.bdaddr,
4515 							      cp->addr.type);
4516 
4517 		if (br_params) {
4518 			bitmap_from_u64(br_params->flags, current_flags);
4519 			status = MGMT_STATUS_SUCCESS;
4520 		} else {
4521 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4522 				    &cp->addr.bdaddr, cp->addr.type);
4523 		}
4524 	} else {
4525 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4526 						le_addr_type(cp->addr.type));
4527 		if (params) {
4528 			bitmap_from_u64(params->flags, current_flags);
4529 			status = MGMT_STATUS_SUCCESS;
4530 
4531 			/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4532 			 * has been set.
4533 			 */
4534 			if (test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY,
4535 				     params->flags))
4536 				hci_update_passive_scan(hdev);
4537 		} else {
4538 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4539 				    &cp->addr.bdaddr,
4540 				    le_addr_type(cp->addr.type));
4541 		}
4542 	}
4543 
4544 done:
4545 	hci_dev_unlock(hdev);
4546 
4547 	if (status == MGMT_STATUS_SUCCESS)
4548 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4549 				     supported_flags, current_flags);
4550 
4551 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4552 				 &cp->addr, sizeof(cp->addr));
4553 }
4554 
4555 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4556 				   u16 handle)
4557 {
4558 	struct mgmt_ev_adv_monitor_added ev;
4559 
4560 	ev.monitor_handle = cpu_to_le16(handle);
4561 
4562 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4563 }
4564 
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for @handle.
 *
 * If a MGMT_OP_REMOVE_ADV_MONITOR command is still pending and named a
 * specific (non-zero) handle, the event is suppressed for the socket
 * that issued the command: that socket is answered with a command
 * complete from mgmt_remove_adv_monitor_complete() instead.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* Non-zero handle means a targeted removal, not "remove
		 * all"; skip the requester for the broadcast.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
4584 
/* Handler for MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported and
 * enabled monitor features plus the handles of every registered
 * advertisement monitor.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	/* NOTE(review): assumes the monitor IDR never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries; registration is
	 * presumably bounded elsewhere - confirm in hci_add_adv_monitor().
	 */
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot the handles of all currently registered monitors. */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	/* Reply is variable length: fixed header plus one u16 per handle. */
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4633 
4634 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4635 {
4636 	struct mgmt_rp_add_adv_patterns_monitor rp;
4637 	struct mgmt_pending_cmd *cmd;
4638 	struct adv_monitor *monitor;
4639 	int err = 0;
4640 
4641 	hci_dev_lock(hdev);
4642 
4643 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4644 	if (!cmd) {
4645 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4646 		if (!cmd)
4647 			goto done;
4648 	}
4649 
4650 	monitor = cmd->user_data;
4651 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4652 
4653 	if (!status) {
4654 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4655 		hdev->adv_monitors_cnt++;
4656 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4657 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4658 		hci_update_passive_scan(hdev);
4659 	}
4660 
4661 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4662 				mgmt_status(status), &rp, sizeof(rp));
4663 	mgmt_pending_remove(cmd);
4664 
4665 done:
4666 	hci_dev_unlock(hdev);
4667 	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4668 		   rp.monitor_handle, status);
4669 
4670 	return err;
4671 }
4672 
/* Common tail for the ADD_ADV_PATTERNS_MONITOR[_RSSI] handlers.
 *
 * Takes ownership of @m: on every error path (including a non-zero
 * @status passed in by the caller) the monitor is released via
 * hci_free_adv_monitor().  If registration completes synchronously the
 * command is answered here; otherwise a pending command is left behind
 * for mgmt_add_adv_patterns_monitor_complete() to answer.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	/* Caller already failed (e.g. bad parameters); just report it. */
	if (status)
		goto unlock;

	/* Only one monitor/LE-state operation may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	/* pending == true means a controller round-trip is in progress and
	 * the reply will come from the completion handler instead.
	 */
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	if (!pending) {
		/* Registered synchronously: answer the command right away. */
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4736 
4737 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4738 				   struct mgmt_adv_rssi_thresholds *rssi)
4739 {
4740 	if (rssi) {
4741 		m->rssi.low_threshold = rssi->low_threshold;
4742 		m->rssi.low_threshold_timeout =
4743 		    __le16_to_cpu(rssi->low_threshold_timeout);
4744 		m->rssi.high_threshold = rssi->high_threshold;
4745 		m->rssi.high_threshold_timeout =
4746 		    __le16_to_cpu(rssi->high_threshold_timeout);
4747 		m->rssi.sampling_period = rssi->sampling_period;
4748 	} else {
4749 		/* Default values. These numbers are the least constricting
4750 		 * parameters for MSFT API to work, so it behaves as if there
4751 		 * are no rssi parameter to consider. May need to be changed
4752 		 * if other API are to be supported.
4753 		 */
4754 		m->rssi.low_threshold = -127;
4755 		m->rssi.low_threshold_timeout = 60;
4756 		m->rssi.high_threshold = -127;
4757 		m->rssi.high_threshold_timeout = 0;
4758 		m->rssi.sampling_period = 0;
4759 	}
4760 }
4761 
4762 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4763 				    struct mgmt_adv_pattern *patterns)
4764 {
4765 	u8 offset = 0, length = 0;
4766 	struct adv_pattern *p = NULL;
4767 	int i;
4768 
4769 	for (i = 0; i < pattern_count; i++) {
4770 		offset = patterns[i].offset;
4771 		length = patterns[i].length;
4772 		if (offset >= HCI_MAX_AD_LENGTH ||
4773 		    length > HCI_MAX_AD_LENGTH ||
4774 		    (offset + length) > HCI_MAX_AD_LENGTH)
4775 			return MGMT_STATUS_INVALID_PARAMS;
4776 
4777 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4778 		if (!p)
4779 			return MGMT_STATUS_NO_RESOURCES;
4780 
4781 		p->ad_type = patterns[i].ad_type;
4782 		p->offset = patterns[i].offset;
4783 		p->length = patterns[i].length;
4784 		memcpy(p->value, patterns[i].value, p->length);
4785 
4786 		INIT_LIST_HEAD(&p->list);
4787 		list_add(&p->list, &m->patterns);
4788 	}
4789 
4790 	return MGMT_STATUS_SUCCESS;
4791 }
4792 
4793 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4794 				    void *data, u16 len)
4795 {
4796 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4797 	struct adv_monitor *m = NULL;
4798 	u8 status = MGMT_STATUS_SUCCESS;
4799 	size_t expected_size = sizeof(*cp);
4800 
4801 	BT_DBG("request for %s", hdev->name);
4802 
4803 	if (len <= sizeof(*cp)) {
4804 		status = MGMT_STATUS_INVALID_PARAMS;
4805 		goto done;
4806 	}
4807 
4808 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4809 	if (len != expected_size) {
4810 		status = MGMT_STATUS_INVALID_PARAMS;
4811 		goto done;
4812 	}
4813 
4814 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4815 	if (!m) {
4816 		status = MGMT_STATUS_NO_RESOURCES;
4817 		goto done;
4818 	}
4819 
4820 	INIT_LIST_HEAD(&m->patterns);
4821 
4822 	parse_adv_monitor_rssi(m, NULL);
4823 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4824 
4825 done:
4826 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4827 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4828 }
4829 
4830 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4831 					 void *data, u16 len)
4832 {
4833 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4834 	struct adv_monitor *m = NULL;
4835 	u8 status = MGMT_STATUS_SUCCESS;
4836 	size_t expected_size = sizeof(*cp);
4837 
4838 	BT_DBG("request for %s", hdev->name);
4839 
4840 	if (len <= sizeof(*cp)) {
4841 		status = MGMT_STATUS_INVALID_PARAMS;
4842 		goto done;
4843 	}
4844 
4845 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4846 	if (len != expected_size) {
4847 		status = MGMT_STATUS_INVALID_PARAMS;
4848 		goto done;
4849 	}
4850 
4851 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4852 	if (!m) {
4853 		status = MGMT_STATUS_NO_RESOURCES;
4854 		goto done;
4855 	}
4856 
4857 	INIT_LIST_HEAD(&m->patterns);
4858 
4859 	parse_adv_monitor_rssi(m, &cp->rssi);
4860 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4861 
4862 done:
4863 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4864 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4865 }
4866 
4867 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4868 {
4869 	struct mgmt_rp_remove_adv_monitor rp;
4870 	struct mgmt_cp_remove_adv_monitor *cp;
4871 	struct mgmt_pending_cmd *cmd;
4872 	int err = 0;
4873 
4874 	hci_dev_lock(hdev);
4875 
4876 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4877 	if (!cmd)
4878 		goto done;
4879 
4880 	cp = cmd->param;
4881 	rp.monitor_handle = cp->monitor_handle;
4882 
4883 	if (!status)
4884 		hci_update_passive_scan(hdev);
4885 
4886 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4887 				mgmt_status(status), &rp, sizeof(rp));
4888 	mgmt_pending_remove(cmd);
4889 
4890 done:
4891 	hci_dev_unlock(hdev);
4892 	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4893 		   rp.monitor_handle, status);
4894 
4895 	return err;
4896 }
4897 
/* Handler for MGMT_OP_REMOVE_ADV_MONITOR: remove one monitor (non-zero
 * handle) or all monitors (handle 0).
 *
 * When removal needs a controller round-trip a pending command is left
 * behind and the reply comes from mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Only one monitor/LE-state operation may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Handle 0 is a wildcard meaning "remove every monitor". */
	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4962 
/* Completion callback (queued by read_local_oob_data()) that turns the
 * controller's OOB-data response in cmd->skb into a management reply.
 *
 * cmd->skb may be NULL, an ERR_PTR, or a real skb whose first byte is
 * the HCI status; the payload layout depends on whether BR/EDR Secure
 * Connections is enabled (extended P-192+P-256 data vs. P-192 only).
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Derive the status from the skb only when err itself is clean. */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 hash/rand only. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the reply. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* Only a real skb may be freed; NULL/ERR_PTR must not reach
	 * kfree_skb().
	 */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5029 
5030 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5031 {
5032 	struct mgmt_pending_cmd *cmd = data;
5033 
5034 	if (bredr_sc_enabled(hdev))
5035 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5036 	else
5037 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5038 
5039 	if (IS_ERR(cmd->skb))
5040 		return PTR_ERR(cmd->skb);
5041 	else
5042 		return 0;
5043 }
5044 
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: queue a synchronous HCI
 * request for the controller's OOB pairing data.  The actual reply is
 * produced by read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* The controller must be powered to answer the HCI command. */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data only exists for Secure Simple Pairing capable devices. */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	/* Queueing failed (or allocation did): report failure and release
	 * the command that will never be completed.
	 */
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5086 
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA: store out-of-band pairing
 * data received for a remote device.
 *
 * Two payload sizes are accepted: the short form carries P-192 data for
 * BR/EDR only; the extended form carries both P-192 and P-256 values.
 * All-zero hash/rand pairs disable the corresponding key strength.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Short form: P-192 only, BR/EDR addresses only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 and P-256 values. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Any other payload size is malformed. */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5194 
5195 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5196 				  void *data, u16 len)
5197 {
5198 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5199 	u8 status;
5200 	int err;
5201 
5202 	bt_dev_dbg(hdev, "sock %p", sk);
5203 
5204 	if (cp->addr.type != BDADDR_BREDR)
5205 		return mgmt_cmd_complete(sk, hdev->id,
5206 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5207 					 MGMT_STATUS_INVALID_PARAMS,
5208 					 &cp->addr, sizeof(cp->addr));
5209 
5210 	hci_dev_lock(hdev);
5211 
5212 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5213 		hci_remote_oob_data_clear(hdev);
5214 		status = MGMT_STATUS_SUCCESS;
5215 		goto done;
5216 	}
5217 
5218 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5219 	if (err < 0)
5220 		status = MGMT_STATUS_INVALID_PARAMS;
5221 	else
5222 		status = MGMT_STATUS_SUCCESS;
5223 
5224 done:
5225 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5226 				status, &cp->addr, sizeof(cp->addr));
5227 
5228 	hci_dev_unlock(hdev);
5229 	return err;
5230 }
5231 
5232 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5233 {
5234 	struct mgmt_pending_cmd *cmd;
5235 
5236 	bt_dev_dbg(hdev, "status %u", status);
5237 
5238 	hci_dev_lock(hdev);
5239 
5240 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5241 	if (!cmd)
5242 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5243 
5244 	if (!cmd)
5245 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5246 
5247 	if (cmd) {
5248 		cmd->cmd_complete(cmd, mgmt_status(status));
5249 		mgmt_pending_remove(cmd);
5250 	}
5251 
5252 	hci_dev_unlock(hdev);
5253 }
5254 
5255 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5256 				    uint8_t *mgmt_status)
5257 {
5258 	switch (type) {
5259 	case DISCOV_TYPE_LE:
5260 		*mgmt_status = mgmt_le_support(hdev);
5261 		if (*mgmt_status)
5262 			return false;
5263 		break;
5264 	case DISCOV_TYPE_INTERLEAVED:
5265 		*mgmt_status = mgmt_le_support(hdev);
5266 		if (*mgmt_status)
5267 			return false;
5268 		fallthrough;
5269 	case DISCOV_TYPE_BREDR:
5270 		*mgmt_status = mgmt_bredr_support(hdev);
5271 		if (*mgmt_status)
5272 			return false;
5273 		break;
5274 	default:
5275 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5276 		return false;
5277 	}
5278 
5279 	return true;
5280 }
5281 
5282 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5283 {
5284 	struct mgmt_pending_cmd *cmd = data;
5285 
5286 	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5287 	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5288 	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5289 		return;
5290 
5291 	bt_dev_dbg(hdev, "err %d", err);
5292 
5293 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5294 			  cmd->param, 1);
5295 	mgmt_pending_remove(cmd);
5296 
5297 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5298 				DISCOVERY_FINDING);
5299 }
5300 
/* hci_cmd_sync callback: run the actual discovery start procedure. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5305 
/* Shared implementation for the START_DISCOVERY, START_LIMITED_DISCOVERY
 * and START_SERVICE_DISCOVERY commands (@op selects which).
 *
 * Validates power, discovery state and type, records the requested
 * discovery parameters, then queues the asynchronous start; the reply
 * is sent by start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and not during
	 * periodic inquiry.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5376 
5377 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5378 			   void *data, u16 len)
5379 {
5380 	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5381 					data, len);
5382 }
5383 
5384 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5385 				   void *data, u16 len)
5386 {
5387 	return start_discovery_internal(sk, hdev,
5388 					MGMT_OP_START_LIMITED_DISCOVERY,
5389 					data, len);
5390 }
5391 
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like Start Discovery
 * but with an RSSI threshold and a UUID filter list appended to the
 * command parameters.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest uuid_count for which the whole command still fits in a
	 * 16-bit length (each UUID entry is 16 bytes), so the
	 * expected_len computation below cannot overflow.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time; periodic inquiry
	 * conflicts with it as well.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must match the advertised UUID count exactly */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* On success the mgmt response is sent later, from
	 * start_discovery_complete().
	 */
	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5503 
5504 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5505 {
5506 	struct mgmt_pending_cmd *cmd;
5507 
5508 	bt_dev_dbg(hdev, "status %u", status);
5509 
5510 	hci_dev_lock(hdev);
5511 
5512 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5513 	if (cmd) {
5514 		cmd->cmd_complete(cmd, mgmt_status(status));
5515 		mgmt_pending_remove(cmd);
5516 	}
5517 
5518 	hci_dev_unlock(hdev);
5519 }
5520 
/* Completion handler for MGMT_OP_STOP_DISCOVERY: sends the response
 * and marks discovery stopped on success.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Ignore stale completions: cmd may already be off the pending
	 * list, in which case it is not safe to dereference.
	 */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The response payload is the first byte of the original command
	 * parameters (the discovery type).
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
5537 
/* hci_cmd_sync callback that stops discovery; @data (the pending
 * command) is not needed here.
 */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	int rc;

	rc = hci_stop_discovery_sync(hdev);

	return rc;
}
5542 
/* Handler for MGMT_OP_STOP_DISCOVERY: validates that a discovery of
 * the requested type is running and queues the stop onto the cmd_sync
 * work queue.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the one discovery was started with */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* On success the mgmt response is sent later, from
	 * stop_discovery_complete().
	 */
	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5587 
5588 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5589 			u16 len)
5590 {
5591 	struct mgmt_cp_confirm_name *cp = data;
5592 	struct inquiry_entry *e;
5593 	int err;
5594 
5595 	bt_dev_dbg(hdev, "sock %p", sk);
5596 
5597 	hci_dev_lock(hdev);
5598 
5599 	if (!hci_discovery_active(hdev)) {
5600 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5601 					MGMT_STATUS_FAILED, &cp->addr,
5602 					sizeof(cp->addr));
5603 		goto failed;
5604 	}
5605 
5606 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5607 	if (!e) {
5608 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5609 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5610 					sizeof(cp->addr));
5611 		goto failed;
5612 	}
5613 
5614 	if (cp->name_known) {
5615 		e->name_state = NAME_KNOWN;
5616 		list_del(&e->list);
5617 	} else {
5618 		e->name_state = NAME_NEEDED;
5619 		hci_inquiry_cache_update_resolve(hdev, e);
5620 	}
5621 
5622 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5623 				&cp->addr, sizeof(cp->addr));
5624 
5625 failed:
5626 	hci_dev_unlock(hdev);
5627 	return err;
5628 }
5629 
5630 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5631 			u16 len)
5632 {
5633 	struct mgmt_cp_block_device *cp = data;
5634 	u8 status;
5635 	int err;
5636 
5637 	bt_dev_dbg(hdev, "sock %p", sk);
5638 
5639 	if (!bdaddr_type_is_valid(cp->addr.type))
5640 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5641 					 MGMT_STATUS_INVALID_PARAMS,
5642 					 &cp->addr, sizeof(cp->addr));
5643 
5644 	hci_dev_lock(hdev);
5645 
5646 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5647 				  cp->addr.type);
5648 	if (err < 0) {
5649 		status = MGMT_STATUS_FAILED;
5650 		goto done;
5651 	}
5652 
5653 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5654 		   sk);
5655 	status = MGMT_STATUS_SUCCESS;
5656 
5657 done:
5658 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5659 				&cp->addr, sizeof(cp->addr));
5660 
5661 	hci_dev_unlock(hdev);
5662 
5663 	return err;
5664 }
5665 
/* Handler for MGMT_OP_UNBLOCK_DEVICE: removes the given address from
 * the reject list and emits the Device Unblocked event on success.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		/* A failed delete presumably means the address was never
		 * on the list — TODO confirm against hci_bdaddr_list_del.
		 */
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}
5701 
/* hci_cmd_sync callback for Set Device ID: refreshes the EIR data. */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	int rc;

	rc = hci_update_eir_sync(hdev);

	return rc;
}
5706 
5707 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5708 			 u16 len)
5709 {
5710 	struct mgmt_cp_set_device_id *cp = data;
5711 	int err;
5712 	__u16 source;
5713 
5714 	bt_dev_dbg(hdev, "sock %p", sk);
5715 
5716 	source = __le16_to_cpu(cp->source);
5717 
5718 	if (source > 0x0002)
5719 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5720 				       MGMT_STATUS_INVALID_PARAMS);
5721 
5722 	hci_dev_lock(hdev);
5723 
5724 	hdev->devid_source = source;
5725 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5726 	hdev->devid_product = __le16_to_cpu(cp->product);
5727 	hdev->devid_version = __le16_to_cpu(cp->version);
5728 
5729 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5730 				NULL, 0);
5731 
5732 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
5733 
5734 	hci_dev_unlock(hdev);
5735 
5736 	return err;
5737 }
5738 
/* Log the outcome of re-enabling an advertising instance. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
5746 
/* Completion handler for MGMT_OP_SET_ADVERTISING: syncs the
 * HCI_ADVERTISING flag with the LE advertising state, answers all
 * pending Set Advertising commands and re-enables instance advertising
 * if the setting was just turned off.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's LE advertising state into the
	 * HCI_ADVERTISING setting flag.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
5794 
/* hci_cmd_sync callback for MGMT_OP_SET_ADVERTISING: applies the
 * requested mode (0x00 = off, 0x01 = on, 0x02 = on + connectable).
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
5828 
/* Handler for MGMT_OP_SET_ADVERTISING: 0x00 disables advertising,
 * 0x01 enables it and 0x02 enables connectable advertising.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings when a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject if another Set Advertising or Set LE is already in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5912 
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: stores the LE static random
 * address. Only allowed while the controller is powered off.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is accepted as-is (it is copied below); any other
	 * value must pass the static random address format checks.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5956 
5957 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5958 			   void *data, u16 len)
5959 {
5960 	struct mgmt_cp_set_scan_params *cp = data;
5961 	__u16 interval, window;
5962 	int err;
5963 
5964 	bt_dev_dbg(hdev, "sock %p", sk);
5965 
5966 	if (!lmp_le_capable(hdev))
5967 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5968 				       MGMT_STATUS_NOT_SUPPORTED);
5969 
5970 	interval = __le16_to_cpu(cp->interval);
5971 
5972 	if (interval < 0x0004 || interval > 0x4000)
5973 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5974 				       MGMT_STATUS_INVALID_PARAMS);
5975 
5976 	window = __le16_to_cpu(cp->window);
5977 
5978 	if (window < 0x0004 || window > 0x4000)
5979 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5980 				       MGMT_STATUS_INVALID_PARAMS);
5981 
5982 	if (window > interval)
5983 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5984 				       MGMT_STATUS_INVALID_PARAMS);
5985 
5986 	hci_dev_lock(hdev);
5987 
5988 	hdev->le_scan_interval = interval;
5989 	hdev->le_scan_window = window;
5990 
5991 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5992 				NULL, 0);
5993 
5994 	/* If background scan is running, restart it so new parameters are
5995 	 * loaded.
5996 	 */
5997 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5998 	    hdev->discovery.state == DISCOVERY_STOPPED)
5999 		hci_update_passive_scan(hdev);
6000 
6001 	hci_dev_unlock(hdev);
6002 
6003 	return err;
6004 }
6005 
/* Completion handler for MGMT_OP_SET_FAST_CONNECTABLE: updates the
 * HCI_FAST_CONNECTABLE flag and notifies user space.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		/* Sync the flag with the mode that was just written */
		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6029 
6030 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6031 {
6032 	struct mgmt_pending_cmd *cmd = data;
6033 	struct mgmt_mode *cp = cmd->param;
6034 
6035 	return hci_write_fast_connectable_sync(hdev, cp->val);
6036 }
6037 
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE. */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires BR/EDR and a controller of at least version 1.2 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op if the setting already has the requested value */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: just toggle the flag and notify, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6093 
/* Completion handler for MGMT_OP_SET_BREDR. */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6116 
6117 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6118 {
6119 	int status;
6120 
6121 	status = hci_write_fast_connectable_sync(hdev, false);
6122 
6123 	if (!status)
6124 		status = hci_update_scan_sync(hdev);
6125 
6126 	/* Since only the advertising data flags will change, there
6127 	 * is no need to update the scan response data.
6128 	 */
6129 	if (!status)
6130 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6131 
6132 	return status;
6133 }
6134 
/* Handler for MGMT_OP_SET_BREDR: enables or (only while powered off)
 * disables BR/EDR support on a dual-mode controller.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Toggling BR/EDR only makes sense on a dual-mode controller */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op if the setting already has the requested value */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* Powered off: only toggle flags, no HCI traffic. Disabling
	 * BR/EDR also clears all settings that depend on it.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6235 
/* Completion handler for MGMT_OP_SET_SECURE_CONN: updates the SC flags
 * to match the requested mode and notifies user space.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	/* 0x00 = off, 0x01 = SC enabled, 0x02 = SC-only mode */
	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6273 
6274 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6275 {
6276 	struct mgmt_pending_cmd *cmd = data;
6277 	struct mgmt_mode *cp = cmd->param;
6278 	u8 val = !!cp->val;
6279 
6280 	/* Force write of val */
6281 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6282 
6283 	return hci_write_sc_support_sync(hdev, val);
6284 }
6285 
/* Handler for MGMT_OP_SET_SECURE_CONN: 0x00 = off, 0x01 = Secure
 * Connections enabled, 0x02 = Secure Connections only mode.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR enabled, an SC-capable controller also needs SSP */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* If no HCI write is possible or needed, just toggle the flags
	 * and respond directly.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No-op if both flags already have the requested values */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6366 
/* Handler for MGMT_OP_SET_DEBUG_KEYS: 0x00 = discard debug keys,
 * 0x01 = keep them, 0x02 = keep them and also use SSP debug mode.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* HCI_USE_DEBUG_KEYS tracks mode 0x02 specifically */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Tell a powered, SSP-enabled controller about the new debug mode */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings when a flag actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6413 
/* MGMT_OP_SET_PRIVACY handler.
 *
 * cp->privacy: 0x00 = privacy off, 0x01 = full privacy,
 * 0x02 = limited privacy. The IRK comes from user space in cp->irk.
 * Only accepted while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Changing the privacy mode on a live controller is rejected */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		/* Adopt the caller-provided IRK and mark the RPA as
		 * expired so a fresh one gets generated.
		 */
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Privacy disabled: wipe the stored IRK and all RPA state */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6470 
6471 static bool irk_is_valid(struct mgmt_irk_info *irk)
6472 {
6473 	switch (irk->addr.type) {
6474 	case BDADDR_LE_PUBLIC:
6475 		return true;
6476 
6477 	case BDADDR_LE_RANDOM:
6478 		/* Two most significant bits shall be set */
6479 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6480 			return false;
6481 		return true;
6482 	}
6483 
6484 	return false;
6485 }
6486 
/* MGMT_OP_LOAD_IRKS handler.
 *
 * Replaces the kernel's entire IRK store with the list supplied by
 * user space. The whole list is validated up front; loading is
 * all-or-nothing apart from individually blocked keys, which are
 * skipped with a warning.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before touching the existing store */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not failed */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space providing IRKs implies it can resolve RPAs */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6557 
6558 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6559 {
6560 	if (key->initiator != 0x00 && key->initiator != 0x01)
6561 		return false;
6562 
6563 	switch (key->addr.type) {
6564 	case BDADDR_LE_PUBLIC:
6565 		return true;
6566 
6567 	case BDADDR_LE_RANDOM:
6568 		/* Two most significant bits shall be set */
6569 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6570 			return false;
6571 		return true;
6572 	}
6573 
6574 	return false;
6575 }
6576 
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 *
 * Replaces the kernel's entire LTK store with the list supplied by
 * user space. All entries are validated first; blocked keys and
 * unknown/debug key types are skipped rather than treated as errors.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before clearing the existing store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not failed */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to SMP key type + authentication.
		 * For legacy (pre-P256) keys the role decides between
		 * initiator and responder LTK.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys deliberately fall through to the
			 * default case and are never loaded.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6672 
/* Completion callback for Get Connection Information.
 *
 * On success replies with the RSSI/TX-power values cached in the
 * hci_conn; on failure replies with the "invalid" marker values.
 * Drops the connection reference taken when the command was queued.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	/* conn may be NULL here: get_conn_info_sync() clears user_data
	 * (and drops the reference itself) when the connection is gone.
	 */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	mgmt_pending_free(cmd);
}
6706 
/* hci_cmd_sync work for Get Connection Information.
 *
 * Refreshes the cached RSSI and TX power values for the connection.
 * Returns a positive MGMT status when the connection no longer exists,
 * otherwise the result of the HCI sync calls (0 or negative errno).
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* Connection gone or replaced: release the reference taken by
	 * get_conn_info() and let the completion path see user_data=NULL.
	 */
	if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
		if (cmd->user_data) {
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
		return MGMT_STATUS_NOT_CONNECTED;
	}

	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
6749 
/* MGMT_OP_GET_CONN_INFO handler.
 *
 * Answers from the values cached in the hci_conn while they are still
 * fresh; otherwise queues get_conn_info_sync() to re-read them from
 * the controller, holding a connection reference until completion.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BR/EDR and LE connections live in different hash buckets */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd)
			err = -ENOMEM;
		else
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		/* Keep the connection alive until the completion callback;
		 * released in get_conn_info_complete()/get_conn_info_sync().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6843 
6844 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
6845 {
6846 	struct mgmt_pending_cmd *cmd = data;
6847 	struct mgmt_cp_get_clock_info *cp = cmd->param;
6848 	struct mgmt_rp_get_clock_info rp;
6849 	struct hci_conn *conn = cmd->user_data;
6850 	u8 status = mgmt_status(err);
6851 
6852 	bt_dev_dbg(hdev, "err %d", err);
6853 
6854 	memset(&rp, 0, sizeof(rp));
6855 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6856 	rp.addr.type = cp->addr.type;
6857 
6858 	if (err)
6859 		goto complete;
6860 
6861 	rp.local_clock = cpu_to_le32(hdev->clock);
6862 
6863 	if (conn) {
6864 		rp.piconet_clock = cpu_to_le32(conn->clock);
6865 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6866 		hci_conn_drop(conn);
6867 		hci_conn_put(conn);
6868 	}
6869 
6870 complete:
6871 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6872 			  sizeof(rp));
6873 
6874 	mgmt_pending_free(cmd);
6875 }
6876 
/* hci_cmd_sync work for Get Clock Information.
 *
 * Always reads the local clock (hci_cp zeroed means handle 0 /
 * which = 0x00). If a connection was attached to the command and is
 * still established, additionally reads its piconet clock; otherwise
 * the stale connection reference is released and user_data cleared so
 * the completion callback does not drop it again.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	memset(&hci_cp, 0, sizeof(hci_cp));
	err = hci_read_clock_sync(hdev, &hci_cp);

	if (conn) {
		/* Make sure connection still exists */
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);

		if (conn && conn == cmd->user_data &&
		    conn->state == BT_CONNECTED) {
			hci_cp.handle = cpu_to_le16(conn->handle);
			hci_cp.which = 0x01; /* Piconet clock */
			err = hci_read_clock_sync(hdev, &hci_cp);
		} else if (cmd->user_data) {
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
	}

	return err;
}
6907 
/* MGMT_OP_GET_CLOCK_INFO handler.
 *
 * BR/EDR only. With BDADDR_ANY just the local clock is read; with a
 * specific address the piconet clock of that connection is read as
 * well. Queues get_clock_info_sync(), holding a reference on the
 * connection until the completion callback runs.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address must refer to an established ACL connection */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);

	} else if (conn) {
		/* Keep the connection alive until the completion callback */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
6975 
6976 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6977 {
6978 	struct hci_conn *conn;
6979 
6980 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6981 	if (!conn)
6982 		return false;
6983 
6984 	if (conn->dst_type != type)
6985 		return false;
6986 
6987 	if (conn->state != BT_CONNECTED)
6988 		return false;
6989 
6990 	return true;
6991 }
6992 
/* This function requires the caller holds hdev->lock.
 *
 * Creates (or reuses) the connection parameters entry for the given
 * identity address and switches it to the requested auto_connect
 * policy, moving it between the pend_le_conns / pend_le_reports action
 * lists accordingly. Returns 0 on success, -EIO if the params entry
 * could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry was on before
	 * re-filing it below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7037 
7038 static void device_added(struct sock *sk, struct hci_dev *hdev,
7039 			 bdaddr_t *bdaddr, u8 type, u8 action)
7040 {
7041 	struct mgmt_ev_device_added ev;
7042 
7043 	bacpy(&ev.addr.bdaddr, bdaddr);
7044 	ev.addr.type = type;
7045 	ev.action = action;
7046 
7047 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7048 }
7049 
/* Sync-queue callback for Add Device: re-program passive scanning so
 * the newly added entry takes effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7054 
/* MGMT_OP_ADD_DEVICE handler.
 *
 * cp->action: 0x00 = background scan for device (LE report),
 * 0x01 = allow incoming connection, 0x02 = auto-connect remote device.
 * BR/EDR devices go on the accept list (action 0x01 only); LE devices
 * get connection parameters with the matching auto_connect policy.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Page scan may need enabling for the new accept list entry */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action to the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			bitmap_to_arr32(&current_flags, params->flags,
					__HCI_CONN_NUM_FLAGS);
	}

	/* Re-program passive scanning to pick up the new entry */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
			__HCI_CONN_NUM_FLAGS);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7158 
7159 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7160 			   bdaddr_t *bdaddr, u8 type)
7161 {
7162 	struct mgmt_ev_device_removed ev;
7163 
7164 	bacpy(&ev.addr.bdaddr, bdaddr);
7165 	ev.addr.type = type;
7166 
7167 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7168 }
7169 
/* Sync-queue callback for Remove Device: re-program passive scanning
 * after an entry was removed.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7174 
/* MGMT_OP_REMOVE_DEVICE handler.
 *
 * With a specific address, removes that device from the accept list
 * (BR/EDR) or deletes its connection parameters (LE). With BDADDR_ANY
 * and type 0, removes all accept list entries and all non-disabled LE
 * connection parameters in one go.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Page scan may need adjusting now that the
			 * accept list shrank.
			 */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device (disabled/explicit)
		 * cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep params for in-progress explicit connects,
			 * just downgrade them to explicit-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Re-program passive scanning to reflect the removals */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7302 
/* MGMT_OP_LOAD_CONN_PARAM handler.
 *
 * Replaces the set of stored (disabled) LE connection parameters with
 * the list provided by user space. Individual invalid entries are
 * logged and skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7387 
/* MGMT_OP_SET_EXTERNAL_CONFIG handler.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on controllers with the
 * EXTERNAL_CONFIG quirk. When the configured state flips, the index is
 * re-announced: either powered on through the config stage, or moved
 * to raw/unconfigured mode.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state now disagrees with the UNCONFIGURED
	 * flag, the index needs to move between the configured and
	 * unconfigured lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7443 
/* MGMT_OP_SET_PUBLIC_ADDRESS handler.
 *
 * Stores a public address to be programmed into a controller that
 * has a set_bdaddr driver callback. Only valid while powered off; if
 * this completes the configuration, the controller is powered on
 * through the config stage to apply it.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Without a driver hook to program the address there is nothing
	 * this command could achieve.
	 */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Setting the address may have completed the configuration:
	 * re-register the index and power on through the config stage.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7495 
/* Completion handler for the HCI Read Local OOB (Extended) Data request
 * queued by read_local_ssp_oob_req().
 *
 * Builds an EIR-encoded reply from the controller response: the P-192
 * hash/randomizer for legacy pairing, and/or the P-256 values when
 * BR/EDR Secure Connections is enabled. On success the reply is also
 * broadcast as a Local OOB Data Updated event to other sockets that
 * opted into OOB data events.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if this is no longer the pending command */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* No transport error: derive status from the skb / HCI status byte */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status has already been mapped through
		 * mgmt_status() above, so this second mapping looks
		 * redundant - confirm before changing.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy pairing: only P-192 hash and randomizer exist */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* Class of Device (5) + two 18-byte EIR fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only mode: suppress the P-192 values */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7618 
7619 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7620 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7621 {
7622 	struct mgmt_pending_cmd *cmd;
7623 	int err;
7624 
7625 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7626 			       cp, sizeof(*cp));
7627 	if (!cmd)
7628 		return -ENOMEM;
7629 
7630 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7631 				 read_local_oob_ext_data_complete);
7632 
7633 	if (err < 0) {
7634 		mgmt_pending_remove(cmd);
7635 		return err;
7636 	}
7637 
7638 	return 0;
7639 }
7640 
/* MGMT Read Local OOB Extended Data command handler.
 *
 * For BR/EDR, queues an asynchronous controller request (reply sent from
 * read_local_oob_ext_data_complete()); for LE, the EIR payload (address,
 * role, optional SC confirm/random values and flags) is built inline and
 * the reply is sent immediately. On success the data is also broadcast
 * as a Local OOB Data Updated event.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-compute the worst-case EIR length for the requested type so
	 * the reply buffer can be sized before taking the device lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Async path: reply comes from the completion
			 * handler, so only fall through on queue failure.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] is the address type: 0x01 = static/random */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7801 
7802 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7803 {
7804 	u32 flags = 0;
7805 
7806 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7807 	flags |= MGMT_ADV_FLAG_DISCOV;
7808 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7809 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7810 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7811 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7812 	flags |= MGMT_ADV_PARAM_DURATION;
7813 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7814 	flags |= MGMT_ADV_PARAM_INTERVALS;
7815 	flags |= MGMT_ADV_PARAM_TX_POWER;
7816 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7817 
7818 	/* In extended adv TX_POWER returned from Set Adv Param
7819 	 * will be always valid.
7820 	 */
7821 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7822 	    ext_adv_capable(hdev))
7823 		flags |= MGMT_ADV_FLAG_TX_POWER;
7824 
7825 	if (ext_adv_capable(hdev)) {
7826 		flags |= MGMT_ADV_FLAG_SEC_1M;
7827 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7828 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7829 
7830 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7831 			flags |= MGMT_ADV_FLAG_SEC_2M;
7832 
7833 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7834 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7835 	}
7836 
7837 	return flags;
7838 }
7839 
7840 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7841 			     void *data, u16 data_len)
7842 {
7843 	struct mgmt_rp_read_adv_features *rp;
7844 	size_t rp_len;
7845 	int err;
7846 	struct adv_info *adv_instance;
7847 	u32 supported_flags;
7848 	u8 *instance;
7849 
7850 	bt_dev_dbg(hdev, "sock %p", sk);
7851 
7852 	if (!lmp_le_capable(hdev))
7853 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7854 				       MGMT_STATUS_REJECTED);
7855 
7856 	hci_dev_lock(hdev);
7857 
7858 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7859 	rp = kmalloc(rp_len, GFP_ATOMIC);
7860 	if (!rp) {
7861 		hci_dev_unlock(hdev);
7862 		return -ENOMEM;
7863 	}
7864 
7865 	supported_flags = get_supported_adv_flags(hdev);
7866 
7867 	rp->supported_flags = cpu_to_le32(supported_flags);
7868 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7869 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7870 	rp->max_instances = hdev->le_num_of_adv_sets;
7871 	rp->num_instances = hdev->adv_instance_cnt;
7872 
7873 	instance = rp->instance;
7874 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7875 		*instance = adv_instance->instance;
7876 		instance++;
7877 	}
7878 
7879 	hci_dev_unlock(hdev);
7880 
7881 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7882 				MGMT_STATUS_SUCCESS, rp, rp_len);
7883 
7884 	kfree(rp);
7885 
7886 	return err;
7887 }
7888 
7889 static u8 calculate_name_len(struct hci_dev *hdev)
7890 {
7891 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7892 
7893 	return eir_append_local_name(hdev, buf, 0);
7894 }
7895 
7896 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7897 			   bool is_adv_data)
7898 {
7899 	u8 max_len = HCI_MAX_AD_LENGTH;
7900 
7901 	if (is_adv_data) {
7902 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7903 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7904 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7905 			max_len -= 3;
7906 
7907 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7908 			max_len -= 3;
7909 	} else {
7910 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7911 			max_len -= calculate_name_len(hdev);
7912 
7913 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7914 			max_len -= 4;
7915 	}
7916 
7917 	return max_len;
7918 }
7919 
7920 static bool flags_managed(u32 adv_flags)
7921 {
7922 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7923 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7924 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7925 }
7926 
7927 static bool tx_power_managed(u32 adv_flags)
7928 {
7929 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7930 }
7931 
7932 static bool name_managed(u32 adv_flags)
7933 {
7934 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7935 }
7936 
7937 static bool appearance_managed(u32 adv_flags)
7938 {
7939 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7940 }
7941 
7942 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7943 			      u8 len, bool is_adv_data)
7944 {
7945 	int i, cur_len;
7946 	u8 max_len;
7947 
7948 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7949 
7950 	if (len > max_len)
7951 		return false;
7952 
7953 	/* Make sure that the data is correctly formatted. */
7954 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7955 		cur_len = data[i];
7956 
7957 		if (!cur_len)
7958 			continue;
7959 
7960 		if (data[i + 1] == EIR_FLAGS &&
7961 		    (!is_adv_data || flags_managed(adv_flags)))
7962 			return false;
7963 
7964 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7965 			return false;
7966 
7967 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7968 			return false;
7969 
7970 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7971 			return false;
7972 
7973 		if (data[i + 1] == EIR_APPEARANCE &&
7974 		    appearance_managed(adv_flags))
7975 			return false;
7976 
7977 		/* If the current field length would exceed the total data
7978 		 * length, then it's invalid.
7979 		 */
7980 		if (i + cur_len >= len)
7981 			return false;
7982 	}
7983 
7984 	return true;
7985 }
7986 
7987 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7988 {
7989 	u32 supported_flags, phy_flags;
7990 
7991 	/* The current implementation only supports a subset of the specified
7992 	 * flags. Also need to check mutual exclusiveness of sec flags.
7993 	 */
7994 	supported_flags = get_supported_adv_flags(hdev);
7995 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7996 	if (adv_flags & ~supported_flags ||
7997 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7998 		return false;
7999 
8000 	return true;
8001 }
8002 
8003 static bool adv_busy(struct hci_dev *hdev)
8004 {
8005 	return pending_find(MGMT_OP_SET_LE, hdev);
8006 }
8007 
8008 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8009 			     int err)
8010 {
8011 	struct adv_info *adv, *n;
8012 
8013 	bt_dev_dbg(hdev, "err %d", err);
8014 
8015 	hci_dev_lock(hdev);
8016 
8017 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8018 		u8 instance;
8019 
8020 		if (!adv->pending)
8021 			continue;
8022 
8023 		if (!err) {
8024 			adv->pending = false;
8025 			continue;
8026 		}
8027 
8028 		instance = adv->instance;
8029 
8030 		if (hdev->cur_adv_instance == instance)
8031 			cancel_adv_timeout(hdev);
8032 
8033 		hci_remove_adv_instance(hdev, instance);
8034 		mgmt_advertising_removed(sk, hdev, instance);
8035 	}
8036 
8037 	hci_dev_unlock(hdev);
8038 }
8039 
8040 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8041 {
8042 	struct mgmt_pending_cmd *cmd = data;
8043 	struct mgmt_cp_add_advertising *cp = cmd->param;
8044 	struct mgmt_rp_add_advertising rp;
8045 
8046 	memset(&rp, 0, sizeof(rp));
8047 
8048 	rp.instance = cp->instance;
8049 
8050 	if (err)
8051 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8052 				mgmt_status(err));
8053 	else
8054 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8055 				  mgmt_status(err), &rp, sizeof(rp));
8056 
8057 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8058 
8059 	mgmt_pending_free(cmd);
8060 }
8061 
8062 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8063 {
8064 	struct mgmt_pending_cmd *cmd = data;
8065 	struct mgmt_cp_add_advertising *cp = cmd->param;
8066 
8067 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8068 }
8069 
/* MGMT Add Advertising command handler.
 *
 * Validates the request, registers (or replaces) the advertising
 * instance, and either replies immediately (powered off, HCI_ADVERTISING
 * set, or nothing to schedule) or queues asynchronous work whose reply
 * is sent from add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance identifiers are 1-based and limited by the controller */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The payload must exactly cover adv data followed by scan rsp data */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running timer, which requires power */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* The sync work reads the instance from the stored parameters */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8202 
/* Completion handler for Add Extended Advertising Parameters: report
 * the selected TX power and remaining data space on success, or remove
 * the instance and report the error on failure.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Instance may have been removed meanwhile; nothing to report then */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* NOTE(review): cmd (== data) is never NULL here, so this check
	 * looks redundant - confirm before removing.
	 */
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8253 
8254 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8255 {
8256 	struct mgmt_pending_cmd *cmd = data;
8257 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8258 
8259 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8260 }
8261 
8262 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8263 			      void *data, u16 data_len)
8264 {
8265 	struct mgmt_cp_add_ext_adv_params *cp = data;
8266 	struct mgmt_rp_add_ext_adv_params rp;
8267 	struct mgmt_pending_cmd *cmd = NULL;
8268 	u32 flags, min_interval, max_interval;
8269 	u16 timeout, duration;
8270 	u8 status;
8271 	s8 tx_power;
8272 	int err;
8273 
8274 	BT_DBG("%s", hdev->name);
8275 
8276 	status = mgmt_le_support(hdev);
8277 	if (status)
8278 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8279 				       status);
8280 
8281 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8282 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8283 				       MGMT_STATUS_INVALID_PARAMS);
8284 
8285 	/* The purpose of breaking add_advertising into two separate MGMT calls
8286 	 * for params and data is to allow more parameters to be added to this
8287 	 * structure in the future. For this reason, we verify that we have the
8288 	 * bare minimum structure we know of when the interface was defined. Any
8289 	 * extra parameters we don't know about will be ignored in this request.
8290 	 */
8291 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8292 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8293 				       MGMT_STATUS_INVALID_PARAMS);
8294 
8295 	flags = __le32_to_cpu(cp->flags);
8296 
8297 	if (!requested_adv_flags_are_valid(hdev, flags))
8298 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8299 				       MGMT_STATUS_INVALID_PARAMS);
8300 
8301 	hci_dev_lock(hdev);
8302 
8303 	/* In new interface, we require that we are powered to register */
8304 	if (!hdev_is_powered(hdev)) {
8305 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8306 				      MGMT_STATUS_REJECTED);
8307 		goto unlock;
8308 	}
8309 
8310 	if (adv_busy(hdev)) {
8311 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8312 				      MGMT_STATUS_BUSY);
8313 		goto unlock;
8314 	}
8315 
8316 	/* Parse defined parameters from request, use defaults otherwise */
8317 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8318 		  __le16_to_cpu(cp->timeout) : 0;
8319 
8320 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8321 		   __le16_to_cpu(cp->duration) :
8322 		   hdev->def_multi_adv_rotation_duration;
8323 
8324 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8325 		       __le32_to_cpu(cp->min_interval) :
8326 		       hdev->le_adv_min_interval;
8327 
8328 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8329 		       __le32_to_cpu(cp->max_interval) :
8330 		       hdev->le_adv_max_interval;
8331 
8332 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8333 		   cp->tx_power :
8334 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8335 
8336 	/* Create advertising instance with no advertising or response data */
8337 	err = hci_add_adv_instance(hdev, cp->instance, flags,
8338 				   0, NULL, 0, NULL, timeout, duration,
8339 				   tx_power, min_interval, max_interval);
8340 
8341 	if (err < 0) {
8342 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8343 				      MGMT_STATUS_FAILED);
8344 		goto unlock;
8345 	}
8346 
8347 	/* Submit request for advertising params if ext adv available */
8348 	if (ext_adv_capable(hdev)) {
8349 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8350 				       data, data_len);
8351 		if (!cmd) {
8352 			err = -ENOMEM;
8353 			hci_remove_adv_instance(hdev, cp->instance);
8354 			goto unlock;
8355 		}
8356 
8357 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8358 					 add_ext_adv_params_complete);
8359 		if (err < 0)
8360 			mgmt_pending_free(cmd);
8361 	} else {
8362 		rp.instance = cp->instance;
8363 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8364 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8365 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8366 		err = mgmt_cmd_complete(sk, hdev->id,
8367 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8368 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8369 	}
8370 
8371 unlock:
8372 	hci_dev_unlock(hdev);
8373 
8374 	return err;
8375 }
8376 
8377 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8378 {
8379 	struct mgmt_pending_cmd *cmd = data;
8380 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8381 	struct mgmt_rp_add_advertising rp;
8382 
8383 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8384 
8385 	memset(&rp, 0, sizeof(rp));
8386 
8387 	rp.instance = cp->instance;
8388 
8389 	if (err)
8390 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8391 				mgmt_status(err));
8392 	else
8393 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8394 				  mgmt_status(err), &rp, sizeof(rp));
8395 
8396 	mgmt_pending_free(cmd);
8397 }
8398 
8399 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8400 {
8401 	struct mgmt_pending_cmd *cmd = data;
8402 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8403 	int err;
8404 
8405 	if (ext_adv_capable(hdev)) {
8406 		err = hci_update_adv_data_sync(hdev, cp->instance);
8407 		if (err)
8408 			return err;
8409 
8410 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8411 		if (err)
8412 			return err;
8413 
8414 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8415 	}
8416 
8417 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8418 }
8419 
/* MGMT Add Extended Advertising Data command handler.
 *
 * Attaches advertising and scan response data to an instance previously
 * created via Add Extended Advertising Parameters. On validation or
 * queuing failure the half-constructed instance is removed again
 * (clear_new_instance). The reply is either sent immediately (nothing
 * to schedule) or from add_ext_adv_data_complete().
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by a prior params command */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8538 
8539 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8540 					int err)
8541 {
8542 	struct mgmt_pending_cmd *cmd = data;
8543 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8544 	struct mgmt_rp_remove_advertising rp;
8545 
8546 	bt_dev_dbg(hdev, "err %d", err);
8547 
8548 	memset(&rp, 0, sizeof(rp));
8549 	rp.instance = cp->instance;
8550 
8551 	if (err)
8552 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8553 				mgmt_status(err));
8554 	else
8555 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8556 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8557 
8558 	mgmt_pending_free(cmd);
8559 }
8560 
8561 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8562 {
8563 	struct mgmt_pending_cmd *cmd = data;
8564 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8565 	int err;
8566 
8567 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8568 	if (err)
8569 		return err;
8570 
8571 	if (list_empty(&hdev->adv_instances))
8572 		err = hci_disable_advertising_sync(hdev);
8573 
8574 	return err;
8575 }
8576 
/* MGMT_OP_REMOVE_ADVERTISING handler: validate the request under the
 * device lock, then queue the actual removal onto the hci_sync machinery.
 * The reply to @sk is sent either here (on validation failure) or from
 * remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A specific (non-zero) instance must actually exist; only
	 * instance 0 skips the lookup.
	 */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Don't race with an in-flight SET_LE command */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances are registered at all */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* On queueing failure the pending cmd is freed here; otherwise
	 * remove_advertising_complete() owns it.
	 */
	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8624 
8625 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8626 			     void *data, u16 data_len)
8627 {
8628 	struct mgmt_cp_get_adv_size_info *cp = data;
8629 	struct mgmt_rp_get_adv_size_info rp;
8630 	u32 flags, supported_flags;
8631 	int err;
8632 
8633 	bt_dev_dbg(hdev, "sock %p", sk);
8634 
8635 	if (!lmp_le_capable(hdev))
8636 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8637 				       MGMT_STATUS_REJECTED);
8638 
8639 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8640 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8641 				       MGMT_STATUS_INVALID_PARAMS);
8642 
8643 	flags = __le32_to_cpu(cp->flags);
8644 
8645 	/* The current implementation only supports a subset of the specified
8646 	 * flags.
8647 	 */
8648 	supported_flags = get_supported_adv_flags(hdev);
8649 	if (flags & ~supported_flags)
8650 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8651 				       MGMT_STATUS_INVALID_PARAMS);
8652 
8653 	rp.instance = cp->instance;
8654 	rp.flags = cp->flags;
8655 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8656 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8657 
8658 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8659 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8660 
8661 	return err;
8662 }
8663 
/* Dispatch table for mgmt commands, indexed by opcode (entry 0 is unused).
 * Each entry names the handler, the expected parameter size (a minimum
 * when HCI_MGMT_VAR_LEN is set) and flags: HCI_MGMT_NO_HDEV /
 * HCI_MGMT_HDEV_OPTIONAL control index resolution, HCI_MGMT_UNTRUSTED
 * allows unprivileged sockets, HCI_MGMT_UNCONFIGURED permits use on
 * not-yet-configured controllers.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8791 
/* Notify userspace that a new controller index appeared. For primary
 * controllers the legacy INDEX_ADDED / UNCONF_INDEX_ADDED event is sent
 * first; an EXT_INDEX_ADDED event carrying a type/bus tuple follows for
 * every supported device type.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed through the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured primary controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured primary controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8823 
/* Notify userspace that a controller index went away. For primary
 * controllers every outstanding mgmt command on the index is failed with
 * INVALID_INDEX first so no caller is left waiting on a dead controller.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed through the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 matches all pending commands */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured primary controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured primary controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8858 
8859 void mgmt_power_on(struct hci_dev *hdev, int err)
8860 {
8861 	struct cmd_lookup match = { NULL, hdev };
8862 
8863 	bt_dev_dbg(hdev, "err %d", err);
8864 
8865 	hci_dev_lock(hdev);
8866 
8867 	if (!err) {
8868 		restart_le_actions(hdev);
8869 		hci_update_passive_scan(hdev);
8870 	}
8871 
8872 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8873 
8874 	new_settings(hdev, match.sk);
8875 
8876 	if (match.sk)
8877 		sock_put(match.sk);
8878 
8879 	hci_dev_unlock(hdev);
8880 }
8881 
/* Finish a power-off: answer pending SET_POWERED commands, fail every
 * other outstanding command, reset the advertised class of device and
 * broadcast the new settings. Caller context is the power-off path, so
 * no additional locking is taken here.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce a zeroed class of device if it was non-zero before */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8915 
8916 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8917 {
8918 	struct mgmt_pending_cmd *cmd;
8919 	u8 status;
8920 
8921 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8922 	if (!cmd)
8923 		return;
8924 
8925 	if (err == -ERFKILL)
8926 		status = MGMT_STATUS_RFKILLED;
8927 	else
8928 		status = MGMT_STATUS_FAILED;
8929 
8930 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8931 
8932 	mgmt_pending_remove(cmd);
8933 }
8934 
8935 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8936 		       bool persistent)
8937 {
8938 	struct mgmt_ev_new_link_key ev;
8939 
8940 	memset(&ev, 0, sizeof(ev));
8941 
8942 	ev.store_hint = persistent;
8943 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8944 	ev.key.addr.type = BDADDR_BREDR;
8945 	ev.key.type = key->type;
8946 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8947 	ev.key.pin_len = key->pin_len;
8948 
8949 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8950 }
8951 
8952 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8953 {
8954 	switch (ltk->type) {
8955 	case SMP_LTK:
8956 	case SMP_LTK_RESPONDER:
8957 		if (ltk->authenticated)
8958 			return MGMT_LTK_AUTHENTICATED;
8959 		return MGMT_LTK_UNAUTHENTICATED;
8960 	case SMP_LTK_P256:
8961 		if (ltk->authenticated)
8962 			return MGMT_LTK_P256_AUTH;
8963 		return MGMT_LTK_P256_UNAUTH;
8964 	case SMP_LTK_P256_DEBUG:
8965 		return MGMT_LTK_P256_DEBUG;
8966 	}
8967 
8968 	return MGMT_LTK_UNAUTHENTICATED;
8969 }
8970 
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a newly distributed LTK. Keys for
 * non-identity (resolvable/non-resolvable random) addresses are reported
 * with store_hint 0 since they cannot be re-used across address changes.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key distributed by the initiator */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9013 
9014 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9015 {
9016 	struct mgmt_ev_new_irk ev;
9017 
9018 	memset(&ev, 0, sizeof(ev));
9019 
9020 	ev.store_hint = persistent;
9021 
9022 	bacpy(&ev.rpa, &irk->rpa);
9023 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9024 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9025 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9026 
9027 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9028 }
9029 
/* Emit MGMT_EV_NEW_CSRK for a newly distributed signature resolving key.
 * As with LTKs, keys tied to non-identity random addresses get a zero
 * store_hint because the peer address will not be stable.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9059 
9060 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9061 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9062 			 u16 max_interval, u16 latency, u16 timeout)
9063 {
9064 	struct mgmt_ev_new_conn_param ev;
9065 
9066 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9067 		return;
9068 
9069 	memset(&ev, 0, sizeof(ev));
9070 	bacpy(&ev.addr.bdaddr, bdaddr);
9071 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9072 	ev.store_hint = store_hint;
9073 	ev.min_interval = cpu_to_le16(min_interval);
9074 	ev.max_interval = cpu_to_le16(max_interval);
9075 	ev.latency = cpu_to_le16(latency);
9076 	ev.timeout = cpu_to_le16(timeout);
9077 
9078 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9079 }
9080 
9081 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9082 			   u8 *name, u8 name_len)
9083 {
9084 	struct sk_buff *skb;
9085 	struct mgmt_ev_device_connected *ev;
9086 	u16 eir_len = 0;
9087 	u32 flags = 0;
9088 
9089 	if (conn->le_adv_data_len > 0)
9090 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9091 				     conn->le_adv_data_len);
9092 	else
9093 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9094 				     2 + name_len + 5);
9095 
9096 	ev = skb_put(skb, sizeof(*ev));
9097 	bacpy(&ev->addr.bdaddr, &conn->dst);
9098 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9099 
9100 	if (conn->out)
9101 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9102 
9103 	ev->flags = __cpu_to_le32(flags);
9104 
9105 	/* We must ensure that the EIR Data fields are ordered and
9106 	 * unique. Keep it simple for now and avoid the problem by not
9107 	 * adding any BR/EDR data to the LE adv.
9108 	 */
9109 	if (conn->le_adv_data_len > 0) {
9110 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9111 		eir_len = conn->le_adv_data_len;
9112 	} else {
9113 		if (name_len > 0) {
9114 			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
9115 						  name, name_len);
9116 			skb_put(skb, eir_len);
9117 		}
9118 
9119 		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) {
9120 			eir_len = eir_append_data(ev->eir, eir_len,
9121 						  EIR_CLASS_OF_DEV,
9122 						  conn->dev_class, 3);
9123 			skb_put(skb, 5);
9124 		}
9125 	}
9126 
9127 	ev->eir_len = cpu_to_le16(eir_len);
9128 
9129 	mgmt_event_skb(skb, NULL);
9130 }
9131 
9132 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9133 {
9134 	struct sock **sk = data;
9135 
9136 	cmd->cmd_complete(cmd, 0);
9137 
9138 	*sk = cmd->sk;
9139 	sock_hold(*sk);
9140 
9141 	mgmt_pending_remove(cmd);
9142 }
9143 
9144 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9145 {
9146 	struct hci_dev *hdev = data;
9147 	struct mgmt_cp_unpair_device *cp = cmd->param;
9148 
9149 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9150 
9151 	cmd->cmd_complete(cmd, 0);
9152 	mgmt_pending_remove(cmd);
9153 }
9154 
9155 bool mgmt_powering_down(struct hci_dev *hdev)
9156 {
9157 	struct mgmt_pending_cmd *cmd;
9158 	struct mgmt_mode *cp;
9159 
9160 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9161 	if (!cmd)
9162 		return false;
9163 
9164 	cp = cmd->param;
9165 	if (!cp->val)
9166 		return true;
9167 
9168 	return false;
9169 }
9170 
/* Emit MGMT_EV_DEVICE_DISCONNECTED and complete any pending DISCONNECT
 * and UNPAIR_DEVICE commands. The socket that issued a DISCONNECT gets
 * its command response instead of the broadcast event.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Connections never reported to mgmt generate no event */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Picks up the socket of a pending DISCONNECT command, if any */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9210 
9211 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9212 			    u8 link_type, u8 addr_type, u8 status)
9213 {
9214 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9215 	struct mgmt_cp_disconnect *cp;
9216 	struct mgmt_pending_cmd *cmd;
9217 
9218 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9219 			     hdev);
9220 
9221 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9222 	if (!cmd)
9223 		return;
9224 
9225 	cp = cmd->param;
9226 
9227 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9228 		return;
9229 
9230 	if (cp->addr.type != bdaddr_type)
9231 		return;
9232 
9233 	cmd->cmd_complete(cmd, mgmt_status(status));
9234 	mgmt_pending_remove(cmd);
9235 }
9236 
9237 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9238 			 u8 addr_type, u8 status)
9239 {
9240 	struct mgmt_ev_connect_failed ev;
9241 
9242 	/* The connection is still in hci_conn_hash so test for 1
9243 	 * instead of 0 to know if this is the last one.
9244 	 */
9245 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9246 		cancel_delayed_work(&hdev->power_off);
9247 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
9248 	}
9249 
9250 	bacpy(&ev.addr.bdaddr, bdaddr);
9251 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9252 	ev.status = mgmt_status(status);
9253 
9254 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9255 }
9256 
9257 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9258 {
9259 	struct mgmt_ev_pin_code_request ev;
9260 
9261 	bacpy(&ev.addr.bdaddr, bdaddr);
9262 	ev.addr.type = BDADDR_BREDR;
9263 	ev.secure = secure;
9264 
9265 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9266 }
9267 
9268 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9269 				  u8 status)
9270 {
9271 	struct mgmt_pending_cmd *cmd;
9272 
9273 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9274 	if (!cmd)
9275 		return;
9276 
9277 	cmd->cmd_complete(cmd, mgmt_status(status));
9278 	mgmt_pending_remove(cmd);
9279 }
9280 
9281 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9282 				      u8 status)
9283 {
9284 	struct mgmt_pending_cmd *cmd;
9285 
9286 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9287 	if (!cmd)
9288 		return;
9289 
9290 	cmd->cmd_complete(cmd, mgmt_status(status));
9291 	mgmt_pending_remove(cmd);
9292 }
9293 
9294 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9295 			      u8 link_type, u8 addr_type, u32 value,
9296 			      u8 confirm_hint)
9297 {
9298 	struct mgmt_ev_user_confirm_request ev;
9299 
9300 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9301 
9302 	bacpy(&ev.addr.bdaddr, bdaddr);
9303 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9304 	ev.confirm_hint = confirm_hint;
9305 	ev.value = cpu_to_le32(value);
9306 
9307 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9308 			  NULL);
9309 }
9310 
9311 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9312 			      u8 link_type, u8 addr_type)
9313 {
9314 	struct mgmt_ev_user_passkey_request ev;
9315 
9316 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9317 
9318 	bacpy(&ev.addr.bdaddr, bdaddr);
9319 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9320 
9321 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9322 			  NULL);
9323 }
9324 
9325 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9326 				      u8 link_type, u8 addr_type, u8 status,
9327 				      u8 opcode)
9328 {
9329 	struct mgmt_pending_cmd *cmd;
9330 
9331 	cmd = pending_find(opcode, hdev);
9332 	if (!cmd)
9333 		return -ENOENT;
9334 
9335 	cmd->cmd_complete(cmd, mgmt_status(status));
9336 	mgmt_pending_remove(cmd);
9337 
9338 	return 0;
9339 }
9340 
/* Completion hook for MGMT_OP_USER_CONFIRM_REPLY. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9347 
/* Completion hook for MGMT_OP_USER_CONFIRM_NEG_REPLY. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9355 
/* Completion hook for MGMT_OP_USER_PASSKEY_REPLY. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9362 
/* Completion hook for MGMT_OP_USER_PASSKEY_NEG_REPLY. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9370 
9371 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9372 			     u8 link_type, u8 addr_type, u32 passkey,
9373 			     u8 entered)
9374 {
9375 	struct mgmt_ev_passkey_notify ev;
9376 
9377 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9378 
9379 	bacpy(&ev.addr.bdaddr, bdaddr);
9380 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9381 	ev.passkey = __cpu_to_le32(passkey);
9382 	ev.entered = entered;
9383 
9384 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9385 }
9386 
/* Report an authentication failure to userspace and complete any pairing
 * command that was waiting on this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The pairing initiator's socket is skipped here; it gets the
	 * command response below instead.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9407 
/* Called once the controller has acknowledged a link-security (auth
 * enable) change: answer pending SET_LINK_SECURITY commands and, if the
 * HCI_LINK_SECURITY flag actually flipped, broadcast the new settings.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	/* On failure, fail all pending SET_LINK_SECURITY commands */
	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag,
	 * remembering whether that was an actual change.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9434 
9435 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9436 {
9437 	struct cmd_lookup *match = data;
9438 
9439 	if (match->sk == NULL) {
9440 		match->sk = cmd->sk;
9441 		sock_hold(match->sk);
9442 	}
9443 }
9444 
/* Called when a class-of-device update finished. Looks up (and later
 * releases) the socket of whichever command triggered it and, on success,
 * broadcasts the new class value.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have caused the update */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9463 
/* Completion handler for a local-name change on the controller.
 *
 * @hdev:   controller whose name changed
 * @name:   new complete local name (HCI_MAX_NAME_LENGTH bytes are copied)
 * @status: HCI status of the operation; non-zero aborts silently
 *
 * Emits MGMT_EV_LOCAL_NAME_CHANGED and an extended-info change, except
 * when the write happened as part of powering on the controller.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending mgmt command: the change did not come through
		 * mgmt, so mirror the new name into hdev->dev_name here.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the originator's socket so it does not see its own event */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9491 
9492 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9493 {
9494 	int i;
9495 
9496 	for (i = 0; i < uuid_count; i++) {
9497 		if (!memcmp(uuid, uuids[i], 16))
9498 			return true;
9499 	}
9500 
9501 	return false;
9502 }
9503 
/* Scan an EIR/advertising data blob for any UUID found in @uuids.
 *
 * EIR data is a sequence of [length][type][data] fields where "length"
 * counts the type byte plus the data. 16- and 32-bit UUIDs from the
 * data are expanded to 128 bits by overwriting bytes 12..15 of the
 * Bluetooth base UUID with the (little-endian on-air) short value
 * before comparing. Returns true on the first match.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* Zero length terminates the EIR data early */
		if (field_len == 0)
			break;

		/* Stop if the field claims more bytes than remain */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Data holds consecutive 2-byte UUIDs at eir[i+2] */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Data holds consecutive 4-byte UUIDs at eir[i+2] */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared as-is */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field: length byte + field_len bytes */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9558 
/* Schedule a delayed restart of LE scanning so that strict duplicate
 * filtering controllers report fresh RSSI values for known devices.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart if it would land past the end of the current
	 * discovery window (scan_start + scan_duration) - the scan is
	 * about to stop anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
9573 
9574 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9575 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9576 {
9577 	/* If a RSSI threshold has been specified, and
9578 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9579 	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9580 	 * is set, let it through for further processing, as we might need to
9581 	 * restart the scan.
9582 	 *
9583 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9584 	 * the results are also dropped.
9585 	 */
9586 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9587 	    (rssi == HCI_RSSI_INVALID ||
9588 	    (rssi < hdev->discovery.rssi &&
9589 	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9590 		return  false;
9591 
9592 	if (hdev->discovery.uuid_count != 0) {
9593 		/* If a list of UUIDs is provided in filter, results with no
9594 		 * matching UUID should be dropped.
9595 		 */
9596 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9597 				   hdev->discovery.uuids) &&
9598 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
9599 				   hdev->discovery.uuid_count,
9600 				   hdev->discovery.uuids))
9601 			return false;
9602 	}
9603 
9604 	/* If duplicate filtering does not report RSSI changes, then restart
9605 	 * scanning to ensure updated result with updated RSSI values.
9606 	 */
9607 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9608 		restart_le_scan(hdev);
9609 
9610 		/* Validate RSSI value against the RSSI threshold once more. */
9611 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9612 		    rssi < hdev->discovery.rssi)
9613 			return false;
9614 	}
9615 
9616 	return true;
9617 }
9618 
/* Report a discovered device to mgmt listeners as MGMT_EV_DEVICE_FOUND.
 *
 * @hdev:         controller that saw the device
 * @bdaddr:       remote device address
 * @link_type:    ACL_LINK (BR/EDR) or LE_LINK
 * @addr_type:    address type, combined with link_type for the event
 * @dev_class:    optional 3-byte Class of Device (BR/EDR), may be NULL
 * @rssi:         signal strength or HCI_RSSI_INVALID
 * @flags:        MGMT_DEV_FOUND_* flags
 * @eir:          EIR or advertising data
 * @scan_rsp:     LE scan response data
 *
 * Results may be suppressed by discovery state, the service-discovery
 * filter, or the limited-discovery mode checks below.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			/* BR/EDR: limited bit lives in the CoD minor field */
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: limited bit lives in the EIR Flags field */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a CoD EIR field if the device class is known but the
	 * EIR data did not already carry one (5 bytes: len, type, 3 CoD).
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	/* eir_len in the event covers EIR (+ synthesized CoD) + scan rsp */
	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
9704 
9705 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9706 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9707 {
9708 	struct sk_buff *skb;
9709 	struct mgmt_ev_device_found *ev;
9710 	u16 eir_len;
9711 	u32 flags;
9712 
9713 	if (name_len)
9714 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 2 + name_len);
9715 	else
9716 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 0);
9717 
9718 	ev = skb_put(skb, sizeof(*ev));
9719 	bacpy(&ev->addr.bdaddr, bdaddr);
9720 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9721 	ev->rssi = rssi;
9722 
9723 	if (name) {
9724 		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9725 					  name_len);
9726 		flags = 0;
9727 		skb_put(skb, eir_len);
9728 	} else {
9729 		eir_len = 0;
9730 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9731 	}
9732 
9733 	ev->eir_len = cpu_to_le16(eir_len);
9734 	ev->flags = cpu_to_le32(flags);
9735 
9736 	mgmt_event_skb(skb, NULL);
9737 }
9738 
9739 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9740 {
9741 	struct mgmt_ev_discovering ev;
9742 
9743 	bt_dev_dbg(hdev, "discovering %u", discovering);
9744 
9745 	memset(&ev, 0, sizeof(ev));
9746 	ev.type = hdev->discovery.type;
9747 	ev.discovering = discovering;
9748 
9749 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9750 }
9751 
9752 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9753 {
9754 	struct mgmt_ev_controller_suspend ev;
9755 
9756 	ev.suspend_state = state;
9757 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9758 }
9759 
9760 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9761 		   u8 addr_type)
9762 {
9763 	struct mgmt_ev_controller_resume ev;
9764 
9765 	ev.wake_reason = reason;
9766 	if (bdaddr) {
9767 		bacpy(&ev.addr.bdaddr, bdaddr);
9768 		ev.addr.type = addr_type;
9769 	} else {
9770 		memset(&ev.addr, 0, sizeof(ev.addr));
9771 	}
9772 
9773 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9774 }
9775 
/* Registration descriptor for the mgmt control channel: routes
 * HCI_CHANNEL_CONTROL messages to the mgmt_handlers table and runs
 * mgmt_init_hdev() for each controller.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9782 
/* Register the mgmt control channel with the HCI core.
 * Returns 0 on success or a negative errno from the registration.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9787 
/* Unregister the mgmt control channel; counterpart of mgmt_init() */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9792