xref: /openbmc/linux/net/bluetooth/mgmt.c (revision f71a261a)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Management opcodes accepted from trusted (privileged) control sockets.
 * This table is reported verbatim in the MGMT_OP_READ_COMMANDS reply
 * (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
133 
/* Events that may be emitted to trusted control sockets. Reported to
 * userspace in the MGMT_OP_READ_COMMANDS reply after the command table
 * (see read_commands()).
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
180 
/* Read-only subset of mgmt_commands advertised to untrusted sockets
 * (those without HCI_SOCK_TRUSTED, see read_commands()).
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
193 
/* Subset of mgmt_events advertised to untrusted sockets
 * (those without HCI_SOCK_TRUSTED, see read_commands()).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
208 
/* 2 second delay, presumably used to schedule the service_cache delayed
 * work (see service_cache_off()) — NOTE(review): use site not in this
 * chunk, confirm against the rest of the file.
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 16 bytes of all-zeroes, for comparing against blank/invalid keys */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
213 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code; each entry's comment names
 * the HCI error it maps from. HCI codes beyond the end of the table
 * fall back to MGMT_STATUS_FAILED (see mgmt_status()), so entries must
 * stay dense and in HCI numeric order.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
281 
282 static u8 mgmt_errno_status(int err)
283 {
284 	switch (err) {
285 	case 0:
286 		return MGMT_STATUS_SUCCESS;
287 	case -EPERM:
288 		return MGMT_STATUS_REJECTED;
289 	case -EINVAL:
290 		return MGMT_STATUS_INVALID_PARAMS;
291 	case -EOPNOTSUPP:
292 		return MGMT_STATUS_NOT_SUPPORTED;
293 	case -EBUSY:
294 		return MGMT_STATUS_BUSY;
295 	case -ETIMEDOUT:
296 		return MGMT_STATUS_AUTH_FAILED;
297 	case -ENOMEM:
298 		return MGMT_STATUS_NO_RESOURCES;
299 	case -EISCONN:
300 		return MGMT_STATUS_ALREADY_CONNECTED;
301 	case -ENOTCONN:
302 		return MGMT_STATUS_DISCONNECTED;
303 	}
304 
305 	return MGMT_STATUS_FAILED;
306 }
307 
308 static u8 mgmt_status(int err)
309 {
310 	if (err < 0)
311 		return mgmt_errno_status(err);
312 
313 	if (err < ARRAY_SIZE(mgmt_status_table))
314 		return mgmt_status_table[err];
315 
316 	return MGMT_STATUS_FAILED;
317 }
318 
/* Broadcast an index-related event on the control channel; @flag
 * selects which sockets are eligible to receive it. No socket is
 * skipped (skip_sk is NULL).
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
325 
/* Broadcast an event on the control channel to sockets matching @flag,
 * excluding @skip_sk (typically the socket that triggered the change).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
332 
/* Broadcast an event on the control channel to trusted sockets only,
 * excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
339 
/* Like mgmt_event() but for a pre-built skb; sent to trusted sockets
 * only, excluding @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
345 
346 static u8 le_addr_type(u8 mgmt_addr_type)
347 {
348 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
349 		return ADDR_LE_DEV_PUBLIC;
350 	else
351 		return ADDR_LE_DEV_RANDOM;
352 }
353 
/* Fill a mgmt_rp_read_version structure (passed as an opaque pointer)
 * with the compile-time MGMT protocol version and revision.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
361 
362 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
363 			u16 data_len)
364 {
365 	struct mgmt_rp_read_version rp;
366 
367 	bt_dev_dbg(hdev, "sock %p", sk);
368 
369 	mgmt_fill_version_info(&rp);
370 
371 	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
372 				 &rp, sizeof(rp));
373 }
374 
375 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
376 			 u16 data_len)
377 {
378 	struct mgmt_rp_read_commands *rp;
379 	u16 num_commands, num_events;
380 	size_t rp_size;
381 	int i, err;
382 
383 	bt_dev_dbg(hdev, "sock %p", sk);
384 
385 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
386 		num_commands = ARRAY_SIZE(mgmt_commands);
387 		num_events = ARRAY_SIZE(mgmt_events);
388 	} else {
389 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
390 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
391 	}
392 
393 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
394 
395 	rp = kmalloc(rp_size, GFP_KERNEL);
396 	if (!rp)
397 		return -ENOMEM;
398 
399 	rp->num_commands = cpu_to_le16(num_commands);
400 	rp->num_events = cpu_to_le16(num_events);
401 
402 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
403 		__le16 *opcode = rp->opcodes;
404 
405 		for (i = 0; i < num_commands; i++, opcode++)
406 			put_unaligned_le16(mgmt_commands[i], opcode);
407 
408 		for (i = 0; i < num_events; i++, opcode++)
409 			put_unaligned_le16(mgmt_events[i], opcode);
410 	} else {
411 		__le16 *opcode = rp->opcodes;
412 
413 		for (i = 0; i < num_commands; i++, opcode++)
414 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
415 
416 		for (i = 0; i < num_events; i++, opcode++)
417 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
418 	}
419 
420 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
421 				rp, rp_size);
422 	kfree(rp);
423 
424 	return err;
425 }
426 
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the ids of all
 * configured primary controllers.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the reply. The list lock is held across both
	 * passes, so no controller can appear or vanish in between.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating with the read lock held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, additionally skipping
	 * controllers still in setup/config or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length: the second pass filters more
	 * strictly than the first, so count may have shrunk.
	 */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
486 
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list()
 * but reporting only controllers that still have HCI_UNCONFIGURED set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the reply under the list lock */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating with the read lock held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, additionally skipping
	 * controllers still in setup/config or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Second pass filters more strictly; shrink the reply to fit */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: report all primary and AMP
 * controllers with a per-entry type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus. Calling this also switches
 * the socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the reply under the list lock */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: allocating with the read lock held */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries, additionally skipping
	 * controllers still in setup/config or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	/* count may have shrunk in the second pass, so recompute the
	 * reply length from it rather than reusing the allocation size.
	 */
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
620 
621 static bool is_configured(struct hci_dev *hdev)
622 {
623 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
624 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
625 		return false;
626 
627 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
628 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
629 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
630 		return false;
631 
632 	return true;
633 }
634 
635 static __le32 get_missing_options(struct hci_dev *hdev)
636 {
637 	u32 options = 0;
638 
639 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
640 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
641 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
642 
643 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
644 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
645 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
646 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
647 
648 	return cpu_to_le32(options);
649 }
650 
651 static int new_options(struct hci_dev *hdev, struct sock *skip)
652 {
653 	__le32 options = get_missing_options(hdev);
654 
655 	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
656 				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
657 }
658 
659 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
660 {
661 	__le32 options = get_missing_options(hdev);
662 
663 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
664 				 sizeof(options));
665 }
666 
667 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
668 			    void *data, u16 data_len)
669 {
670 	struct mgmt_rp_read_config_info rp;
671 	u32 options = 0;
672 
673 	bt_dev_dbg(hdev, "sock %p", sk);
674 
675 	hci_dev_lock(hdev);
676 
677 	memset(&rp, 0, sizeof(rp));
678 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
679 
680 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
681 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
682 
683 	if (hdev->set_bdaddr)
684 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
685 
686 	rp.supported_options = cpu_to_le32(options);
687 	rp.missing_options = get_missing_options(hdev);
688 
689 	hci_dev_unlock(hdev);
690 
691 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
692 				 &rp, sizeof(rp));
693 }
694 
/* Build the bitmask of PHYs this controller supports, derived from its
 * BR/EDR LMP feature bits and LE feature bits. Nesting mirrors feature
 * dependencies: EDR slot bits only matter once the corresponding EDR
 * rate is supported.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for LE */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
746 
/* Build the bitmask of PHYs currently selected. For BR/EDR this is
 * derived from hdev->pkt_type; note the inverted sense for EDR bits:
 * the HCI_2DHx/HCI_3DHx bits in pkt_type mark packet types that are
 * "shall not be used", so an EDR PHY is selected when its bit is clear.
 * For LE the default TX/RX PHY preference masks are consulted directly.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is always in use */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
809 
810 static u32 get_configurable_phys(struct hci_dev *hdev)
811 {
812 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
813 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
814 }
815 
/* Build the bitmask of settings this controller could support, based on
 * its capabilities (not on what is currently enabled — see
 * get_current_settings() for that).
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of controller features */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable requires at least Bluetooth 1.2 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed is only offered when built in */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		/* LE implies Secure Connections support */
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration is possible with an external-config quirk or a
	 * driver-provided set_bdaddr callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
862 
/* Build the bitmask of settings currently in effect, mostly a direct
 * translation of the hdev flag bits into MGMT_SETTING_* bits.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
933 
/* Look up a pending mgmt command for @opcode on the control channel */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
938 
939 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
940 {
941 	struct mgmt_pending_cmd *cmd;
942 
943 	/* If there's a pending mgmt command the flags will not yet have
944 	 * their final values, so check for this first.
945 	 */
946 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
947 	if (cmd) {
948 		struct mgmt_mode *cp = cmd->param;
949 		if (cp->val == 0x01)
950 			return LE_AD_GENERAL;
951 		else if (cp->val == 0x02)
952 			return LE_AD_LIMITED;
953 	} else {
954 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
955 			return LE_AD_LIMITED;
956 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
957 			return LE_AD_GENERAL;
958 	}
959 
960 	return 0;
961 }
962 
963 bool mgmt_get_connectable(struct hci_dev *hdev)
964 {
965 	struct mgmt_pending_cmd *cmd;
966 
967 	/* If there's a pending mgmt command the flag will not yet have
968 	 * it's final value, so check for this first.
969 	 */
970 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
971 	if (cmd) {
972 		struct mgmt_mode *cp = cmd->param;
973 
974 		return cp->val;
975 	}
976 
977 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
978 }
979 
/* hci_cmd_sync callback: push the cached EIR data and class of device
 * out to the controller. Always reports success.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
987 
988 static void service_cache_off(struct work_struct *work)
989 {
990 	struct hci_dev *hdev = container_of(work, struct hci_dev,
991 					    service_cache.work);
992 
993 	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
994 		return;
995 
996 	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
997 }
998 
999 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1000 {
1001 	/* The generation of a new RPA and programming it into the
1002 	 * controller happens in the hci_req_enable_advertising()
1003 	 * function.
1004 	 */
1005 	if (ext_adv_capable(hdev))
1006 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1007 	else
1008 		return hci_enable_advertising_sync(hdev);
1009 }
1010 
/* Delayed-work handler fired when the resolvable private address has
 * aged out. Marks it expired, and if advertising is active queues a
 * sync restart so a new RPA is picked up.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	/* Set the flag first so the restart path sees the RPA as stale */
	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1025 
/* One-time mgmt initialization of @hdev; the HCI_MGMT test-and-set
 * guard makes repeated calls no-ops.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1041 
/* Handle the Read Controller Information mgmt command: fill a
 * mgmt_rp_read_info reply (address, version, manufacturer, settings,
 * class, names) under hdev->lock and complete the command.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1071 
/* Append controller EIR data (class of device, appearance, complete and
 * short local name) to @eir and return the total number of bytes
 * written. Class is only added when BR/EDR is enabled and appearance
 * only when LE is enabled.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1095 
1096 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1097 				    void *data, u16 data_len)
1098 {
1099 	char buf[512];
1100 	struct mgmt_rp_read_ext_info *rp = (void *)buf;
1101 	u16 eir_len;
1102 
1103 	bt_dev_dbg(hdev, "sock %p", sk);
1104 
1105 	memset(&buf, 0, sizeof(buf));
1106 
1107 	hci_dev_lock(hdev);
1108 
1109 	bacpy(&rp->bdaddr, &hdev->bdaddr);
1110 
1111 	rp->version = hdev->hci_ver;
1112 	rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1113 
1114 	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1115 	rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1116 
1117 
1118 	eir_len = append_eir_data_to_buf(hdev, rp->eir);
1119 	rp->eir_len = cpu_to_le16(eir_len);
1120 
1121 	hci_dev_unlock(hdev);
1122 
1123 	/* If this command is called at least once, then the events
1124 	 * for class of device and local name changes are disabled
1125 	 * and only the new extended controller information event
1126 	 * is used.
1127 	 */
1128 	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1129 	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1130 	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1131 
1132 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1133 				 sizeof(*rp) + eir_len);
1134 }
1135 
/* Broadcast an Extended Controller Information Changed event (with a
 * freshly built EIR blob) to all sockets that opted into extended info
 * events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1151 
/* Complete a mgmt command on @sk with the current settings bitmask of
 * @hdev (little-endian) as the response payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1159 
1160 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1161 {
1162 	struct mgmt_ev_advertising_added ev;
1163 
1164 	ev.instance = instance;
1165 
1166 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1167 }
1168 
1169 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1170 			      u8 instance)
1171 {
1172 	struct mgmt_ev_advertising_removed ev;
1173 
1174 	ev.instance = instance;
1175 
1176 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1177 }
1178 
1179 static void cancel_adv_timeout(struct hci_dev *hdev)
1180 {
1181 	if (hdev->adv_instance_timeout) {
1182 		hdev->adv_instance_timeout = 0;
1183 		cancel_delayed_work(&hdev->adv_instance_expire);
1184 	}
1185 }
1186 
/* This function requires the caller holds hdev->lock */
/* Re-queue every LE connection parameter entry onto the pending
 * connect/report lists according to its auto_connect policy.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1211 
/* Broadcast a New Settings event with the current settings bitmask to
 * all sockets interested in setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1219 
/* hci_cmd_sync completion callback for Set Powered: answer the pending
 * mgmt command and, on successful power-on, restart LE actions and
 * passive scanning and broadcast the new settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1255 
/* hci_cmd_sync callback: apply the requested powered state from the
 * pending Set Powered command parameters.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1265 
/* Handle the Set Powered mgmt command: power the controller up or down.
 *
 * Rejects values other than 0x00/0x01, refuses while another Set
 * Powered is pending, short-circuits when already in the requested
 * state, and otherwise queues set_powered_sync().
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings back */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1308 
/* Public wrapper: broadcast the current settings to all listeners. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1313 
/* Iteration context for mgmt_pending_foreach() callbacks: records the
 * socket of the first matched command and an aggregate mgmt status.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1319 
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings, remember the first responder's socket in the lookup context
 * (taking a reference), and free the pending command.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1335 
/* mgmt_pending_foreach() callback: fail @cmd with the status passed via
 * @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1343 
1344 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1345 {
1346 	if (cmd->cmd_complete) {
1347 		u8 *status = data;
1348 
1349 		cmd->cmd_complete(cmd, *status);
1350 		mgmt_pending_remove(cmd);
1351 
1352 		return;
1353 	}
1354 
1355 	cmd_status_rsp(cmd, data);
1356 }
1357 
/* Generic cmd_complete handler: echo the command's own parameters back
 * as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1363 
/* cmd_complete handler for address-based commands: respond with only
 * the leading mgmt_addr_info portion of the command parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1369 
1370 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1371 {
1372 	if (!lmp_bredr_capable(hdev))
1373 		return MGMT_STATUS_NOT_SUPPORTED;
1374 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1375 		return MGMT_STATUS_REJECTED;
1376 	else
1377 		return MGMT_STATUS_SUCCESS;
1378 }
1379 
1380 static u8 mgmt_le_support(struct hci_dev *hdev)
1381 {
1382 	if (!lmp_le_capable(hdev))
1383 		return MGMT_STATUS_NOT_SUPPORTED;
1384 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1385 		return MGMT_STATUS_REJECTED;
1386 	else
1387 		return MGMT_STATUS_SUCCESS;
1388 }
1389 
/* hci_cmd_sync completion callback for Set Discoverable: on failure
 * report the status and clear limited discoverable; on success arm the
 * discoverable timeout (if configured) and broadcast the new settings.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the delayed work that turns discoverable mode back off */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1423 
/* hci_cmd_sync callback: push the discoverable state (set up earlier in
 * set_discoverable()) down to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1430 
/* Handle the Set Discoverable mgmt command.
 *
 * Validates mode (0x00 off, 0x01 general, 0x02 limited) and timeout
 * combinations, handles the powered-off and timeout-only-change cases
 * without HCI traffic, and otherwise updates the discoverable flags and
 * queues set_discoverable_sync().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1563 
1564 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1565 					  int err)
1566 {
1567 	struct mgmt_pending_cmd *cmd = data;
1568 
1569 	bt_dev_dbg(hdev, "err %d", err);
1570 
1571 	/* Make sure cmd still outstanding. */
1572 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1573 		return;
1574 
1575 	hci_dev_lock(hdev);
1576 
1577 	if (err) {
1578 		u8 mgmt_err = mgmt_status(err);
1579 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1580 		goto done;
1581 	}
1582 
1583 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1584 	new_settings(hdev, cmd->sk);
1585 
1586 done:
1587 	if (cmd)
1588 		mgmt_pending_remove(cmd);
1589 
1590 	hci_dev_unlock(hdev);
1591 }
1592 
/* Apply a connectable change while the controller is powered off: only
 * flags are touched (disabling connectable also clears discoverable),
 * then the caller's command is answered and, on an actual change, scan
 * state is refreshed and New Settings is broadcast.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1621 
/* hci_cmd_sync callback: push the connectable state (set up earlier in
 * set_connectable()) down to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1628 
/* Handle the Set Connectable mgmt command.
 *
 * Validates the mode, handles the powered-off case via flag-only
 * updates, updates connectable/discoverable flags (disabling
 * connectable also cancels discoverable), and queues
 * set_connectable_sync().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off implies no discoverable mode
		 * either, so stop the discoverable timer and clear the
		 * related flags.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1688 
/* Handle the Set Bondable mgmt command: toggle the HCI_BONDABLE flag
 * (flag-only, no HCI traffic) and broadcast New Settings on a change.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1726 
/* Handle the Set Link Security mgmt command.
 *
 * Requires BR/EDR support; when powered off only the HCI_LINK_SECURITY
 * flag is toggled, otherwise HCI_OP_WRITE_AUTH_ENABLE is sent and the
 * command completes asynchronously via the pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already matches the requested auth mode */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1795 
/* hci_cmd_sync completion callback for Set SSP: reconcile the
 * HCI_SSP_ENABLED / HCI_HS_ENABLED flags with the result, respond to
 * all pending Set SSP commands, broadcast settings changes and refresh
 * the EIR data.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the flag set optimistically in set_ssp_sync() */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* Disabling SSP also takes High Speed down with it */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1844 
/* hci_cmd_sync callback for Set SSP: optimistically set the flag when
 * enabling (so the write uses the new state), send Write Simple Pairing
 * Mode, and undo the flag change if the write failed.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1862 
/* Handle the Set Secure Simple Pairing mgmt command.
 *
 * Requires BR/EDR and SSP capability; when powered off only flags are
 * toggled (disabling SSP also clears High Speed), otherwise
 * set_ssp_sync() is queued and completion is handled asynchronously.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo the settings back */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1942 
/* Handle the Set High Speed mgmt command: flag-only toggle of
 * HCI_HS_ENABLED. Requires CONFIG_BT_HS, BR/EDR, SSP capability and
 * SSP enabled; disabling while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP could still flip HCI_SSP_ENABLED underneath us */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2003 
/* hci_cmd_sync completion callback for Set LE: on failure fail all
 * pending Set LE commands with the status; on success answer them with
 * the current settings and broadcast New Settings.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
2024 
/* hci_cmd_sync callback for Set LE: tear down advertising when
 * disabling, set the flag when enabling, write the LE host support
 * setting, and refresh advertising data and passive scan when LE ends
 * up enabled.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2066 
/* Handle the Set Low Energy mgmt command.
 *
 * LE-only controllers cannot switch LE off (re-enabling succeeds
 * gracefully); powered-off or no-op requests are handled via flags
 * only; otherwise set_le_sync() is queued.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2158 
2159 /* This is a helper function to test for pending mgmt commands that can
2160  * cause CoD or EIR HCI commands. We can only allow one such pending
2161  * mgmt command at a time since otherwise we cannot easily track what
2162  * the current values are, will be, and based on that calculate if a new
2163  * HCI command needs to be sent and if yes with what value.
2164  */
2165 static bool pending_eir_or_class(struct hci_dev *hdev)
2166 {
2167 	struct mgmt_pending_cmd *cmd;
2168 
2169 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2170 		switch (cmd->opcode) {
2171 		case MGMT_OP_ADD_UUID:
2172 		case MGMT_OP_REMOVE_UUID:
2173 		case MGMT_OP_SET_DEV_CLASS:
2174 		case MGMT_OP_SET_POWERED:
2175 			return true;
2176 		}
2177 	}
2178 
2179 	return false;
2180 }
2181 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order; used by get_uuid_size() to detect UUIDs
 * that can be shortened to 16 or 32 bits.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2186 
2187 static u8 get_uuid_size(const u8 *uuid)
2188 {
2189 	u32 val;
2190 
2191 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2192 		return 128;
2193 
2194 	val = get_unaligned_le32(&uuid[12]);
2195 	if (val > 0xffff)
2196 		return 32;
2197 
2198 	return 16;
2199 }
2200 
/* Common cmd_sync completion callback for the UUID and device class
 * commands: reply to the originating socket with the current Class of
 * Device (3 bytes) and free the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2212 
/* cmd_sync work for MGMT_OP_ADD_UUID: refresh the Class of Device and,
 * on success, the EIR data as well.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2223 
2224 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2225 {
2226 	struct mgmt_cp_add_uuid *cp = data;
2227 	struct mgmt_pending_cmd *cmd;
2228 	struct bt_uuid *uuid;
2229 	int err;
2230 
2231 	bt_dev_dbg(hdev, "sock %p", sk);
2232 
2233 	hci_dev_lock(hdev);
2234 
2235 	if (pending_eir_or_class(hdev)) {
2236 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2237 				      MGMT_STATUS_BUSY);
2238 		goto failed;
2239 	}
2240 
2241 	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2242 	if (!uuid) {
2243 		err = -ENOMEM;
2244 		goto failed;
2245 	}
2246 
2247 	memcpy(uuid->uuid, cp->uuid, 16);
2248 	uuid->svc_hint = cp->svc_hint;
2249 	uuid->size = get_uuid_size(cp->uuid);
2250 
2251 	list_add_tail(&uuid->list, &hdev->uuids);
2252 
2253 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2254 	if (!cmd) {
2255 		err = -ENOMEM;
2256 		goto failed;
2257 	}
2258 
2259 	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2260 	if (err < 0) {
2261 		mgmt_pending_free(cmd);
2262 		goto failed;
2263 	}
2264 
2265 failed:
2266 	hci_dev_unlock(hdev);
2267 	return err;
2268 }
2269 
2270 static bool enable_service_cache(struct hci_dev *hdev)
2271 {
2272 	if (!hdev_is_powered(hdev))
2273 		return false;
2274 
2275 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2276 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2277 				   CACHE_TIMEOUT);
2278 		return true;
2279 	}
2280 
2281 	return false;
2282 }
2283 
/* cmd_sync work for MGMT_OP_REMOVE_UUID: refresh the Class of Device
 * first, then the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}
2294 
/* Handler for the MGMT_OP_REMOVE_UUID command: remove one service UUID
 * (or all of them, when an all-zero UUID is given) and queue the Class
 * of Device / EIR refresh. The reply is sent from mgmt_class_complete()
 * unless it can be completed immediately.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID is the wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may run at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer could be armed, the actual
		 * class/EIR update happens later from the cache work, so
		 * reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry matching the requested UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2362 
2363 static int set_class_sync(struct hci_dev *hdev, void *data)
2364 {
2365 	int err = 0;
2366 
2367 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2368 		cancel_delayed_work_sync(&hdev->service_cache);
2369 		err = hci_update_eir_sync(hdev);
2370 	}
2371 
2372 	if (err)
2373 		return err;
2374 
2375 	return hci_update_class_sync(hdev);
2376 }
2377 
/* Handler for the MGMT_OP_SET_DEV_CLASS command: update the major and
 * minor Class of Device. When powered, the controller update is queued
 * and the reply comes from mgmt_class_complete(); otherwise the values
 * are just stored and the reply is sent immediately.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may run at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The low two bits of minor and the high three bits of major are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: values take effect when the device is powered on */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2429 
/* Handler for the MGMT_OP_LOAD_LINK_KEYS command: replace all stored
 * BR/EDR link keys with the list supplied by userspace and update the
 * keep-debug-keys policy. Keys on the blocked-key list and debug
 * combination keys are silently skipped.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count such that the total message length
	 * still fits in a u16.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before any state is modified */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2518 
2519 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2520 			   u8 addr_type, struct sock *skip_sk)
2521 {
2522 	struct mgmt_ev_device_unpaired ev;
2523 
2524 	bacpy(&ev.addr.bdaddr, bdaddr);
2525 	ev.addr.type = addr_type;
2526 
2527 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2528 			  skip_sk);
2529 }
2530 
/* Handler for the MGMT_OP_UNPAIR_DEVICE command: remove the stored
 * pairing data (BR/EDR link key, or LE LTK/IRK via SMP) for the given
 * address and, when requested, terminate an existing connection. If a
 * link is being terminated the reply is deferred via a pending command;
 * otherwise it is sent immediately.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean parameter */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* No active connection: drop the connection parameters right away */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2658 
/* Handler for the MGMT_OP_DISCONNECT command: terminate the BR/EDR or
 * LE connection to the given address. The reply is deferred via a
 * pending command and completed from the disconnection handling path.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no established link to drop */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2724 
2725 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2726 {
2727 	switch (link_type) {
2728 	case LE_LINK:
2729 		switch (addr_type) {
2730 		case ADDR_LE_DEV_PUBLIC:
2731 			return BDADDR_LE_PUBLIC;
2732 
2733 		default:
2734 			/* Fallback to LE Random address type */
2735 			return BDADDR_LE_RANDOM;
2736 		}
2737 
2738 	default:
2739 		/* Fallback to BR/EDR type */
2740 		return BDADDR_BREDR;
2741 	}
2742 }
2743 
2744 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2745 			   u16 data_len)
2746 {
2747 	struct mgmt_rp_get_connections *rp;
2748 	struct hci_conn *c;
2749 	int err;
2750 	u16 i;
2751 
2752 	bt_dev_dbg(hdev, "sock %p", sk);
2753 
2754 	hci_dev_lock(hdev);
2755 
2756 	if (!hdev_is_powered(hdev)) {
2757 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2758 				      MGMT_STATUS_NOT_POWERED);
2759 		goto unlock;
2760 	}
2761 
2762 	i = 0;
2763 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2764 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2765 			i++;
2766 	}
2767 
2768 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2769 	if (!rp) {
2770 		err = -ENOMEM;
2771 		goto unlock;
2772 	}
2773 
2774 	i = 0;
2775 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2776 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2777 			continue;
2778 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2779 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2780 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2781 			continue;
2782 		i++;
2783 	}
2784 
2785 	rp->conn_count = cpu_to_le16(i);
2786 
2787 	/* Recalculate length in case of filtered SCO connections, etc */
2788 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2789 				struct_size(rp, addr, i));
2790 
2791 	kfree(rp);
2792 
2793 unlock:
2794 	hci_dev_unlock(hdev);
2795 	return err;
2796 }
2797 
/* Send an HCI PIN Code Negative Reply for @cp and register a pending
 * MGMT_OP_PIN_CODE_NEG_REPLY command whose reply is completed via
 * addr_cmd_complete from the HCI event path.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2818 
/* Handler for the MGMT_OP_PIN_CODE_REPLY command: forward the PIN code
 * supplied by userspace to the controller. A high-security pairing
 * requires a full 16 byte PIN; shorter PINs are converted into a
 * negative reply.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* Security level HIGH demands a 16 digit PIN; anything shorter
	 * is rejected towards the remote with a negative reply.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2880 
2881 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2882 			     u16 len)
2883 {
2884 	struct mgmt_cp_set_io_capability *cp = data;
2885 
2886 	bt_dev_dbg(hdev, "sock %p", sk);
2887 
2888 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2889 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2890 				       MGMT_STATUS_INVALID_PARAMS);
2891 
2892 	hci_dev_lock(hdev);
2893 
2894 	hdev->io_capability = cp->io_capability;
2895 
2896 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2897 
2898 	hci_dev_unlock(hdev);
2899 
2900 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2901 				 NULL, 0);
2902 }
2903 
2904 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2905 {
2906 	struct hci_dev *hdev = conn->hdev;
2907 	struct mgmt_pending_cmd *cmd;
2908 
2909 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2910 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2911 			continue;
2912 
2913 		if (cmd->user_data != conn)
2914 			continue;
2915 
2916 		return cmd;
2917 	}
2918 
2919 	return NULL;
2920 }
2921 
/* Finish a PAIR_DEVICE command: send the reply with @status, detach the
 * pairing callbacks from the connection and drop the references taken
 * when the command was set up.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
2950 
2951 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2952 {
2953 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2954 	struct mgmt_pending_cmd *cmd;
2955 
2956 	cmd = find_pairing(conn);
2957 	if (cmd) {
2958 		cmd->cmd_complete(cmd, status);
2959 		mgmt_pending_remove(cmd);
2960 	}
2961 }
2962 
2963 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2964 {
2965 	struct mgmt_pending_cmd *cmd;
2966 
2967 	BT_DBG("status %u", status);
2968 
2969 	cmd = find_pairing(conn);
2970 	if (!cmd) {
2971 		BT_DBG("Unable to find a pending command");
2972 		return;
2973 	}
2974 
2975 	cmd->cmd_complete(cmd, mgmt_status(status));
2976 	mgmt_pending_remove(cmd);
2977 }
2978 
2979 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2980 {
2981 	struct mgmt_pending_cmd *cmd;
2982 
2983 	BT_DBG("status %u", status);
2984 
2985 	if (!status)
2986 		return;
2987 
2988 	cmd = find_pairing(conn);
2989 	if (!cmd) {
2990 		BT_DBG("Unable to find a pending command");
2991 		return;
2992 	}
2993 
2994 	cmd->cmd_complete(cmd, mgmt_status(status));
2995 	mgmt_pending_remove(cmd);
2996 }
2997 
2998 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2999 		       u16 len)
3000 {
3001 	struct mgmt_cp_pair_device *cp = data;
3002 	struct mgmt_rp_pair_device rp;
3003 	struct mgmt_pending_cmd *cmd;
3004 	u8 sec_level, auth_type;
3005 	struct hci_conn *conn;
3006 	int err;
3007 
3008 	bt_dev_dbg(hdev, "sock %p", sk);
3009 
3010 	memset(&rp, 0, sizeof(rp));
3011 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3012 	rp.addr.type = cp->addr.type;
3013 
3014 	if (!bdaddr_type_is_valid(cp->addr.type))
3015 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3016 					 MGMT_STATUS_INVALID_PARAMS,
3017 					 &rp, sizeof(rp));
3018 
3019 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3020 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3021 					 MGMT_STATUS_INVALID_PARAMS,
3022 					 &rp, sizeof(rp));
3023 
3024 	hci_dev_lock(hdev);
3025 
3026 	if (!hdev_is_powered(hdev)) {
3027 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3028 					MGMT_STATUS_NOT_POWERED, &rp,
3029 					sizeof(rp));
3030 		goto unlock;
3031 	}
3032 
3033 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3034 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3035 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3036 					sizeof(rp));
3037 		goto unlock;
3038 	}
3039 
3040 	sec_level = BT_SECURITY_MEDIUM;
3041 	auth_type = HCI_AT_DEDICATED_BONDING;
3042 
3043 	if (cp->addr.type == BDADDR_BREDR) {
3044 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3045 				       auth_type, CONN_REASON_PAIR_DEVICE);
3046 	} else {
3047 		u8 addr_type = le_addr_type(cp->addr.type);
3048 		struct hci_conn_params *p;
3049 
3050 		/* When pairing a new device, it is expected to remember
3051 		 * this device for future connections. Adding the connection
3052 		 * parameter information ahead of time allows tracking
3053 		 * of the peripheral preferred values and will speed up any
3054 		 * further connection establishment.
3055 		 *
3056 		 * If connection parameters already exist, then they
3057 		 * will be kept and this function does nothing.
3058 		 */
3059 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3060 
3061 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3062 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3063 
3064 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3065 					   sec_level, HCI_LE_CONN_TIMEOUT,
3066 					   CONN_REASON_PAIR_DEVICE);
3067 	}
3068 
3069 	if (IS_ERR(conn)) {
3070 		int status;
3071 
3072 		if (PTR_ERR(conn) == -EBUSY)
3073 			status = MGMT_STATUS_BUSY;
3074 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3075 			status = MGMT_STATUS_NOT_SUPPORTED;
3076 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3077 			status = MGMT_STATUS_REJECTED;
3078 		else
3079 			status = MGMT_STATUS_CONNECT_FAILED;
3080 
3081 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3082 					status, &rp, sizeof(rp));
3083 		goto unlock;
3084 	}
3085 
3086 	if (conn->connect_cfm_cb) {
3087 		hci_conn_drop(conn);
3088 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3089 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3090 		goto unlock;
3091 	}
3092 
3093 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3094 	if (!cmd) {
3095 		err = -ENOMEM;
3096 		hci_conn_drop(conn);
3097 		goto unlock;
3098 	}
3099 
3100 	cmd->cmd_complete = pairing_complete;
3101 
3102 	/* For LE, just connecting isn't a proof that the pairing finished */
3103 	if (cp->addr.type == BDADDR_BREDR) {
3104 		conn->connect_cfm_cb = pairing_complete_cb;
3105 		conn->security_cfm_cb = pairing_complete_cb;
3106 		conn->disconn_cfm_cb = pairing_complete_cb;
3107 	} else {
3108 		conn->connect_cfm_cb = le_pairing_complete_cb;
3109 		conn->security_cfm_cb = le_pairing_complete_cb;
3110 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3111 	}
3112 
3113 	conn->io_capability = cp->io_cap;
3114 	cmd->user_data = hci_conn_get(conn);
3115 
3116 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3117 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3118 		cmd->cmd_complete(cmd, 0);
3119 		mgmt_pending_remove(cmd);
3120 	}
3121 
3122 	err = 0;
3123 
3124 unlock:
3125 	hci_dev_unlock(hdev);
3126 	return err;
3127 }
3128 
/* Handler for the MGMT_OP_CANCEL_PAIR_DEVICE command: abort the pending
 * PAIR_DEVICE command for the given address, remove any pairing data
 * created so far and tear down a link that exists only because of the
 * pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* user_data holds the connection the pairing was started on */
	conn = cmd->user_data;

	/* The address must match the pairing that is being cancelled */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3185 
/* Common helper for the user pairing response commands (PIN code, user
 * confirm and passkey replies and their negative variants). LE
 * responses are forwarded to SMP; BR/EDR responses are sent to the
 * controller as the HCI command @hci_op, with the reply deferred via a
 * pending @mgmt_op command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled entirely by SMP */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3256 
3257 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3258 			      void *data, u16 len)
3259 {
3260 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3261 
3262 	bt_dev_dbg(hdev, "sock %p", sk);
3263 
3264 	return user_pairing_resp(sk, hdev, &cp->addr,
3265 				MGMT_OP_PIN_CODE_NEG_REPLY,
3266 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3267 }
3268 
3269 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3270 			      u16 len)
3271 {
3272 	struct mgmt_cp_user_confirm_reply *cp = data;
3273 
3274 	bt_dev_dbg(hdev, "sock %p", sk);
3275 
3276 	if (len != sizeof(*cp))
3277 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3278 				       MGMT_STATUS_INVALID_PARAMS);
3279 
3280 	return user_pairing_resp(sk, hdev, &cp->addr,
3281 				 MGMT_OP_USER_CONFIRM_REPLY,
3282 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3283 }
3284 
3285 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3286 				  void *data, u16 len)
3287 {
3288 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3289 
3290 	bt_dev_dbg(hdev, "sock %p", sk);
3291 
3292 	return user_pairing_resp(sk, hdev, &cp->addr,
3293 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3294 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3295 }
3296 
3297 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3298 			      u16 len)
3299 {
3300 	struct mgmt_cp_user_passkey_reply *cp = data;
3301 
3302 	bt_dev_dbg(hdev, "sock %p", sk);
3303 
3304 	return user_pairing_resp(sk, hdev, &cp->addr,
3305 				 MGMT_OP_USER_PASSKEY_REPLY,
3306 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3307 }
3308 
3309 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3310 				  void *data, u16 len)
3311 {
3312 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3313 
3314 	bt_dev_dbg(hdev, "sock %p", sk);
3315 
3316 	return user_pairing_resp(sk, hdev, &cp->addr,
3317 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3318 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3319 }
3320 
3321 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3322 {
3323 	struct adv_info *adv_instance;
3324 
3325 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3326 	if (!adv_instance)
3327 		return 0;
3328 
3329 	/* stop if current instance doesn't need to be changed */
3330 	if (!(adv_instance->flags & flags))
3331 		return 0;
3332 
3333 	cancel_adv_timeout(hdev);
3334 
3335 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3336 	if (!adv_instance)
3337 		return 0;
3338 
3339 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3340 
3341 	return 0;
3342 }
3343 
/* hci_cmd_sync callback: expire advertising instances that include the
 * local name after it has been changed.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3348 
/* Completion callback for Set Local Name: report the result to the
 * issuing socket and, while LE advertising is active, queue an update
 * of the advertising instance that carries the name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if this command is no longer the pending one (e.g.
	 * it was removed in the meantime).
	 */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3373 
3374 static int set_name_sync(struct hci_dev *hdev, void *data)
3375 {
3376 	if (lmp_bredr_capable(hdev)) {
3377 		hci_update_name_sync(hdev);
3378 		hci_update_eir_sync(hdev);
3379 	}
3380 
3381 	/* The name is stored in the scan response data and so
3382 	 * no need to update the advertising data here.
3383 	 */
3384 	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3385 		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3386 
3387 	return 0;
3388 }
3389 
/* Set Local Name command handler: store the new complete and short
 * names and, when powered, queue the HCI updates for the controller
 * name, EIR and scan response data.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* While powered off nothing has to be sent to the controller:
	 * store the name and notify interested sockets directly.
	 * NOTE: despite its name, the "failed" label is also the
	 * regular exit path here.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* The complete name is only committed once the update has been
	 * successfully queued; set_name_complete() reports the result.
	 */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3452 
/* hci_cmd_sync callback: expire advertising instances that include the
 * appearance value after it has been changed.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3457 
3458 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3459 			  u16 len)
3460 {
3461 	struct mgmt_cp_set_appearance *cp = data;
3462 	u16 appearance;
3463 	int err;
3464 
3465 	bt_dev_dbg(hdev, "sock %p", sk);
3466 
3467 	if (!lmp_le_capable(hdev))
3468 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3469 				       MGMT_STATUS_NOT_SUPPORTED);
3470 
3471 	appearance = le16_to_cpu(cp->appearance);
3472 
3473 	hci_dev_lock(hdev);
3474 
3475 	if (hdev->appearance != appearance) {
3476 		hdev->appearance = appearance;
3477 
3478 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3479 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3480 					   NULL);
3481 
3482 		ext_info_changed(hdev, sk);
3483 	}
3484 
3485 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3486 				0);
3487 
3488 	hci_dev_unlock(hdev);
3489 
3490 	return err;
3491 }
3492 
3493 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3494 				 void *data, u16 len)
3495 {
3496 	struct mgmt_rp_get_phy_configuration rp;
3497 
3498 	bt_dev_dbg(hdev, "sock %p", sk);
3499 
3500 	hci_dev_lock(hdev);
3501 
3502 	memset(&rp, 0, sizeof(rp));
3503 
3504 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3505 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3506 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3507 
3508 	hci_dev_unlock(hdev);
3509 
3510 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3511 				 &rp, sizeof(rp));
3512 }
3513 
3514 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3515 {
3516 	struct mgmt_ev_phy_configuration_changed ev;
3517 
3518 	memset(&ev, 0, sizeof(ev));
3519 
3520 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3521 
3522 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3523 			  sizeof(ev), skip);
3524 }
3525 
/* Completion callback for Set PHY Configuration: derive the final
 * status from the HCI response skb and notify the issuing socket as
 * well as all other management sockets on success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Ignore stale completions for commands no longer pending. */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* When the request itself succeeded, the response skb (or the
	 * error it encodes, or its first status byte) decides the
	 * final outcome.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	/* Only a real skb may be freed; IS_ERR values are not skbs. */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3562 
3563 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3564 {
3565 	struct mgmt_pending_cmd *cmd = data;
3566 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3567 	struct hci_cp_le_set_default_phy cp_phy;
3568 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3569 
3570 	memset(&cp_phy, 0, sizeof(cp_phy));
3571 
3572 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3573 		cp_phy.all_phys |= 0x01;
3574 
3575 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3576 		cp_phy.all_phys |= 0x02;
3577 
3578 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3579 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3580 
3581 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3582 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3583 
3584 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3585 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3586 
3587 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3588 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3589 
3590 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3591 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3592 
3593 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3594 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3595 
3596 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3597 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3598 
3599 	return 0;
3600 }
3601 
/* Set PHY Configuration command handler: validate the requested PHY
 * selection, translate the BR/EDR portion into an ACL packet type mask
 * applied directly to hdev, and queue an HCI command for the LE
 * portion when it changed.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is invalid. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Non-configurable PHYs must all remain selected. */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR PHY selection into the ACL packet type
	 * mask.  Note that the 2M/3M (EDR) bits are inverted: setting
	 * an HCI_2DHx/HCI_3DHx bit in pkt_type *disables* that packet
	 * type ("shall not be used" semantics).
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part is unchanged, the command is done here; only
	 * signal the event when the BR/EDR packet types changed.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	/* The LE selection requires an HCI command; queue it and let
	 * set_default_phy_complete() report the result.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3730 
3731 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3732 			    u16 len)
3733 {
3734 	int err = MGMT_STATUS_SUCCESS;
3735 	struct mgmt_cp_set_blocked_keys *keys = data;
3736 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3737 				   sizeof(struct mgmt_blocked_key_info));
3738 	u16 key_count, expected_len;
3739 	int i;
3740 
3741 	bt_dev_dbg(hdev, "sock %p", sk);
3742 
3743 	key_count = __le16_to_cpu(keys->key_count);
3744 	if (key_count > max_key_count) {
3745 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3746 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3747 				       MGMT_STATUS_INVALID_PARAMS);
3748 	}
3749 
3750 	expected_len = struct_size(keys, keys, key_count);
3751 	if (expected_len != len) {
3752 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3753 			   expected_len, len);
3754 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3755 				       MGMT_STATUS_INVALID_PARAMS);
3756 	}
3757 
3758 	hci_dev_lock(hdev);
3759 
3760 	hci_blocked_keys_clear(hdev);
3761 
3762 	for (i = 0; i < keys->key_count; ++i) {
3763 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3764 
3765 		if (!b) {
3766 			err = MGMT_STATUS_NO_RESOURCES;
3767 			break;
3768 		}
3769 
3770 		b->type = keys->keys[i].type;
3771 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3772 		list_add_rcu(&b->list, &hdev->blocked_keys);
3773 	}
3774 	hci_dev_unlock(hdev);
3775 
3776 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3777 				err, NULL, 0);
3778 }
3779 
3780 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3781 			       void *data, u16 len)
3782 {
3783 	struct mgmt_mode *cp = data;
3784 	int err;
3785 	bool changed = false;
3786 
3787 	bt_dev_dbg(hdev, "sock %p", sk);
3788 
3789 	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3790 		return mgmt_cmd_status(sk, hdev->id,
3791 				       MGMT_OP_SET_WIDEBAND_SPEECH,
3792 				       MGMT_STATUS_NOT_SUPPORTED);
3793 
3794 	if (cp->val != 0x00 && cp->val != 0x01)
3795 		return mgmt_cmd_status(sk, hdev->id,
3796 				       MGMT_OP_SET_WIDEBAND_SPEECH,
3797 				       MGMT_STATUS_INVALID_PARAMS);
3798 
3799 	hci_dev_lock(hdev);
3800 
3801 	if (hdev_is_powered(hdev) &&
3802 	    !!cp->val != hci_dev_test_flag(hdev,
3803 					   HCI_WIDEBAND_SPEECH_ENABLED)) {
3804 		err = mgmt_cmd_status(sk, hdev->id,
3805 				      MGMT_OP_SET_WIDEBAND_SPEECH,
3806 				      MGMT_STATUS_REJECTED);
3807 		goto unlock;
3808 	}
3809 
3810 	if (cp->val)
3811 		changed = !hci_dev_test_and_set_flag(hdev,
3812 						   HCI_WIDEBAND_SPEECH_ENABLED);
3813 	else
3814 		changed = hci_dev_test_and_clear_flag(hdev,
3815 						   HCI_WIDEBAND_SPEECH_ENABLED);
3816 
3817 	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3818 	if (err < 0)
3819 		goto unlock;
3820 
3821 	if (changed)
3822 		err = new_settings(hdev, sk);
3823 
3824 unlock:
3825 	hci_dev_unlock(hdev);
3826 	return err;
3827 }
3828 
/* Read Controller Capabilities command handler: build a reply of
 * EIR-style (length, type, value) entries describing security related
 * capabilities of the controller.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* 20 bytes covers the 2 byte cap_len header plus all entries
	 * appended below (3 + 4 + 4 + 4 bytes at most).
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3895 
/* Experimental feature UUIDs, stored in little-endian byte order
 * (i.e. the byte-reversed form of the UUID string quoted above each
 * array).
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3927 
/* Read Experimental Features command handler: list the experimental
 * features applicable to the given index (or the non-controller index
 * when @hdev is NULL) together with their current flags.
 *
 * Flag bit 0 means "enabled"; the rpa_resolution entry additionally
 * sets bit 1 (changing it alters the supported settings).
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	/* NOTE: buf must grow if more than 5 features are appended
	 * below (each feature entry is 16 bytes UUID + 4 bytes flags).
	 */
	char buf[102];   /* Enough space for 5 features: 2 + 20 * 5 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* The debug feature is only exposed on the non-controller index. */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Quality report is offered when either the driver provides a
	 * callback or the AOSP extension supports it.
	 */
	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
4006 
4007 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4008 					  struct sock *skip)
4009 {
4010 	struct mgmt_ev_exp_feature_changed ev;
4011 
4012 	memset(&ev, 0, sizeof(ev));
4013 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
4014 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4015 
4016 	if (enabled && privacy_mode_capable(hdev))
4017 		set_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
4018 	else
4019 		clear_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
4020 
4021 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4022 				  &ev, sizeof(ev),
4023 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4024 
4025 }
4026 
4027 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4028 			       bool enabled, struct sock *skip)
4029 {
4030 	struct mgmt_ev_exp_feature_changed ev;
4031 
4032 	memset(&ev, 0, sizeof(ev));
4033 	memcpy(ev.uuid, uuid, 16);
4034 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4035 
4036 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4037 				  &ev, sizeof(ev),
4038 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4039 }
4040 
/* Helper for building the experimental feature handler table: pairs a
 * feature UUID with the function that toggles it.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4046 
/* The zero key uuid is special. Multiple exp features are set through it.
 * Without a controller index it disables the debug feature; with one it
 * disables LL privacy (only while powered down).  The reply always
 * carries the all-zeroes UUID and no flags.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be switched off while powered down. */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4083 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggle the global Bluetooth debug feature.  Uses the non-controller
 * index only and takes a single boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Only broadcast the event when the state actually changed. */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4130 
/* Toggle the experimental LL privacy (RPA resolution) feature for a
 * controller.  Only allowed while the controller is powered down; the
 * reply flags set BIT(1) because toggling this feature changes the
 * supported settings.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* Advertising gets disabled when enabling LL privacy. */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Only broadcast the event when the flag actually changed. */
	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4195 
/* Toggle the experimental quality report feature.  The change is
 * applied through the driver's set_quality_report callback when it
 * exists, otherwise through the AOSP vendor extension.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver/AOSP callbacks issue HCI commands, so hold the
	 * request sync lock across the state change.
	 */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Prefer the driver callback over the AOSP extension. */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4269 
4270 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4271 				  struct mgmt_cp_set_exp_feature *cp,
4272 				  u16 data_len)
4273 {
4274 	bool val, changed;
4275 	int err;
4276 	struct mgmt_rp_set_exp_feature rp;
4277 
4278 	/* Command requires to use a valid controller index */
4279 	if (!hdev)
4280 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4281 				       MGMT_OP_SET_EXP_FEATURE,
4282 				       MGMT_STATUS_INVALID_INDEX);
4283 
4284 	/* Parameters are limited to a single octet */
4285 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4286 		return mgmt_cmd_status(sk, hdev->id,
4287 				       MGMT_OP_SET_EXP_FEATURE,
4288 				       MGMT_STATUS_INVALID_PARAMS);
4289 
4290 	/* Only boolean on/off is supported */
4291 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4292 		return mgmt_cmd_status(sk, hdev->id,
4293 				       MGMT_OP_SET_EXP_FEATURE,
4294 				       MGMT_STATUS_INVALID_PARAMS);
4295 
4296 	val = !!cp->param[0];
4297 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4298 
4299 	if (!hdev->get_data_path_id) {
4300 		return mgmt_cmd_status(sk, hdev->id,
4301 				       MGMT_OP_SET_EXP_FEATURE,
4302 				       MGMT_STATUS_NOT_SUPPORTED);
4303 	}
4304 
4305 	if (changed) {
4306 		if (val)
4307 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4308 		else
4309 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4310 	}
4311 
4312 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4313 		    val, changed);
4314 
4315 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4316 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4317 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4318 	err = mgmt_cmd_complete(sk, hdev->id,
4319 				MGMT_OP_SET_EXP_FEATURE, 0,
4320 				&rp, sizeof(rp));
4321 
4322 	if (changed)
4323 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4324 
4325 	return err;
4326 }
4327 
4328 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4329 					  struct mgmt_cp_set_exp_feature *cp,
4330 					  u16 data_len)
4331 {
4332 	bool val, changed;
4333 	int err;
4334 	struct mgmt_rp_set_exp_feature rp;
4335 
4336 	/* Command requires to use a valid controller index */
4337 	if (!hdev)
4338 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4339 				       MGMT_OP_SET_EXP_FEATURE,
4340 				       MGMT_STATUS_INVALID_INDEX);
4341 
4342 	/* Parameters are limited to a single octet */
4343 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4344 		return mgmt_cmd_status(sk, hdev->id,
4345 				       MGMT_OP_SET_EXP_FEATURE,
4346 				       MGMT_STATUS_INVALID_PARAMS);
4347 
4348 	/* Only boolean on/off is supported */
4349 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4350 		return mgmt_cmd_status(sk, hdev->id,
4351 				       MGMT_OP_SET_EXP_FEATURE,
4352 				       MGMT_STATUS_INVALID_PARAMS);
4353 
4354 	val = !!cp->param[0];
4355 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4356 
4357 	if (!hci_dev_le_state_simultaneous(hdev)) {
4358 		return mgmt_cmd_status(sk, hdev->id,
4359 				       MGMT_OP_SET_EXP_FEATURE,
4360 				       MGMT_STATUS_NOT_SUPPORTED);
4361 	}
4362 
4363 	if (changed) {
4364 		if (val)
4365 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4366 		else
4367 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4368 	}
4369 
4370 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4371 		    val, changed);
4372 
4373 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4374 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4375 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4376 	err = mgmt_cmd_complete(sk, hdev->id,
4377 				MGMT_OP_SET_EXP_FEATURE, 0,
4378 				&rp, sizeof(rp));
4379 
4380 	if (changed)
4381 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4382 
4383 	return err;
4384 }
4385 
/* Dispatch table mapping experimental-feature UUIDs to their setter
 * functions. set_exp_feature() walks this table and calls the first
 * entry whose UUID matches the request; the all-NULL sentinel entry
 * terminates the walk.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	/* The all-zero UUID clears experimental features globally */
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4403 
4404 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4405 			   void *data, u16 data_len)
4406 {
4407 	struct mgmt_cp_set_exp_feature *cp = data;
4408 	size_t i = 0;
4409 
4410 	bt_dev_dbg(hdev, "sock %p", sk);
4411 
4412 	for (i = 0; exp_features[i].uuid; i++) {
4413 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4414 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4415 	}
4416 
4417 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4418 			       MGMT_OP_SET_EXP_FEATURE,
4419 			       MGMT_STATUS_NOT_SUPPORTED);
4420 }
4421 
/* MGMT_OP_GET_DEVICE_FLAGS handler: report the supported and currently
 * set per-device connection flags for one device.
 *
 * BR/EDR addresses are looked up in the acceptance list; LE addresses
 * in the connection-parameter list. An unknown address yields
 * MGMT_STATUS_INVALID_PARAMS with a zeroed reply body.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	/* Flags the stack supports for this controller, as a u32 mask */
	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
			__HCI_CONN_NUM_FLAGS);

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done; /* unknown device: INVALID_PARAMS */

		bitmap_to_arr32(&current_flags, br_params->flags,
				__HCI_CONN_NUM_FLAGS);
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		if (!params)
			goto done; /* unknown device: INVALID_PARAMS */

		bitmap_to_arr32(&current_flags, params->flags,
				__HCI_CONN_NUM_FLAGS);
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
4476 
4477 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4478 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4479 				 u32 supported_flags, u32 current_flags)
4480 {
4481 	struct mgmt_ev_device_flags_changed ev;
4482 
4483 	bacpy(&ev.addr.bdaddr, bdaddr);
4484 	ev.addr.type = bdaddr_type;
4485 	ev.supported_flags = cpu_to_le32(supported_flags);
4486 	ev.current_flags = cpu_to_le32(current_flags);
4487 
4488 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4489 }
4490 
4491 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4492 			    u16 len)
4493 {
4494 	struct mgmt_cp_set_device_flags *cp = data;
4495 	struct bdaddr_list_with_flags *br_params;
4496 	struct hci_conn_params *params;
4497 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4498 	u32 supported_flags;
4499 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4500 
4501 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4502 		   &cp->addr.bdaddr, cp->addr.type,
4503 		   __le32_to_cpu(current_flags));
4504 
4505 	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
4506 			__HCI_CONN_NUM_FLAGS);
4507 
4508 	if ((supported_flags | current_flags) != supported_flags) {
4509 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4510 			    current_flags, supported_flags);
4511 		goto done;
4512 	}
4513 
4514 	hci_dev_lock(hdev);
4515 
4516 	if (cp->addr.type == BDADDR_BREDR) {
4517 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4518 							      &cp->addr.bdaddr,
4519 							      cp->addr.type);
4520 
4521 		if (br_params) {
4522 			bitmap_from_u64(br_params->flags, current_flags);
4523 			status = MGMT_STATUS_SUCCESS;
4524 		} else {
4525 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4526 				    &cp->addr.bdaddr, cp->addr.type);
4527 		}
4528 	} else {
4529 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4530 						le_addr_type(cp->addr.type));
4531 		if (params) {
4532 			DECLARE_BITMAP(flags, __HCI_CONN_NUM_FLAGS);
4533 
4534 			bitmap_from_u64(flags, current_flags);
4535 
4536 			/* Devices using RPAs can only be programmed in the
4537 			 * acceptlist LL Privacy has been enable otherwise they
4538 			 * cannot mark HCI_CONN_FLAG_REMOTE_WAKEUP.
4539 			 */
4540 			if (test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, flags) &&
4541 			    !use_ll_privacy(hdev) &&
4542 			    hci_find_irk_by_addr(hdev, &params->addr,
4543 						 params->addr_type)) {
4544 				bt_dev_warn(hdev,
4545 					    "Cannot set wakeable for RPA");
4546 				goto unlock;
4547 			}
4548 
4549 			bitmap_from_u64(params->flags, current_flags);
4550 			status = MGMT_STATUS_SUCCESS;
4551 
4552 			/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4553 			 * has been set.
4554 			 */
4555 			if (test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY,
4556 				     params->flags))
4557 				hci_update_passive_scan(hdev);
4558 		} else {
4559 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4560 				    &cp->addr.bdaddr,
4561 				    le_addr_type(cp->addr.type));
4562 		}
4563 	}
4564 
4565 unlock:
4566 	hci_dev_unlock(hdev);
4567 
4568 done:
4569 	if (status == MGMT_STATUS_SUCCESS)
4570 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4571 				     supported_flags, current_flags);
4572 
4573 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4574 				 &cp->addr, sizeof(cp->addr));
4575 }
4576 
4577 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4578 				   u16 handle)
4579 {
4580 	struct mgmt_ev_adv_monitor_added ev;
4581 
4582 	ev.monitor_handle = cpu_to_le16(handle);
4583 
4584 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4585 }
4586 
/* Broadcast an ADV_MONITOR_REMOVED event for @handle.
 *
 * If a REMOVE_ADV_MONITOR command is pending for a specific (non-zero)
 * handle, its originating socket is skipped: that socket learns the
 * outcome from the command reply instead of the event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* A zero handle means "remove all"; then nobody is skipped */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
4606 
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report the supported and
 * enabled monitor features, the handle/pattern limits and the list of
 * currently registered monitor handles.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	/* assumes the idr never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES monitors (enforced at add
	 * time) -- TODO(review) confirm, otherwise the fill loop below
	 * could overrun this array.
	 */
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	/* The reply carries a variable-length trailing handle array */
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4655 
4656 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4657 {
4658 	struct mgmt_rp_add_adv_patterns_monitor rp;
4659 	struct mgmt_pending_cmd *cmd;
4660 	struct adv_monitor *monitor;
4661 	int err = 0;
4662 
4663 	hci_dev_lock(hdev);
4664 
4665 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4666 	if (!cmd) {
4667 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4668 		if (!cmd)
4669 			goto done;
4670 	}
4671 
4672 	monitor = cmd->user_data;
4673 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4674 
4675 	if (!status) {
4676 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4677 		hdev->adv_monitors_cnt++;
4678 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4679 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4680 		hci_update_passive_scan(hdev);
4681 	}
4682 
4683 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4684 				mgmt_status(status), &rp, sizeof(rp));
4685 	mgmt_pending_remove(cmd);
4686 
4687 done:
4688 	hci_dev_unlock(hdev);
4689 	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4690 		   rp.monitor_handle, status);
4691 
4692 	return err;
4693 }
4694 
/* Common tail for both ADD_ADV_PATTERNS_MONITOR variants.
 *
 * @m:      monitor to register; ownership is taken here. On any error
 *          path it is freed via hci_free_adv_monitor().
 * @status: non-zero if the caller already failed parsing; then the
 *          monitor is freed and the status returned to userspace.
 * @op:     the mgmt opcode being answered.
 *
 * If hci_add_adv_monitor() reports that controller interaction is
 * still pending, the reply is deferred to
 * mgmt_add_adv_patterns_monitor_complete(); otherwise the command is
 * completed immediately.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Serialize against other monitor and LE-state operations */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		/* Map the errno to the closest mgmt status code */
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	if (!pending) {
		/* Registered without controller interaction; finish now */
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4758 
4759 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4760 				   struct mgmt_adv_rssi_thresholds *rssi)
4761 {
4762 	if (rssi) {
4763 		m->rssi.low_threshold = rssi->low_threshold;
4764 		m->rssi.low_threshold_timeout =
4765 		    __le16_to_cpu(rssi->low_threshold_timeout);
4766 		m->rssi.high_threshold = rssi->high_threshold;
4767 		m->rssi.high_threshold_timeout =
4768 		    __le16_to_cpu(rssi->high_threshold_timeout);
4769 		m->rssi.sampling_period = rssi->sampling_period;
4770 	} else {
4771 		/* Default values. These numbers are the least constricting
4772 		 * parameters for MSFT API to work, so it behaves as if there
4773 		 * are no rssi parameter to consider. May need to be changed
4774 		 * if other API are to be supported.
4775 		 */
4776 		m->rssi.low_threshold = -127;
4777 		m->rssi.low_threshold_timeout = 60;
4778 		m->rssi.high_threshold = -127;
4779 		m->rssi.high_threshold_timeout = 0;
4780 		m->rssi.sampling_period = 0;
4781 	}
4782 }
4783 
4784 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4785 				    struct mgmt_adv_pattern *patterns)
4786 {
4787 	u8 offset = 0, length = 0;
4788 	struct adv_pattern *p = NULL;
4789 	int i;
4790 
4791 	for (i = 0; i < pattern_count; i++) {
4792 		offset = patterns[i].offset;
4793 		length = patterns[i].length;
4794 		if (offset >= HCI_MAX_AD_LENGTH ||
4795 		    length > HCI_MAX_AD_LENGTH ||
4796 		    (offset + length) > HCI_MAX_AD_LENGTH)
4797 			return MGMT_STATUS_INVALID_PARAMS;
4798 
4799 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4800 		if (!p)
4801 			return MGMT_STATUS_NO_RESOURCES;
4802 
4803 		p->ad_type = patterns[i].ad_type;
4804 		p->offset = patterns[i].offset;
4805 		p->length = patterns[i].length;
4806 		memcpy(p->value, patterns[i].value, p->length);
4807 
4808 		INIT_LIST_HEAD(&p->list);
4809 		list_add(&p->list, &m->patterns);
4810 	}
4811 
4812 	return MGMT_STATUS_SUCCESS;
4813 }
4814 
4815 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4816 				    void *data, u16 len)
4817 {
4818 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4819 	struct adv_monitor *m = NULL;
4820 	u8 status = MGMT_STATUS_SUCCESS;
4821 	size_t expected_size = sizeof(*cp);
4822 
4823 	BT_DBG("request for %s", hdev->name);
4824 
4825 	if (len <= sizeof(*cp)) {
4826 		status = MGMT_STATUS_INVALID_PARAMS;
4827 		goto done;
4828 	}
4829 
4830 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4831 	if (len != expected_size) {
4832 		status = MGMT_STATUS_INVALID_PARAMS;
4833 		goto done;
4834 	}
4835 
4836 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4837 	if (!m) {
4838 		status = MGMT_STATUS_NO_RESOURCES;
4839 		goto done;
4840 	}
4841 
4842 	INIT_LIST_HEAD(&m->patterns);
4843 
4844 	parse_adv_monitor_rssi(m, NULL);
4845 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4846 
4847 done:
4848 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4849 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4850 }
4851 
4852 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4853 					 void *data, u16 len)
4854 {
4855 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4856 	struct adv_monitor *m = NULL;
4857 	u8 status = MGMT_STATUS_SUCCESS;
4858 	size_t expected_size = sizeof(*cp);
4859 
4860 	BT_DBG("request for %s", hdev->name);
4861 
4862 	if (len <= sizeof(*cp)) {
4863 		status = MGMT_STATUS_INVALID_PARAMS;
4864 		goto done;
4865 	}
4866 
4867 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4868 	if (len != expected_size) {
4869 		status = MGMT_STATUS_INVALID_PARAMS;
4870 		goto done;
4871 	}
4872 
4873 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4874 	if (!m) {
4875 		status = MGMT_STATUS_NO_RESOURCES;
4876 		goto done;
4877 	}
4878 
4879 	INIT_LIST_HEAD(&m->patterns);
4880 
4881 	parse_adv_monitor_rssi(m, &cp->rssi);
4882 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4883 
4884 done:
4885 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4886 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4887 }
4888 
4889 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4890 {
4891 	struct mgmt_rp_remove_adv_monitor rp;
4892 	struct mgmt_cp_remove_adv_monitor *cp;
4893 	struct mgmt_pending_cmd *cmd;
4894 	int err = 0;
4895 
4896 	hci_dev_lock(hdev);
4897 
4898 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4899 	if (!cmd)
4900 		goto done;
4901 
4902 	cp = cmd->param;
4903 	rp.monitor_handle = cp->monitor_handle;
4904 
4905 	if (!status)
4906 		hci_update_passive_scan(hdev);
4907 
4908 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4909 				mgmt_status(status), &rp, sizeof(rp));
4910 	mgmt_pending_remove(cmd);
4911 
4912 done:
4913 	hci_dev_unlock(hdev);
4914 	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4915 		   rp.monitor_handle, status);
4916 
4917 	return err;
4918 }
4919 
/* MGMT_OP_REMOVE_ADV_MONITOR handler.
 *
 * A non-zero monitor_handle removes that single monitor; zero removes
 * all monitors. If removal needs controller interaction the reply is
 * deferred to mgmt_remove_adv_monitor_complete(); otherwise the
 * command completes immediately.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Serialize against other monitor and LE-state operations */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		/* -ENOENT here means the handle did not match any monitor */
		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4984 
/* hci_cmd_sync completion for READ_LOCAL_OOB_DATA: translate the
 * controller reply (legacy or extended format, depending on whether
 * BR/EDR Secure Connections is enabled) into a mgmt response.
 *
 * cmd->skb may be NULL, an ERR_PTR or a real skb; all three are
 * handled here. The pending cmd and the skb are released on every
 * path.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Derive a status from the skb when the sync call itself passed */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: only the P-192 hash/randomizer is present */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values present */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5051 
5052 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5053 {
5054 	struct mgmt_pending_cmd *cmd = data;
5055 
5056 	if (bredr_sc_enabled(hdev))
5057 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5058 	else
5059 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5060 
5061 	if (IS_ERR(cmd->skb))
5062 		return PTR_ERR(cmd->skb);
5063 	else
5064 		return 0;
5065 }
5066 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: queue a synchronous HCI request
 * for the local OOB data. Requires a powered, SSP-capable controller.
 * The reply is produced in read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	/* On queueing failure, reply with FAILED and drop the cmd here;
	 * otherwise the completion callback owns and frees it.
	 */
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5108 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store out-of-band pairing data
 * received from a remote device.
 *
 * Two payload formats are accepted, distinguished by length: the
 * legacy format with only P-192 hash/randomizer (BR/EDR only), and
 * the extended format that also carries P-256 values. All-zero
 * hash/randomizer pairs disable OOB data for that curve.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy format: P-192 values only, BR/EDR addresses only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended format: P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5216 
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: drop stored OOB data for one
 * BR/EDR device, or for all devices when BDADDR_ANY is given.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* OOB data is only stored for BR/EDR addresses */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* BDADDR_ANY clears the whole OOB data store */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5253 
5254 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5255 {
5256 	struct mgmt_pending_cmd *cmd;
5257 
5258 	bt_dev_dbg(hdev, "status %u", status);
5259 
5260 	hci_dev_lock(hdev);
5261 
5262 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5263 	if (!cmd)
5264 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5265 
5266 	if (!cmd)
5267 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5268 
5269 	if (cmd) {
5270 		cmd->cmd_complete(cmd, mgmt_status(status));
5271 		mgmt_pending_remove(cmd);
5272 	}
5273 
5274 	hci_dev_unlock(hdev);
5275 }
5276 
/* Check whether discovery @type is usable on @hdev.
 *
 * Returns true when valid; otherwise false with the reason written to
 * *mgmt_status. INTERLEAVED deliberately falls through to the BREDR
 * case since it needs both LE and BR/EDR support.
 */
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
{
	switch (type) {
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	case DISCOV_TYPE_INTERLEAVED:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		fallthrough;
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
5303 
/* hci_cmd_sync completion for start discovery: reply to the pending
 * command and advance the discovery state machine to FINDING on
 * success or back to STOPPED on error.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if this cmd is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The first byte of cmd->param is the discovery type octet,
	 * which is echoed back in the reply.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5322 
/* hci_cmd_sync callback: kick off discovery for @hdev. The pending
 * cmd in @data is unused here; completion handling lives in
 * start_discovery_complete().
 */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5327 
/* Common handler for the Start Discovery, Start Limited Discovery and
 * Start Service Discovery mgmt commands (@op selects which).  Validates
 * power/state/type, records the requested parameters and queues the HCI
 * work; the final reply is sent from start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Discovery requires a powered controller. */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery can run at a time, and periodic inquiry
	 * conflicts with it.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5398 
/* MGMT_OP_START_DISCOVERY handler: thin wrapper around the shared
 * discovery start path.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5405 
/* MGMT_OP_START_LIMITED_DISCOVERY handler: same shared path, but
 * start_discovery_internal() sets hdev->discovery.limited for this op.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5413 
/* MGMT_OP_START_SERVICE_DISCOVERY handler.  Like Start Discovery, but
 * the command additionally carries an RSSI threshold and a list of
 * 128-bit service UUIDs (16 bytes each) used to filter results.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Largest UUID count that keeps the total command length in u16
	 * range, preventing overflow in the expected_len check below.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery can run at a time, and periodic inquiry
	 * conflicts with it.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The variable-length UUID list must account exactly for the
	 * remainder of the command payload.
	 */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep a private copy of the UUID filter list; freed by
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5525 
5526 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5527 {
5528 	struct mgmt_pending_cmd *cmd;
5529 
5530 	bt_dev_dbg(hdev, "status %u", status);
5531 
5532 	hci_dev_lock(hdev);
5533 
5534 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5535 	if (cmd) {
5536 		cmd->cmd_complete(cmd, mgmt_status(status));
5537 		mgmt_pending_remove(cmd);
5538 	}
5539 
5540 	hci_dev_unlock(hdev);
5541 }
5542 
5543 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
5544 {
5545 	struct mgmt_pending_cmd *cmd = data;
5546 
5547 	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
5548 		return;
5549 
5550 	bt_dev_dbg(hdev, "err %d", err);
5551 
5552 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5553 			  cmd->param, 1);
5554 	mgmt_pending_remove(cmd);
5555 
5556 	if (!err)
5557 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5558 }
5559 
/* hci_cmd_sync work function: perform the actual discovery teardown. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
5564 
/* MGMT_OP_STOP_DISCOVERY handler.  Rejects the request when no
 * discovery is active or when the given type does not match the one
 * currently running; otherwise queues the teardown and replies from
 * stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match the one used when discovery was started. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5609 
/* MGMT_OP_CONFIRM_NAME handler.  Userspace tells us whether it already
 * knows the remote name for a device found during discovery; this
 * updates the inquiry-cache entry so name resolution can be skipped or
 * scheduled accordingly.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense while discovery is active. */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolution needed, drop the entry
		 * from the resolve list.
		 */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
5651 
5652 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5653 			u16 len)
5654 {
5655 	struct mgmt_cp_block_device *cp = data;
5656 	u8 status;
5657 	int err;
5658 
5659 	bt_dev_dbg(hdev, "sock %p", sk);
5660 
5661 	if (!bdaddr_type_is_valid(cp->addr.type))
5662 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5663 					 MGMT_STATUS_INVALID_PARAMS,
5664 					 &cp->addr, sizeof(cp->addr));
5665 
5666 	hci_dev_lock(hdev);
5667 
5668 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5669 				  cp->addr.type);
5670 	if (err < 0) {
5671 		status = MGMT_STATUS_FAILED;
5672 		goto done;
5673 	}
5674 
5675 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5676 		   sk);
5677 	status = MGMT_STATUS_SUCCESS;
5678 
5679 done:
5680 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5681 				&cp->addr, sizeof(cp->addr));
5682 
5683 	hci_dev_unlock(hdev);
5684 
5685 	return err;
5686 }
5687 
5688 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5689 			  u16 len)
5690 {
5691 	struct mgmt_cp_unblock_device *cp = data;
5692 	u8 status;
5693 	int err;
5694 
5695 	bt_dev_dbg(hdev, "sock %p", sk);
5696 
5697 	if (!bdaddr_type_is_valid(cp->addr.type))
5698 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5699 					 MGMT_STATUS_INVALID_PARAMS,
5700 					 &cp->addr, sizeof(cp->addr));
5701 
5702 	hci_dev_lock(hdev);
5703 
5704 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5705 				  cp->addr.type);
5706 	if (err < 0) {
5707 		status = MGMT_STATUS_INVALID_PARAMS;
5708 		goto done;
5709 	}
5710 
5711 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5712 		   sk);
5713 	status = MGMT_STATUS_SUCCESS;
5714 
5715 done:
5716 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5717 				&cp->addr, sizeof(cp->addr));
5718 
5719 	hci_dev_unlock(hdev);
5720 
5721 	return err;
5722 }
5723 
/* hci_cmd_sync work function: refresh EIR data so it reflects the new
 * Device ID values.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
5728 
5729 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5730 			 u16 len)
5731 {
5732 	struct mgmt_cp_set_device_id *cp = data;
5733 	int err;
5734 	__u16 source;
5735 
5736 	bt_dev_dbg(hdev, "sock %p", sk);
5737 
5738 	source = __le16_to_cpu(cp->source);
5739 
5740 	if (source > 0x0002)
5741 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5742 				       MGMT_STATUS_INVALID_PARAMS);
5743 
5744 	hci_dev_lock(hdev);
5745 
5746 	hdev->devid_source = source;
5747 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5748 	hdev->devid_product = __le16_to_cpu(cp->product);
5749 	hdev->devid_version = __le16_to_cpu(cp->version);
5750 
5751 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5752 				NULL, 0);
5753 
5754 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
5755 
5756 	hci_dev_unlock(hdev);
5757 
5758 	return err;
5759 }
5760 
/* Log the outcome of re-enabling an advertising instance. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
5768 
/* hci_cmd_sync completion callback for Set Advertising.  Synchronizes
 * the HCI_ADVERTISING flag with the controller state, answers all
 * pending Set Advertising commands and, when the setting was just
 * disabled, re-schedules any configured advertising instances.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command with the
		 * translated status.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the actual LE advertising state into the mgmt flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last matching socket. */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one. */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
5816 
/* hci_cmd_sync work function for Set Advertising.  cp->val: 0x00 off,
 * 0x01 on, 0x02 on and connectable.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Connectable mode is only requested with value 0x02. */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
5850 
/* MGMT_OP_SET_ADVERTISING handler.  cp->val: 0x00 off, 0x01 on, 0x02 on
 * and connectable.  Either answers immediately by toggling flags (when
 * no HCI traffic is needed or possible) or queues set_adv_sync().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising may be temporarily paused (e.g. by other machinery);
	 * changing the setting then is not allowed.
	 */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only when a flag actually changed. */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5934 
5935 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5936 			      void *data, u16 len)
5937 {
5938 	struct mgmt_cp_set_static_address *cp = data;
5939 	int err;
5940 
5941 	bt_dev_dbg(hdev, "sock %p", sk);
5942 
5943 	if (!lmp_le_capable(hdev))
5944 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5945 				       MGMT_STATUS_NOT_SUPPORTED);
5946 
5947 	if (hdev_is_powered(hdev))
5948 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5949 				       MGMT_STATUS_REJECTED);
5950 
5951 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5952 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5953 			return mgmt_cmd_status(sk, hdev->id,
5954 					       MGMT_OP_SET_STATIC_ADDRESS,
5955 					       MGMT_STATUS_INVALID_PARAMS);
5956 
5957 		/* Two most significant bits shall be set */
5958 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5959 			return mgmt_cmd_status(sk, hdev->id,
5960 					       MGMT_OP_SET_STATIC_ADDRESS,
5961 					       MGMT_STATUS_INVALID_PARAMS);
5962 	}
5963 
5964 	hci_dev_lock(hdev);
5965 
5966 	bacpy(&hdev->static_addr, &cp->bdaddr);
5967 
5968 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5969 	if (err < 0)
5970 		goto unlock;
5971 
5972 	err = new_settings(hdev, sk);
5973 
5974 unlock:
5975 	hci_dev_unlock(hdev);
5976 	return err;
5977 }
5978 
5979 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5980 			   void *data, u16 len)
5981 {
5982 	struct mgmt_cp_set_scan_params *cp = data;
5983 	__u16 interval, window;
5984 	int err;
5985 
5986 	bt_dev_dbg(hdev, "sock %p", sk);
5987 
5988 	if (!lmp_le_capable(hdev))
5989 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5990 				       MGMT_STATUS_NOT_SUPPORTED);
5991 
5992 	interval = __le16_to_cpu(cp->interval);
5993 
5994 	if (interval < 0x0004 || interval > 0x4000)
5995 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5996 				       MGMT_STATUS_INVALID_PARAMS);
5997 
5998 	window = __le16_to_cpu(cp->window);
5999 
6000 	if (window < 0x0004 || window > 0x4000)
6001 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6002 				       MGMT_STATUS_INVALID_PARAMS);
6003 
6004 	if (window > interval)
6005 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6006 				       MGMT_STATUS_INVALID_PARAMS);
6007 
6008 	hci_dev_lock(hdev);
6009 
6010 	hdev->le_scan_interval = interval;
6011 	hdev->le_scan_window = window;
6012 
6013 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6014 				NULL, 0);
6015 
6016 	/* If background scan is running, restart it so new parameters are
6017 	 * loaded.
6018 	 */
6019 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6020 	    hdev->discovery.state == DISCOVERY_STOPPED)
6021 		hci_update_passive_scan(hdev);
6022 
6023 	hci_dev_unlock(hdev);
6024 
6025 	return err;
6026 }
6027 
/* hci_cmd_sync completion callback for Set Fast Connectable: update the
 * flag to match the outcome and notify userspace.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* Command was allocated with mgmt_pending_new(), so free (not
	 * remove) it here.
	 */
	mgmt_pending_free(cmd);
}
6051 
/* hci_cmd_sync work function: write the requested fast connectable
 * setting to the controller.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
6059 
/* MGMT_OP_SET_FAST_CONNECTABLE handler.  Requires BR/EDR and at least
 * Bluetooth 1.2.  When powered off only the flag is toggled; otherwise
 * the page-scan change is queued to the controller.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just confirm the current settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag needs toggling; the controller
	 * is configured on power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6115 
/* hci_cmd_sync completion callback for Set BR/EDR: roll back the
 * eagerly-set flag on failure, otherwise confirm the new settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* Allocated with mgmt_pending_new(), so free rather than remove. */
	mgmt_pending_free(cmd);
}
6138 
6139 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6140 {
6141 	int status;
6142 
6143 	status = hci_write_fast_connectable_sync(hdev, false);
6144 
6145 	if (!status)
6146 		status = hci_update_scan_sync(hdev);
6147 
6148 	/* Since only the advertising data flags will change, there
6149 	 * is no need to update the scan response data.
6150 	 */
6151 	if (!status)
6152 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6153 
6154 	return status;
6155 }
6156 
/* MGMT_OP_SET_BREDR handler.  Toggles BR/EDR support on a dual-mode
 * controller.  Disabling while powered is rejected, as is re-enabling
 * when a static address or secure connections makes the resulting
 * configuration invalid.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just confirm the current settings. */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off only flags are toggled; disabling BR/EDR also
	 * clears all BR/EDR-specific settings.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6257 
/* hci_cmd_sync completion callback for Set Secure Connections: update
 * the SC flags to match the requested mode and notify userspace.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	/* 0x00 disables SC, 0x01 enables it, 0x02 enables SC-only mode. */
	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	/* Allocated with mgmt_pending_new(), so free rather than remove. */
	mgmt_pending_free(cmd);
}
6295 
/* hci_cmd_sync work function: write the SC support setting to the
 * controller.
 */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6307 
/* MGMT_OP_SET_SECURE_CONN handler.  cp->val: 0x00 off, 0x01 on, 0x02
 * SC-only mode.  When the controller cannot be touched (powered off, no
 * SC support, BR/EDR disabled) only the flags are toggled; otherwise
 * the change is queued to the controller.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR active, SC requires SSP to be enabled first. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: no HCI traffic needed or possible. */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested: just confirm the current settings. */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6388 
/* MGMT_OP_SET_DEBUG_KEYS handler.  cp->val: 0x00 discard debug keys,
 * 0x01 keep them, 0x02 additionally use SSP debug mode.  Flag updates
 * happen unconditionally; the SSP debug mode command is only sent when
 * powered with SSP enabled and the usage actually changed.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only value 0x02 turns on active use of debug keys. */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6435 
/* MGMT Set Privacy command handler.
 *
 * cp->privacy: 0x00 = privacy off, 0x01 = privacy on, 0x02 = limited
 * privacy. Rejected while the adapter is powered since the IRK and
 * privacy mode cannot be switched on a live controller here.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		/* Install the new local IRK and force a fresh RPA to be
		 * generated for the controller and all advertising
		 * instances.
		 */
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		/* Privacy disabled: wipe the IRK and related state */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6492 
6493 static bool irk_is_valid(struct mgmt_irk_info *irk)
6494 {
6495 	switch (irk->addr.type) {
6496 	case BDADDR_LE_PUBLIC:
6497 		return true;
6498 
6499 	case BDADDR_LE_RANDOM:
6500 		/* Two most significant bits shall be set */
6501 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6502 			return false;
6503 		return true;
6504 	}
6505 
6506 	return false;
6507 }
6508 
/* MGMT Load IRKs command handler.
 *
 * Replaces the complete set of stored Identity Resolving Keys with
 * the list supplied by user space. The whole list is validated first
 * so that the existing store is only cleared for a fully valid
 * request.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Largest count that still fits a u16 total message length */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before touching the existing IRK store */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked key values are skipped, not
		 * treated as an error.
		 */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space providing IRKs implies it can resolve RPAs */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6579 
6580 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6581 {
6582 	if (key->initiator != 0x00 && key->initiator != 0x01)
6583 		return false;
6584 
6585 	switch (key->addr.type) {
6586 	case BDADDR_LE_PUBLIC:
6587 		return true;
6588 
6589 	case BDADDR_LE_RANDOM:
6590 		/* Two most significant bits shall be set */
6591 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6592 			return false;
6593 		return true;
6594 	}
6595 
6596 	return false;
6597 }
6598 
/* MGMT Load Long Term Keys command handler.
 *
 * Replaces the complete set of stored SMP Long Term Keys with the
 * list supplied by user space. All entries are validated before the
 * existing store is cleared.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Largest count that still fits a u16 total message length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before touching the existing LTK store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked key values are skipped, not
		 * treated as an error.
		 */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto SMP key type/authentication.
		 * For legacy (non-P256) keys the initiator flag decides
		 * the key's role.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Falls into default: P256 debug keys are never
			 * added to the store, so the assignments above
			 * are effectively documentation only.
			 */
			fallthrough;
		default:
			/* Unknown types are silently skipped */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6694 
/* Completion callback for Get Connection Information.
 *
 * Builds the reply from the values cached on the hci_conn by
 * get_conn_info_sync() and releases the connection reference taken
 * when the request was queued.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* On failure report explicit "invalid" marker values */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	/* Drop the hold + reference taken in get_conn_info(); conn may
	 * be NULL if get_conn_info_sync() already released it.
	 */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	mgmt_pending_free(cmd);
}
6728 
/* Synchronous worker for Get Connection Information.
 *
 * Re-validates that the connection recorded in cmd->user_data still
 * exists and is connected, then refreshes RSSI and (when needed) TX
 * power values via synchronous HCI reads. Returns 0 on success, an
 * error/status code otherwise.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
		/* Connection went away: release our reference here so the
		 * completion callback does not double-drop it.
		 */
		if (cmd->user_data) {
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
		return MGMT_STATUS_NOT_CONNECTED;
	}

	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
6771 
/* MGMT Get Connection Information command handler.
 *
 * Answers from the values cached on the hci_conn when they are still
 * fresh; otherwise queues a synchronous refresh (get_conn_info_sync)
 * and replies from its completion callback.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look the connection up on the transport matching the address
	 * type (BR/EDR ACL vs LE).
	 */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd)
			err = -ENOMEM;
		else
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		/* Pin the connection for the duration of the queued work;
		 * released in get_conn_info_sync()/get_conn_info_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6865 
/* Completion callback for Get Clock Information.
 *
 * Builds the reply from hdev->clock and, when a connection was
 * involved, the piconet clock values cached on the hci_conn, then
 * releases the connection reference taken when the request was
 * queued.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On failure reply with zeroed clock values */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	/* conn may be NULL if get_clock_info_sync() released it */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
6898 
/* Synchronous worker for Get Clock Information.
 *
 * Always reads the local clock; when the request named a connection
 * that is still alive, additionally reads the piconet clock for it.
 * Releases the connection reference if the connection went away.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	/* handle 0 / which 0 reads the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	err = hci_read_clock_sync(hdev, &hci_cp);

	if (conn) {
		/* Make sure connection still exists */
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);

		if (conn && conn == cmd->user_data &&
		    conn->state == BT_CONNECTED) {
			hci_cp.handle = cpu_to_le16(conn->handle);
			hci_cp.which = 0x01; /* Piconet clock */
			err = hci_read_clock_sync(hdev, &hci_cp);
		} else if (cmd->user_data) {
			/* Connection gone: drop our reference here so the
			 * completion callback does not double-drop it.
			 */
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
	}

	return err;
}
6929 
/* MGMT Get Clock Information command handler.
 *
 * Only BR/EDR addresses are accepted. BDADDR_ANY requests just the
 * local clock; a specific address additionally requests the piconet
 * clock for that (connected) peer.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address must name an existing, connected peer */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);

	} else if (conn) {
		/* Pin the connection for the duration of the queued work;
		 * released in get_clock_info_sync()/_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
6997 
6998 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6999 {
7000 	struct hci_conn *conn;
7001 
7002 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7003 	if (!conn)
7004 		return false;
7005 
7006 	if (conn->dst_type != type)
7007 		return false;
7008 
7009 	if (conn->state != BT_CONNECTED)
7010 		return false;
7011 
7012 	return true;
7013 }
7014 
/* This function requires the caller holds hdev->lock */
/* Set (or create) the auto-connect policy for a connection parameter
 * entry and move it onto the matching pending-connection or
 * pending-report list. Returns 0 on success, -EIO if the params entry
 * could not be created.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on
	 * before re-queueing it according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if there is not one
		 * established already.
		 */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7059 
7060 static void device_added(struct sock *sk, struct hci_dev *hdev,
7061 			 bdaddr_t *bdaddr, u8 type, u8 action)
7062 {
7063 	struct mgmt_ev_device_added ev;
7064 
7065 	bacpy(&ev.addr.bdaddr, bdaddr);
7066 	ev.addr.type = type;
7067 	ev.action = action;
7068 
7069 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7070 }
7071 
/* hci_cmd_sync worker for Add Device: refresh passive scanning so the
 * newly added entry takes effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7076 
/* MGMT Add Device command handler.
 *
 * action: 0x00 = background scan/report (LE only), 0x01 = allow
 * incoming connection, 0x02 = auto-connect (LE only). BR/EDR
 * addresses are added to the accept list; LE addresses get a
 * hci_conn_params entry with the matching auto-connect policy.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Translate the mgmt action value to the internal policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Read back the per-device flags for the event below */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			bitmap_to_arr32(&current_flags, params->flags,
					__HCI_CONN_NUM_FLAGS);
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
			__HCI_CONN_NUM_FLAGS);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7180 
7181 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7182 			   bdaddr_t *bdaddr, u8 type)
7183 {
7184 	struct mgmt_ev_device_removed ev;
7185 
7186 	bacpy(&ev.addr.bdaddr, bdaddr);
7187 	ev.addr.type = type;
7188 
7189 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7190 }
7191 
/* hci_cmd_sync worker for Remove Device: refresh passive scanning so
 * the removal takes effect.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7196 
/* MGMT Remove Device command handler.
 *
 * With a specific address: removes it from the BR/EDR accept list or
 * deletes its LE connection parameters. With BDADDR_ANY (and type 0):
 * clears the whole accept list and all removable LE connection
 * parameter entries.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			/* BR/EDR path needs no passive scan update */
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed
		 * through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY is only valid with address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a pending explicit connect,
			 * just downgrade their policy.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7324 
/* MGMT Load Connection Parameters command handler.
 *
 * Replaces all non-disabled LE connection parameter entries with the
 * supplied list. Individual invalid entries are logged and skipped
 * rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest count that still fits a u16 total message length */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types are meaningful here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7409 
/* MGMT Set External Configuration command handler.
 *
 * Toggles the HCI_EXT_CONFIGURED flag for controllers with the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. When the change flips the overall
 * configured state, the controller index is re-registered
 * accordingly.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the UNCONFIGURED flag no longer matches the real configured
	 * state, move the index between the configured and unconfigured
	 * lists.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: power on to complete setup */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose as raw-only index */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7465 
/* MGMT_OP_SET_PUBLIC_ADDRESS handler.
 *
 * Stores a new public address to be programmed through the driver's
 * set_bdaddr callback.  Only allowed while powered off; BDADDR_ANY is
 * rejected.  If setting the address completes the configuration of an
 * unconfigured controller, the index is re-registered and a power-on
 * is queued so the address gets applied.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The address can only be changed while powered off. */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* An all-zero address is not a valid public address. */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Without driver support the address could never be applied. */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only send notifications if the address actually changed. */
	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If the missing public address was what kept the controller
	 * unconfigured, re-register the index as configured and kick off
	 * the power-on sequence to program the new address.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7517 
7518 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
7519 					     int err)
7520 {
7521 	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7522 	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7523 	u8 *h192, *r192, *h256, *r256;
7524 	struct mgmt_pending_cmd *cmd = data;
7525 	struct sk_buff *skb = cmd->skb;
7526 	u8 status = mgmt_status(err);
7527 	u16 eir_len;
7528 
7529 	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
7530 		return;
7531 
7532 	if (!status) {
7533 		if (!skb)
7534 			status = MGMT_STATUS_FAILED;
7535 		else if (IS_ERR(skb))
7536 			status = mgmt_status(PTR_ERR(skb));
7537 		else
7538 			status = mgmt_status(skb->data[0]);
7539 	}
7540 
7541 	bt_dev_dbg(hdev, "status %u", status);
7542 
7543 	mgmt_cp = cmd->param;
7544 
7545 	if (status) {
7546 		status = mgmt_status(status);
7547 		eir_len = 0;
7548 
7549 		h192 = NULL;
7550 		r192 = NULL;
7551 		h256 = NULL;
7552 		r256 = NULL;
7553 	} else if (!bredr_sc_enabled(hdev)) {
7554 		struct hci_rp_read_local_oob_data *rp;
7555 
7556 		if (skb->len != sizeof(*rp)) {
7557 			status = MGMT_STATUS_FAILED;
7558 			eir_len = 0;
7559 		} else {
7560 			status = MGMT_STATUS_SUCCESS;
7561 			rp = (void *)skb->data;
7562 
7563 			eir_len = 5 + 18 + 18;
7564 			h192 = rp->hash;
7565 			r192 = rp->rand;
7566 			h256 = NULL;
7567 			r256 = NULL;
7568 		}
7569 	} else {
7570 		struct hci_rp_read_local_oob_ext_data *rp;
7571 
7572 		if (skb->len != sizeof(*rp)) {
7573 			status = MGMT_STATUS_FAILED;
7574 			eir_len = 0;
7575 		} else {
7576 			status = MGMT_STATUS_SUCCESS;
7577 			rp = (void *)skb->data;
7578 
7579 			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7580 				eir_len = 5 + 18 + 18;
7581 				h192 = NULL;
7582 				r192 = NULL;
7583 			} else {
7584 				eir_len = 5 + 18 + 18 + 18 + 18;
7585 				h192 = rp->hash192;
7586 				r192 = rp->rand192;
7587 			}
7588 
7589 			h256 = rp->hash256;
7590 			r256 = rp->rand256;
7591 		}
7592 	}
7593 
7594 	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7595 	if (!mgmt_rp)
7596 		goto done;
7597 
7598 	if (eir_len == 0)
7599 		goto send_rsp;
7600 
7601 	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7602 				  hdev->dev_class, 3);
7603 
7604 	if (h192 && r192) {
7605 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7606 					  EIR_SSP_HASH_C192, h192, 16);
7607 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7608 					  EIR_SSP_RAND_R192, r192, 16);
7609 	}
7610 
7611 	if (h256 && r256) {
7612 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7613 					  EIR_SSP_HASH_C256, h256, 16);
7614 		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7615 					  EIR_SSP_RAND_R256, r256, 16);
7616 	}
7617 
7618 send_rsp:
7619 	mgmt_rp->type = mgmt_cp->type;
7620 	mgmt_rp->eir_len = cpu_to_le16(eir_len);
7621 
7622 	err = mgmt_cmd_complete(cmd->sk, hdev->id,
7623 				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7624 				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7625 	if (err < 0 || status)
7626 		goto done;
7627 
7628 	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7629 
7630 	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7631 				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7632 				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7633 done:
7634 	if (skb && !IS_ERR(skb))
7635 		kfree_skb(skb);
7636 
7637 	kfree(mgmt_rp);
7638 	mgmt_pending_remove(cmd);
7639 }
7640 
7641 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7642 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7643 {
7644 	struct mgmt_pending_cmd *cmd;
7645 	int err;
7646 
7647 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7648 			       cp, sizeof(*cp));
7649 	if (!cmd)
7650 		return -ENOMEM;
7651 
7652 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7653 				 read_local_oob_ext_data_complete);
7654 
7655 	if (err < 0) {
7656 		mgmt_pending_remove(cmd);
7657 		return err;
7658 	}
7659 
7660 	return 0;
7661 }
7662 
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 *
 * Builds out-of-band pairing data for either BR/EDR or LE (mixed type
 * masks are rejected).  The BR/EDR path with SSP enabled needs the
 * hash/randomizer from the controller and therefore defers the reply to
 * read_local_oob_ext_data_complete(); all other paths assemble the EIR
 * data inline and reply immediately.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-compute the worst-case EIR length for the requested address
	 * type so the reply buffer can be allocated up front.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			/* Mixing BR/EDR and LE, or unknown bits, is not
			 * supported.
			 */
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	/* Any failure so far is reported with an empty EIR blob. */
	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* The OOB hash/randomizer has to come from the
			 * controller; the reply is then sent from the
			 * request's completion handler.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will actually see; the extra
		 * byte flags its type (0x01 = random, 0x00 = public).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* The LE role value depends on whether we are currently
		 * advertising.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester is implicitly interested in future updates. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Let other interested sockets know about the fresh OOB data. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7823 
7824 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7825 {
7826 	u32 flags = 0;
7827 
7828 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7829 	flags |= MGMT_ADV_FLAG_DISCOV;
7830 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7831 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7832 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7833 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7834 	flags |= MGMT_ADV_PARAM_DURATION;
7835 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7836 	flags |= MGMT_ADV_PARAM_INTERVALS;
7837 	flags |= MGMT_ADV_PARAM_TX_POWER;
7838 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7839 
7840 	/* In extended adv TX_POWER returned from Set Adv Param
7841 	 * will be always valid.
7842 	 */
7843 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7844 	    ext_adv_capable(hdev))
7845 		flags |= MGMT_ADV_FLAG_TX_POWER;
7846 
7847 	if (ext_adv_capable(hdev)) {
7848 		flags |= MGMT_ADV_FLAG_SEC_1M;
7849 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7850 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7851 
7852 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7853 			flags |= MGMT_ADV_FLAG_SEC_2M;
7854 
7855 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7856 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7857 	}
7858 
7859 	return flags;
7860 }
7861 
7862 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7863 			     void *data, u16 data_len)
7864 {
7865 	struct mgmt_rp_read_adv_features *rp;
7866 	size_t rp_len;
7867 	int err;
7868 	struct adv_info *adv_instance;
7869 	u32 supported_flags;
7870 	u8 *instance;
7871 
7872 	bt_dev_dbg(hdev, "sock %p", sk);
7873 
7874 	if (!lmp_le_capable(hdev))
7875 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7876 				       MGMT_STATUS_REJECTED);
7877 
7878 	hci_dev_lock(hdev);
7879 
7880 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7881 	rp = kmalloc(rp_len, GFP_ATOMIC);
7882 	if (!rp) {
7883 		hci_dev_unlock(hdev);
7884 		return -ENOMEM;
7885 	}
7886 
7887 	supported_flags = get_supported_adv_flags(hdev);
7888 
7889 	rp->supported_flags = cpu_to_le32(supported_flags);
7890 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7891 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7892 	rp->max_instances = hdev->le_num_of_adv_sets;
7893 	rp->num_instances = hdev->adv_instance_cnt;
7894 
7895 	instance = rp->instance;
7896 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7897 		*instance = adv_instance->instance;
7898 		instance++;
7899 	}
7900 
7901 	hci_dev_unlock(hdev);
7902 
7903 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7904 				MGMT_STATUS_SUCCESS, rp, rp_len);
7905 
7906 	kfree(rp);
7907 
7908 	return err;
7909 }
7910 
7911 static u8 calculate_name_len(struct hci_dev *hdev)
7912 {
7913 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7914 
7915 	return eir_append_local_name(hdev, buf, 0);
7916 }
7917 
7918 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7919 			   bool is_adv_data)
7920 {
7921 	u8 max_len = HCI_MAX_AD_LENGTH;
7922 
7923 	if (is_adv_data) {
7924 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7925 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7926 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7927 			max_len -= 3;
7928 
7929 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7930 			max_len -= 3;
7931 	} else {
7932 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7933 			max_len -= calculate_name_len(hdev);
7934 
7935 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7936 			max_len -= 4;
7937 	}
7938 
7939 	return max_len;
7940 }
7941 
7942 static bool flags_managed(u32 adv_flags)
7943 {
7944 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7945 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7946 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7947 }
7948 
7949 static bool tx_power_managed(u32 adv_flags)
7950 {
7951 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7952 }
7953 
7954 static bool name_managed(u32 adv_flags)
7955 {
7956 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7957 }
7958 
7959 static bool appearance_managed(u32 adv_flags)
7960 {
7961 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7962 }
7963 
7964 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7965 			      u8 len, bool is_adv_data)
7966 {
7967 	int i, cur_len;
7968 	u8 max_len;
7969 
7970 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7971 
7972 	if (len > max_len)
7973 		return false;
7974 
7975 	/* Make sure that the data is correctly formatted. */
7976 	for (i = 0; i < len; i += (cur_len + 1)) {
7977 		cur_len = data[i];
7978 
7979 		if (!cur_len)
7980 			continue;
7981 
7982 		if (data[i + 1] == EIR_FLAGS &&
7983 		    (!is_adv_data || flags_managed(adv_flags)))
7984 			return false;
7985 
7986 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7987 			return false;
7988 
7989 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7990 			return false;
7991 
7992 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7993 			return false;
7994 
7995 		if (data[i + 1] == EIR_APPEARANCE &&
7996 		    appearance_managed(adv_flags))
7997 			return false;
7998 
7999 		/* If the current field length would exceed the total data
8000 		 * length, then it's invalid.
8001 		 */
8002 		if (i + cur_len >= len)
8003 			return false;
8004 	}
8005 
8006 	return true;
8007 }
8008 
8009 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8010 {
8011 	u32 supported_flags, phy_flags;
8012 
8013 	/* The current implementation only supports a subset of the specified
8014 	 * flags. Also need to check mutual exclusiveness of sec flags.
8015 	 */
8016 	supported_flags = get_supported_adv_flags(hdev);
8017 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8018 	if (adv_flags & ~supported_flags ||
8019 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8020 		return false;
8021 
8022 	return true;
8023 }
8024 
8025 static bool adv_busy(struct hci_dev *hdev)
8026 {
8027 	return pending_find(MGMT_OP_SET_LE, hdev);
8028 }
8029 
8030 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8031 			     int err)
8032 {
8033 	struct adv_info *adv, *n;
8034 
8035 	bt_dev_dbg(hdev, "err %d", err);
8036 
8037 	hci_dev_lock(hdev);
8038 
8039 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8040 		u8 instance;
8041 
8042 		if (!adv->pending)
8043 			continue;
8044 
8045 		if (!err) {
8046 			adv->pending = false;
8047 			continue;
8048 		}
8049 
8050 		instance = adv->instance;
8051 
8052 		if (hdev->cur_adv_instance == instance)
8053 			cancel_adv_timeout(hdev);
8054 
8055 		hci_remove_adv_instance(hdev, instance);
8056 		mgmt_advertising_removed(sk, hdev, instance);
8057 	}
8058 
8059 	hci_dev_unlock(hdev);
8060 }
8061 
8062 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8063 {
8064 	struct mgmt_pending_cmd *cmd = data;
8065 	struct mgmt_cp_add_advertising *cp = cmd->param;
8066 	struct mgmt_rp_add_advertising rp;
8067 
8068 	memset(&rp, 0, sizeof(rp));
8069 
8070 	rp.instance = cp->instance;
8071 
8072 	if (err)
8073 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8074 				mgmt_status(err));
8075 	else
8076 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8077 				  mgmt_status(err), &rp, sizeof(rp));
8078 
8079 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8080 
8081 	mgmt_pending_free(cmd);
8082 }
8083 
/* hci_cmd_sync callback for MGMT_OP_ADD_ADVERTISING: program and start
 * the instance selected by add_advertising().  The last argument forces
 * a reschedule — see hci_schedule_adv_instance_sync().
 */
static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
8091 
/* MGMT_OP_ADD_ADVERTISING handler.
 *
 * Registers (or replaces) advertising instance cp->instance together
 * with its flags, advertising data and scan response data, and
 * schedules it to go on air when possible.  If HCI traffic is needed
 * the reply is deferred to add_advertising_complete(); otherwise it is
 * sent immediately.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance identifiers are 1-based and bounded by the number of
	 * advertising sets the controller supports.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The trailing payload must be exactly adv data plus scan rsp. */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be honoured on a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8226 
/* Completion of the parameter-setup HCI request queued by
 * add_ext_adv_params().  On success the reply carries the selected TX
 * power and the data space still available given the chosen flags; on
 * failure the instance is torn down again.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance may already be gone; nothing to report then
	 * beyond freeing the pending command.
	 */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8277 
/* hci_cmd_sync callback for MGMT_OP_ADD_EXT_ADV_PARAMS: push the new
 * instance's parameters to the controller.
 */
static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}
8285 
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler.
 *
 * First half of the two-stage extended advertising registration: create
 * the instance with its parameters but no data yet (the data follows in
 * MGMT_OP_ADD_EXT_ADV_DATA).  On extended-advertising controllers the
 * parameters are pushed via hci_cmd_sync and the reply is sent from
 * add_ext_adv_params_complete(); otherwise the reply is immediate.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance identifiers are 1-based and bounded by the number of
	 * advertising sets the controller supports.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   0, NULL, 0, NULL, timeout, duration,
				   tx_power, min_interval, max_interval);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: nothing to program yet, reply right
		 * away with the computed limits.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8400 
8401 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8402 {
8403 	struct mgmt_pending_cmd *cmd = data;
8404 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8405 	struct mgmt_rp_add_advertising rp;
8406 
8407 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8408 
8409 	memset(&rp, 0, sizeof(rp));
8410 
8411 	rp.instance = cp->instance;
8412 
8413 	if (err)
8414 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8415 				mgmt_status(err));
8416 	else
8417 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8418 				  mgmt_status(err), &rp, sizeof(rp));
8419 
8420 	mgmt_pending_free(cmd);
8421 }
8422 
8423 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8424 {
8425 	struct mgmt_pending_cmd *cmd = data;
8426 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8427 	int err;
8428 
8429 	if (ext_adv_capable(hdev)) {
8430 		err = hci_update_adv_data_sync(hdev, cp->instance);
8431 		if (err)
8432 			return err;
8433 
8434 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8435 		if (err)
8436 			return err;
8437 
8438 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8439 	}
8440 
8441 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8442 }
8443 
/* MGMT_OP_ADD_EXT_ADV_DATA handler.
 *
 * Second half of the two-stage registration started by
 * MGMT_OP_ADD_EXT_ADV_PARAMS: validate and attach the advertising and
 * scan response data to the previously created instance, then schedule
 * it.  On any failure the half-created instance is removed again.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by a preceding
	 * Add Ext Adv Params call.
	 */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8562 
8563 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8564 					int err)
8565 {
8566 	struct mgmt_pending_cmd *cmd = data;
8567 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8568 	struct mgmt_rp_remove_advertising rp;
8569 
8570 	bt_dev_dbg(hdev, "err %d", err);
8571 
8572 	memset(&rp, 0, sizeof(rp));
8573 	rp.instance = cp->instance;
8574 
8575 	if (err)
8576 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8577 				mgmt_status(err));
8578 	else
8579 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8580 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8581 
8582 	mgmt_pending_free(cmd);
8583 }
8584 
8585 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8586 {
8587 	struct mgmt_pending_cmd *cmd = data;
8588 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8589 	int err;
8590 
8591 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8592 	if (err)
8593 		return err;
8594 
8595 	if (list_empty(&hdev->adv_instances))
8596 		err = hci_disable_advertising_sync(hdev);
8597 
8598 	return err;
8599 }
8600 
/* MGMT_OP_REMOVE_ADVERTISING handler: validate the request and queue
 * the removal via the cmd_sync machinery; the reply is sent from
 * remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing one; instance 0
	 * skips the lookup (presumably "remove all" — see
	 * hci_remove_advertising_sync() for the exact semantics).
	 */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Don't race with an in-flight Set LE command */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances are registered at all */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8648 
8649 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8650 			     void *data, u16 data_len)
8651 {
8652 	struct mgmt_cp_get_adv_size_info *cp = data;
8653 	struct mgmt_rp_get_adv_size_info rp;
8654 	u32 flags, supported_flags;
8655 
8656 	bt_dev_dbg(hdev, "sock %p", sk);
8657 
8658 	if (!lmp_le_capable(hdev))
8659 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8660 				       MGMT_STATUS_REJECTED);
8661 
8662 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8663 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8664 				       MGMT_STATUS_INVALID_PARAMS);
8665 
8666 	flags = __le32_to_cpu(cp->flags);
8667 
8668 	/* The current implementation only supports a subset of the specified
8669 	 * flags.
8670 	 */
8671 	supported_flags = get_supported_adv_flags(hdev);
8672 	if (flags & ~supported_flags)
8673 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8674 				       MGMT_STATUS_INVALID_PARAMS);
8675 
8676 	rp.instance = cp->instance;
8677 	rp.flags = cp->flags;
8678 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8679 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8680 
8681 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8682 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8683 }
8684 
/* Dispatch table for mgmt commands, indexed by opcode. Each entry
 * holds the handler, the expected (minimum) parameter size, and
 * HCI_MGMT_* flags: VAR_LEN allows larger payloads, UNTRUSTED permits
 * non-privileged sockets, NO_HDEV/HDEV_OPTIONAL control the index
 * requirement and UNCONFIGURED allows unconfigured controllers.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8812 
8813 void mgmt_index_added(struct hci_dev *hdev)
8814 {
8815 	struct mgmt_ev_ext_index ev;
8816 
8817 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8818 		return;
8819 
8820 	switch (hdev->dev_type) {
8821 	case HCI_PRIMARY:
8822 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8823 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8824 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8825 			ev.type = 0x01;
8826 		} else {
8827 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8828 					 HCI_MGMT_INDEX_EVENTS);
8829 			ev.type = 0x00;
8830 		}
8831 		break;
8832 	case HCI_AMP:
8833 		ev.type = 0x02;
8834 		break;
8835 	default:
8836 		return;
8837 	}
8838 
8839 	ev.bus = hdev->bus;
8840 
8841 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8842 			 HCI_MGMT_EXT_INDEX_EVENTS);
8843 }
8844 
/* Announce that a controller index is going away. Still-pending mgmt
 * commands for a primary controller are completed with
 * MGMT_STATUS_INVALID_INDEX first.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw-only controllers are invisible to the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 — presumably matches all pending commands;
		 * confirm against mgmt_pending_foreach().
		 */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		/* AMP controllers only show up in the extended event */
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8879 
8880 void mgmt_power_on(struct hci_dev *hdev, int err)
8881 {
8882 	struct cmd_lookup match = { NULL, hdev };
8883 
8884 	bt_dev_dbg(hdev, "err %d", err);
8885 
8886 	hci_dev_lock(hdev);
8887 
8888 	if (!err) {
8889 		restart_le_actions(hdev);
8890 		hci_update_passive_scan(hdev);
8891 	}
8892 
8893 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8894 
8895 	new_settings(hdev, match.sk);
8896 
8897 	if (match.sk)
8898 		sock_put(match.sk);
8899 
8900 	hci_dev_unlock(hdev);
8901 }
8902 
/* Finish mgmt bookkeeping after the controller has powered off: answer
 * pending commands, reset the published class of device and broadcast
 * the updated settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	/* Complete pending Set Powered commands first */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail every other pending command with the chosen status */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only notify if the class of device was not already all-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8936 
8937 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8938 {
8939 	struct mgmt_pending_cmd *cmd;
8940 	u8 status;
8941 
8942 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8943 	if (!cmd)
8944 		return;
8945 
8946 	if (err == -ERFKILL)
8947 		status = MGMT_STATUS_RFKILLED;
8948 	else
8949 		status = MGMT_STATUS_FAILED;
8950 
8951 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8952 
8953 	mgmt_pending_remove(cmd);
8954 }
8955 
8956 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8957 		       bool persistent)
8958 {
8959 	struct mgmt_ev_new_link_key ev;
8960 
8961 	memset(&ev, 0, sizeof(ev));
8962 
8963 	ev.store_hint = persistent;
8964 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8965 	ev.key.addr.type = BDADDR_BREDR;
8966 	ev.key.type = key->type;
8967 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8968 	ev.key.pin_len = key->pin_len;
8969 
8970 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8971 }
8972 
8973 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8974 {
8975 	switch (ltk->type) {
8976 	case SMP_LTK:
8977 	case SMP_LTK_RESPONDER:
8978 		if (ltk->authenticated)
8979 			return MGMT_LTK_AUTHENTICATED;
8980 		return MGMT_LTK_UNAUTHENTICATED;
8981 	case SMP_LTK_P256:
8982 		if (ltk->authenticated)
8983 			return MGMT_LTK_P256_AUTH;
8984 		return MGMT_LTK_P256_UNAUTH;
8985 	case SMP_LTK_P256_DEBUG:
8986 		return MGMT_LTK_P256_DEBUG;
8987 	}
8988 
8989 	return MGMT_LTK_UNAUTHENTICATED;
8990 }
8991 
/* Send a New Long Term Key event so userspace can persist the key;
 * @persistent is the store hint unless the address rules below
 * override it.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK (as opposed to SMP_LTK_RESPONDER) marks the
	 * initiator's key.
	 */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9034 
9035 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9036 {
9037 	struct mgmt_ev_new_irk ev;
9038 
9039 	memset(&ev, 0, sizeof(ev));
9040 
9041 	ev.store_hint = persistent;
9042 
9043 	bacpy(&ev.rpa, &irk->rpa);
9044 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9045 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9046 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9047 
9048 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9049 }
9050 
/* Send a New CSRK event so userspace can persist the signature
 * resolving key; @persistent is the store hint unless the address
 * rules below override it.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9080 
9081 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9082 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9083 			 u16 max_interval, u16 latency, u16 timeout)
9084 {
9085 	struct mgmt_ev_new_conn_param ev;
9086 
9087 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9088 		return;
9089 
9090 	memset(&ev, 0, sizeof(ev));
9091 	bacpy(&ev.addr.bdaddr, bdaddr);
9092 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9093 	ev.store_hint = store_hint;
9094 	ev.min_interval = cpu_to_le16(min_interval);
9095 	ev.max_interval = cpu_to_le16(max_interval);
9096 	ev.latency = cpu_to_le16(latency);
9097 	ev.timeout = cpu_to_le16(timeout);
9098 
9099 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9100 }
9101 
9102 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9103 			   u8 *name, u8 name_len)
9104 {
9105 	struct sk_buff *skb;
9106 	struct mgmt_ev_device_connected *ev;
9107 	u16 eir_len = 0;
9108 	u32 flags = 0;
9109 
9110 	/* allocate buff for LE or BR/EDR adv */
9111 	if (conn->le_adv_data_len > 0)
9112 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9113 				     sizeof(*ev) + conn->le_adv_data_len);
9114 	else
9115 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9116 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9117 				     eir_precalc_len(sizeof(conn->dev_class)));
9118 
9119 	ev = skb_put(skb, sizeof(*ev));
9120 	bacpy(&ev->addr.bdaddr, &conn->dst);
9121 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9122 
9123 	if (conn->out)
9124 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9125 
9126 	ev->flags = __cpu_to_le32(flags);
9127 
9128 	/* We must ensure that the EIR Data fields are ordered and
9129 	 * unique. Keep it simple for now and avoid the problem by not
9130 	 * adding any BR/EDR data to the LE adv.
9131 	 */
9132 	if (conn->le_adv_data_len > 0) {
9133 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9134 		eir_len = conn->le_adv_data_len;
9135 	} else {
9136 		if (name)
9137 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9138 
9139 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9140 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9141 						    conn->dev_class, sizeof(conn->dev_class));
9142 	}
9143 
9144 	ev->eir_len = cpu_to_le16(eir_len);
9145 
9146 	mgmt_event_skb(skb, NULL);
9147 }
9148 
/* mgmt_pending_foreach() callback for MGMT_OP_DISCONNECT: complete the
 * pending command and hand the requester's socket back through @data
 * (with a reference held that the caller must sock_put()).
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	/* Return the socket so the caller can skip it when broadcasting */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
9160 
/* mgmt_pending_foreach() callback for MGMT_OP_UNPAIR_DEVICE: emit the
 * Device Unpaired event and complete the pending command with success.
 * @data is the hci_dev the commands belong to.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9171 
9172 bool mgmt_powering_down(struct hci_dev *hdev)
9173 {
9174 	struct mgmt_pending_cmd *cmd;
9175 	struct mgmt_mode *cp;
9176 
9177 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9178 	if (!cmd)
9179 		return false;
9180 
9181 	cp = cmd->param;
9182 	if (!cp->val)
9183 		return true;
9184 
9185 	return false;
9186 }
9187 
/* Handle a completed disconnection: answer pending Disconnect
 * commands, emit the Device Disconnected event and complete any
 * pending Unpair Device commands.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections userspace was told about */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() hands back the requester's socket via sk so
	 * that socket can be skipped when broadcasting the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9227 
/* A disconnect attempt failed at the HCI level: complete pending
 * Unpair Device commands and, when the failure matches the pending
 * Disconnect command's target, complete that command with the status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Ignore failures for a different address or address type */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9253 
9254 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9255 			 u8 addr_type, u8 status)
9256 {
9257 	struct mgmt_ev_connect_failed ev;
9258 
9259 	/* The connection is still in hci_conn_hash so test for 1
9260 	 * instead of 0 to know if this is the last one.
9261 	 */
9262 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9263 		cancel_delayed_work(&hdev->power_off);
9264 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
9265 	}
9266 
9267 	bacpy(&ev.addr.bdaddr, bdaddr);
9268 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9269 	ev.status = mgmt_status(status);
9270 
9271 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9272 }
9273 
9274 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9275 {
9276 	struct mgmt_ev_pin_code_request ev;
9277 
9278 	bacpy(&ev.addr.bdaddr, bdaddr);
9279 	ev.addr.type = BDADDR_BREDR;
9280 	ev.secure = secure;
9281 
9282 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9283 }
9284 
/* Complete a pending PIN Code Reply command with the translated HCI
 * status.
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9297 
/* Complete a pending PIN Code Negative Reply command with the
 * translated HCI status.
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9310 
9311 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9312 			      u8 link_type, u8 addr_type, u32 value,
9313 			      u8 confirm_hint)
9314 {
9315 	struct mgmt_ev_user_confirm_request ev;
9316 
9317 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9318 
9319 	bacpy(&ev.addr.bdaddr, bdaddr);
9320 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9321 	ev.confirm_hint = confirm_hint;
9322 	ev.value = cpu_to_le32(value);
9323 
9324 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9325 			  NULL);
9326 }
9327 
9328 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9329 			      u8 link_type, u8 addr_type)
9330 {
9331 	struct mgmt_ev_user_passkey_request ev;
9332 
9333 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9334 
9335 	bacpy(&ev.addr.bdaddr, bdaddr);
9336 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9337 
9338 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9339 			  NULL);
9340 }
9341 
/* Shared completion for user confirm/passkey (neg) reply commands:
 * finish the pending command @opcode with the translated HCI status.
 * Returns -ENOENT if no such command is pending.
 *
 * NOTE(review): @bdaddr, @link_type and @addr_type are currently
 * unused here.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
9357 
/* Completion hook for MGMT_OP_USER_CONFIRM_REPLY. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9364 
/* Completion hook for MGMT_OP_USER_CONFIRM_NEG_REPLY. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9372 
/* Completion hook for MGMT_OP_USER_PASSKEY_REPLY. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9379 
/* Completion hook for MGMT_OP_USER_PASSKEY_NEG_REPLY. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9387 
9388 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9389 			     u8 link_type, u8 addr_type, u32 passkey,
9390 			     u8 entered)
9391 {
9392 	struct mgmt_ev_passkey_notify ev;
9393 
9394 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9395 
9396 	bacpy(&ev.addr.bdaddr, bdaddr);
9397 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9398 	ev.passkey = __cpu_to_le32(passkey);
9399 	ev.entered = entered;
9400 
9401 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9402 }
9403 
/* Report an authentication failure to userspace and complete any
 * pairing command waiting on this connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the pairing initiator's socket: it gets the command
	 * completion below instead of the broadcast event.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9424 
/* Result of an HCI authentication-enable change: on failure, fail the
 * pending Set Link Security commands; on success, sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state and
 * broadcast new settings only when the flag actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror HCI_AUTH into HCI_LINK_SECURITY and note whether that
	 * was an actual change.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9451 
9452 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9453 {
9454 	struct cmd_lookup *match = data;
9455 
9456 	if (match->sk == NULL) {
9457 		match->sk = cmd->sk;
9458 		sock_hold(match->sk);
9459 	}
9460 }
9461 
9462 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9463 				    u8 status)
9464 {
9465 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9466 
9467 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9468 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9469 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9470 
9471 	if (!status) {
9472 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9473 				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9474 		ext_info_changed(hdev, NULL);
9475 	}
9476 
9477 	if (match.sk)
9478 		sock_put(match.sk);
9479 }
9480 
/* Completion handler for a local name update. Caches the name in hdev if
 * the change did not come from a pending mgmt command, and broadcasts
 * MGMT_EV_LOCAL_NAME_CHANGED unless the update is part of powering on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	/* Nothing to report if the name change failed */
	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	/* The short name is not part of this update; report the current one */
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command pending, so the change originated
		 * elsewhere; cache the new name in hdev.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the requesting socket (if any) for both notifications */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9508 
9509 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9510 {
9511 	int i;
9512 
9513 	for (i = 0; i < uuid_count; i++) {
9514 		if (!memcmp(uuid, uuids[i], 16))
9515 			return true;
9516 	}
9517 
9518 	return false;
9519 }
9520 
/* Walk the EIR/advertising data in @eir and return true if any UUID it
 * carries (16-, 32- or 128-bit form) is present in the @uuids filter
 * list. Shorter UUIDs are expanded to 128-bit form on top of the
 * Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length of type + data bytes */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field running past the buffer */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {	/* eir[1] holds the field type */
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Little-endian 16-bit UUIDs start at eir[2]; expand
			 * each one into octets 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs expand into octets 12-15 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared as-is */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + field_len bytes) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9575 
/* Schedule a deferred restart of the ongoing LE scan so that results
 * suppressed by strict duplicate filtering get reported again (see the
 * HCI_QUIRK_STRICT_DUPLICATE_FILTER handling in is_filter_match()).
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart if it would only fire after the current
	 * discovery window (scan_start + scan_duration) has ended.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
9590 
/* Apply the active service discovery filter (RSSI threshold and/or UUID
 * list) to a discovery result. Returns false when the result must be
 * dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. A match in either the EIR
		 * data or the scan response data is sufficient.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
9635 
9636 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
9637 				  bdaddr_t *bdaddr, u8 addr_type)
9638 {
9639 	struct mgmt_ev_adv_monitor_device_lost ev;
9640 
9641 	ev.monitor_handle = cpu_to_le16(handle);
9642 	bacpy(&ev.addr.bdaddr, bdaddr);
9643 	ev.addr.type = addr_type;
9644 
9645 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
9646 		   NULL);
9647 }
9648 
/* Emit MGMT_EV_ADV_MONITOR_DEVICE_FOUND for monitor @handle, built from a
 * copy of the DEVICE_FOUND payload in @skb. @skb itself is not consumed;
 * the caller keeps ownership.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Event size is the DEVICE_FOUND payload plus the extra fields of
	 * the monitor event (i.e. monitor_handle).
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
9678 
/* Route a DEVICE_FOUND skb to userspace, additionally reporting matched
 * Advertisement Monitors (at most one notification per monitored device)
 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND. Consumes @skb on every path.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		/* Fast path: no monitor bookkeeping needed */
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	/* Notify each matched monitor once, and recompute whether any
	 * monitored device is still awaiting its first notification.
	 */
	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* skb is either handed to userspace or dropped here */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
9742 
/* Report a discovered device to userspace, applying any active service
 * discovery filters (RSSI threshold, UUID list, limited discoverable
 * mode) before building the MGMT_EV_DEVICE_FOUND event and handing it to
 * mgmt_adv_monitor_device_found() for delivery.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize an EIR Class of Device field (5 bytes, accounted for
	 * in the allocation above) when the EIR data lacks one.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Ownership of skb passes to mgmt_adv_monitor_device_found() */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
9828 
9829 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9830 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9831 {
9832 	struct sk_buff *skb;
9833 	struct mgmt_ev_device_found *ev;
9834 	u16 eir_len = 0;
9835 	u32 flags = 0;
9836 
9837 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
9838 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
9839 
9840 	ev = skb_put(skb, sizeof(*ev));
9841 	bacpy(&ev->addr.bdaddr, bdaddr);
9842 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9843 	ev->rssi = rssi;
9844 
9845 	if (name)
9846 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9847 	else
9848 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9849 
9850 	ev->eir_len = cpu_to_le16(eir_len);
9851 	ev->flags = cpu_to_le32(flags);
9852 
9853 	mgmt_event_skb(skb, NULL);
9854 }
9855 
9856 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9857 {
9858 	struct mgmt_ev_discovering ev;
9859 
9860 	bt_dev_dbg(hdev, "discovering %u", discovering);
9861 
9862 	memset(&ev, 0, sizeof(ev));
9863 	ev.type = hdev->discovery.type;
9864 	ev.discovering = discovering;
9865 
9866 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9867 }
9868 
9869 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9870 {
9871 	struct mgmt_ev_controller_suspend ev;
9872 
9873 	ev.suspend_state = state;
9874 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9875 }
9876 
9877 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9878 		   u8 addr_type)
9879 {
9880 	struct mgmt_ev_controller_resume ev;
9881 
9882 	ev.wake_reason = reason;
9883 	if (bdaddr) {
9884 		bacpy(&ev.addr.bdaddr, bdaddr);
9885 		ev.addr.type = addr_type;
9886 	} else {
9887 		memset(&ev.addr, 0, sizeof(ev.addr));
9888 	}
9889 
9890 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9891 }
9892 
/* Management interface channel: dispatches HCI_CHANNEL_CONTROL messages
 * to the mgmt_handlers table and initializes per-hdev mgmt state.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9899 
/* Register the management control channel with the HCI socket layer.
 * Returns 0 on success or a negative error from the registration.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9904 
/* Unregister the management control channel registered in mgmt_init(). */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9909