xref: /openbmc/linux/net/bluetooth/mgmt.c (revision fe17b91a7777df140d0f1433991da67ba658796c)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* All mgmt opcodes that trusted (privileged) sockets may issue. The
 * table is reported verbatim by Read Management Supported Commands, so
 * entries must not be reordered or removed.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
133 
/* All mgmt events that may be delivered to trusted sockets. Reported
 * alongside mgmt_commands[] by Read Management Supported Commands.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
180 
/* Read-only subset of commands available to untrusted (non-privileged)
 * sockets. Only informational opcodes are included here.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
193 
/* Subset of events delivered to untrusted sockets; events carrying
 * security-sensitive data (keys, pairing, discovery) are excluded.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
208 
209 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
210 
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
213 
214 /* HCI to MGMT error code conversion table */
/* HCI to MGMT error code conversion table. The array is indexed by the
 * HCI status code, so entry order must exactly follow the HCI error
 * code assignment from the Bluetooth Core Specification — never insert
 * or reorder entries.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
281 
282 static u8 mgmt_errno_status(int err)
283 {
284 	switch (err) {
285 	case 0:
286 		return MGMT_STATUS_SUCCESS;
287 	case -EPERM:
288 		return MGMT_STATUS_REJECTED;
289 	case -EINVAL:
290 		return MGMT_STATUS_INVALID_PARAMS;
291 	case -EOPNOTSUPP:
292 		return MGMT_STATUS_NOT_SUPPORTED;
293 	case -EBUSY:
294 		return MGMT_STATUS_BUSY;
295 	case -ETIMEDOUT:
296 		return MGMT_STATUS_AUTH_FAILED;
297 	case -ENOMEM:
298 		return MGMT_STATUS_NO_RESOURCES;
299 	case -EISCONN:
300 		return MGMT_STATUS_ALREADY_CONNECTED;
301 	case -ENOTCONN:
302 		return MGMT_STATUS_DISCONNECTED;
303 	}
304 
305 	return MGMT_STATUS_FAILED;
306 }
307 
308 static u8 mgmt_status(int err)
309 {
310 	if (err < 0)
311 		return mgmt_errno_status(err);
312 
313 	if (err < ARRAY_SIZE(mgmt_status_table))
314 		return mgmt_status_table[err];
315 
316 	return MGMT_STATUS_FAILED;
317 }
318 
/* Send an index-related event on the control channel, filtered by the
 * given socket flag, to all matching sockets (no socket is skipped).
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
325 
/* Send an event on the control channel only to sockets that have the
 * given flag set, optionally skipping the originating socket skip_sk.
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
332 
/* Send an event on the control channel to all trusted sockets,
 * optionally skipping the originating socket skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
339 
/* Send a pre-built event skb to all trusted control-channel sockets,
 * optionally skipping the originating socket skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
345 
346 static u8 le_addr_type(u8 mgmt_addr_type)
347 {
348 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
349 		return ADDR_LE_DEV_PUBLIC;
350 	else
351 		return ADDR_LE_DEV_RANDOM;
352 }
353 
/* Fill a mgmt_rp_read_version structure with the interface version and
 * revision implemented by this file (MGMT_VERSION/MGMT_REVISION).
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
361 
/* Handler for MGMT_OP_READ_VERSION: reply with the mgmt interface
 * version and revision. No controller index is involved.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
374 
375 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
376 			 u16 data_len)
377 {
378 	struct mgmt_rp_read_commands *rp;
379 	u16 num_commands, num_events;
380 	size_t rp_size;
381 	int i, err;
382 
383 	bt_dev_dbg(hdev, "sock %p", sk);
384 
385 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
386 		num_commands = ARRAY_SIZE(mgmt_commands);
387 		num_events = ARRAY_SIZE(mgmt_events);
388 	} else {
389 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
390 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
391 	}
392 
393 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
394 
395 	rp = kmalloc(rp_size, GFP_KERNEL);
396 	if (!rp)
397 		return -ENOMEM;
398 
399 	rp->num_commands = cpu_to_le16(num_commands);
400 	rp->num_events = cpu_to_le16(num_events);
401 
402 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
403 		__le16 *opcode = rp->opcodes;
404 
405 		for (i = 0; i < num_commands; i++, opcode++)
406 			put_unaligned_le16(mgmt_commands[i], opcode);
407 
408 		for (i = 0; i < num_events; i++, opcode++)
409 			put_unaligned_le16(mgmt_events[i], opcode);
410 	} else {
411 		__le16 *opcode = rp->opcodes;
412 
413 		for (i = 0; i < num_commands; i++, opcode++)
414 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
415 
416 		for (i = 0; i < num_events; i++, opcode++)
417 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
418 	}
419 
420 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
421 				rp, rp_size);
422 	kfree(rp);
423 
424 	return err;
425 }
426 
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the indexes of all
 * configured primary controllers. Two passes are made over
 * hci_dev_list under the read lock: the first sizes the allocation,
 * the second fills it while also filtering out controllers that are
 * still in setup/config or bound to a user channel — so the final
 * count may be smaller than the allocation, never larger.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because the list read lock is held. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that really qualify. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
486 
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: same two-pass scheme as
 * read_index_list(), but reporting only primary controllers that still
 * have the HCI_UNCONFIGURED flag set.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC because the list read lock is held. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that really qualify. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: reply with all primary and
 * AMP controllers, each entry carrying a type (0x00 configured primary,
 * 0x01 unconfigured primary, 0x02 AMP), the bus and the index. Calling
 * this command also switches the socket over to extended index events
 * only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC because the list read lock is held. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries that really qualify. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
620 
621 static bool is_configured(struct hci_dev *hdev)
622 {
623 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
624 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
625 		return false;
626 
627 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
628 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
629 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
630 		return false;
631 
632 	return true;
633 }
634 
/* Build the little-endian bitmask of configuration options that are
 * still outstanding for this controller — the same two conditions that
 * is_configured() checks, expressed as MGMT_OPTION_* bits.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
650 
/* Broadcast a New Configuration Options event with the currently
 * missing options to all sockets that enabled option events, skipping
 * the socket that triggered the change.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
658 
/* Complete a configuration command by replying with the controller's
 * currently missing options bitmask.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
666 
/* Handler for MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer id
 * plus the supported and currently missing configuration options for
 * this controller.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can only be configured if the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
694 
/* Build the MGMT_PHY_* bitmask of PHYs the controller supports, derived
 * from the LMP/LE feature bits. The nesting mirrors the capability
 * dependencies: EDR 2M is a prerequisite for the EDR 3M bits, and the
 * 3/5-slot bits only apply once the corresponding rate is supported.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate, 1 slot is mandatory for BR/EDR. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for any LE controller. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
746 
/* Build the MGMT_PHY_* bitmask of PHYs currently selected. For BR/EDR
 * the selection is derived from hdev->pkt_type: DM/DH bits enable the
 * basic-rate slots directly, while the EDR 2DHx/3DHx bits work in
 * reverse — a set bit in pkt_type *disables* that EDR packet type, so
 * the checks are negated. LE selection comes from the default TX/RX
 * PHY preferences.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits in pkt_type mark *disabled* types. */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
809 
810 static u32 get_configurable_phys(struct hci_dev *hdev)
811 {
812 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
813 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
814 }
815 
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support at all, based on its BR/EDR and LE capabilities and quirks.
 * Contrast with get_current_settings(), which reports what is enabled
 * right now.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* These are always supported regardless of capabilities. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs CONFIG_BT_HS. */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration is possible with either the external-config
	 * quirk or a driver-provided set_bdaddr callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
862 
/* Build the MGMT_SETTING_* bitmask of settings currently in effect,
 * derived almost entirely from the hdev flag bits. Contrast with
 * get_supported_settings(), which reports what could be enabled.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
933 
/* Look up a pending mgmt command for this controller on the control
 * channel; returns NULL if none is outstanding for the opcode.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
938 
939 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
940 {
941 	struct mgmt_pending_cmd *cmd;
942 
943 	/* If there's a pending mgmt command the flags will not yet have
944 	 * their final values, so check for this first.
945 	 */
946 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
947 	if (cmd) {
948 		struct mgmt_mode *cp = cmd->param;
949 		if (cp->val == 0x01)
950 			return LE_AD_GENERAL;
951 		else if (cp->val == 0x02)
952 			return LE_AD_LIMITED;
953 	} else {
954 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
955 			return LE_AD_LIMITED;
956 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
957 			return LE_AD_GENERAL;
958 	}
959 
960 	return 0;
961 }
962 
963 bool mgmt_get_connectable(struct hci_dev *hdev)
964 {
965 	struct mgmt_pending_cmd *cmd;
966 
967 	/* If there's a pending mgmt command the flag will not yet have
968 	 * it's final value, so check for this first.
969 	 */
970 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
971 	if (cmd) {
972 		struct mgmt_mode *cp = cmd->param;
973 
974 		return cp->val;
975 	}
976 
977 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
978 }
979 
/* hci_cmd_sync callback: refresh the EIR data and device class after
 * the service cache has been switched off.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
987 
/* Delayed-work handler that clears the HCI_SERVICE_CACHE flag and, if
 * it was set, queues a sync update of EIR data and device class. Runs
 * once the service cache timeout expires.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	/* Nothing to do if the cache was not active. */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
998 
999 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1000 {
1001 	/* The generation of a new RPA and programming it into the
1002 	 * controller happens in the hci_req_enable_advertising()
1003 	 * function.
1004 	 */
1005 	if (ext_adv_capable(hdev))
1006 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1007 	else
1008 		return hci_enable_advertising_sync(hdev);
1009 }
1010 
/* Delayed-work handler fired when the RPA lifetime elapses: mark the
 * address expired and, if advertising is active, queue the sync work
 * that restarts advertising with a fresh RPA.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Without active advertising there is nothing to refresh now;
	 * the new RPA will be generated on the next advertising start.
	 */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1025 
1026 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1027 {
1028 	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1029 		return;
1030 
1031 	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1032 	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1033 
1034 	/* Non-mgmt controlled devices get this bit set
1035 	 * implicitly so that pairing works for them, however
1036 	 * for mgmt we require user-space to explicitly enable
1037 	 * it
1038 	 */
1039 	hci_dev_clear_flag(hdev, HCI_BONDABLE);
1040 }
1041 
1042 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1043 				void *data, u16 data_len)
1044 {
1045 	struct mgmt_rp_read_info rp;
1046 
1047 	bt_dev_dbg(hdev, "sock %p", sk);
1048 
1049 	hci_dev_lock(hdev);
1050 
1051 	memset(&rp, 0, sizeof(rp));
1052 
1053 	bacpy(&rp.bdaddr, &hdev->bdaddr);
1054 
1055 	rp.version = hdev->hci_ver;
1056 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1057 
1058 	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1059 	rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1060 
1061 	memcpy(rp.dev_class, hdev->dev_class, 3);
1062 
1063 	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1064 	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1065 
1066 	hci_dev_unlock(hdev);
1067 
1068 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1069 				 sizeof(rp));
1070 }
1071 
/* Build the EIR payload used by Read Extended Controller Info and the
 * Extended Info Changed event: class of device (BR/EDR only),
 * appearance (LE only), then complete and short local name. The append
 * order defines the wire format, so it must not be changed. Returns the
 * total number of bytes written to @eir.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1095 
/* Handle MGMT_OP_READ_EXT_INFO: like Read Controller Info but with the
 * variable-length EIR blob appended. Also switches this socket over to
 * the extended info event model.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Reply header plus EIR data share one stack buffer; eir_len
	 * tracks how much of the trailing flexible area is used.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1135 
/* Emit MGMT_EV_EXT_INFO_CHANGED (fresh EIR snapshot) to all sockets
 * that opted into extended info events, excluding @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1151 
/* Send a command-complete for @opcode carrying the current settings
 * bitmask as its payload (the standard reply for Set-* commands).
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1159 
1160 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1161 {
1162 	struct mgmt_ev_advertising_added ev;
1163 
1164 	ev.instance = instance;
1165 
1166 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1167 }
1168 
1169 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1170 			      u8 instance)
1171 {
1172 	struct mgmt_ev_advertising_removed ev;
1173 
1174 	ev.instance = instance;
1175 
1176 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1177 }
1178 
1179 static void cancel_adv_timeout(struct hci_dev *hdev)
1180 {
1181 	if (hdev->adv_instance_timeout) {
1182 		hdev->adv_instance_timeout = 0;
1183 		cancel_delayed_work(&hdev->adv_instance_expire);
1184 	}
1185 }
1186 
/* Re-sort every LE connection parameter entry onto its proper pending
 * list (auto-connect or passive-report) after a power-on.
 *
 * This function requires the caller holds hdev->lock.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1211 
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets subscribed to setting events, excluding @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1219 
/* Completion callback for the Set Powered hci_cmd_sync request:
 * responds to the originating socket and broadcasts the setting change.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power on: re-arm LE auto-connect actions and
			 * restart passive scanning under the dev lock.
			 */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1255 
1256 static int set_powered_sync(struct hci_dev *hdev, void *data)
1257 {
1258 	struct mgmt_pending_cmd *cmd = data;
1259 	struct mgmt_mode *cp = cmd->param;
1260 
1261 	BT_DBG("%s", hdev->name);
1262 
1263 	return hci_set_powered_sync(hdev, cp->val);
1264 }
1265 
/* Handle MGMT_OP_SET_POWERED: validate the request, short-circuit
 * no-op/busy cases, otherwise queue the power change asynchronously.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: reply with current settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1308 
/* Public wrapper: broadcast the current settings to every subscriber. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1313 
/* Shared context for mgmt_pending_foreach() callbacks: accumulates the
 * first responded-to socket (with a held reference) and the mgmt status
 * to report.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1319 
/* Foreach callback: answer a pending command with the current settings
 * and free it; the first socket seen is stashed (with a reference) in
 * the cmd_lookup so callers can later skip it when broadcasting.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1335 
/* Foreach callback: fail a pending command with the status pointed to
 * by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1343 
1344 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1345 {
1346 	if (cmd->cmd_complete) {
1347 		u8 *status = data;
1348 
1349 		cmd->cmd_complete(cmd, *status);
1350 		mgmt_pending_remove(cmd);
1351 
1352 		return;
1353 	}
1354 
1355 	cmd_status_rsp(cmd, data);
1356 }
1357 
/* Default cmd_complete handler: echo the command's own parameters back
 * as the reply payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1363 
/* cmd_complete handler for commands whose parameters start with a
 * mgmt_addr_info: reply with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1369 
1370 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1371 {
1372 	if (!lmp_bredr_capable(hdev))
1373 		return MGMT_STATUS_NOT_SUPPORTED;
1374 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1375 		return MGMT_STATUS_REJECTED;
1376 	else
1377 		return MGMT_STATUS_SUCCESS;
1378 }
1379 
1380 static u8 mgmt_le_support(struct hci_dev *hdev)
1381 {
1382 	if (!lmp_le_capable(hdev))
1383 		return MGMT_STATUS_NOT_SUPPORTED;
1384 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1385 		return MGMT_STATUS_REJECTED;
1386 	else
1387 		return MGMT_STATUS_SUCCESS;
1388 }
1389 
/* Completion callback for Set Discoverable: on success arm the
 * discoverable timeout (if any) and broadcast the new settings; on
 * failure report the error and back out the limited-discoverable flag.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the auto-off timer now that the mode change took effect. */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1423 
/* hci_cmd_sync callback: push the (already updated) discoverable flags
 * out to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1430 
/* Handle MGMT_OP_SET_DISCOVERABLE (val: 0x00 off, 0x01 general,
 * 0x02 limited, plus a timeout in seconds). Validates the combination,
 * handles the powered-off and timeout-only cases without HCI traffic,
 * otherwise updates the flags and queues the controller update.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1563 
1564 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1565 					  int err)
1566 {
1567 	struct mgmt_pending_cmd *cmd = data;
1568 
1569 	bt_dev_dbg(hdev, "err %d", err);
1570 
1571 	/* Make sure cmd still outstanding. */
1572 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1573 		return;
1574 
1575 	hci_dev_lock(hdev);
1576 
1577 	if (err) {
1578 		u8 mgmt_err = mgmt_status(err);
1579 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1580 		goto done;
1581 	}
1582 
1583 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1584 	new_settings(hdev, cmd->sk);
1585 
1586 done:
1587 	if (cmd)
1588 		mgmt_pending_remove(cmd);
1589 
1590 	hci_dev_unlock(hdev);
1591 }
1592 
/* Apply a Set Connectable request without touching the controller
 * (used when powered off): adjust the flags, reply with the current
 * settings, and refresh scanning/broadcast state if anything changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Dropping connectable also drops discoverable. */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1621 
/* hci_cmd_sync callback: push the (already updated) connectable flags
 * out to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1628 
/* Handle MGMT_OP_SET_CONNECTABLE: validate, handle the powered-off
 * case in software, otherwise update the flags and queue the
 * controller update.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverability,
		 * so stop any running discoverable timeout first.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1688 
/* Handle MGMT_OP_SET_BONDABLE: pure flag change, no controller
 * round-trip needed; broadcast new settings only on an actual change.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1726 
/* Handle MGMT_OP_SET_LINK_SECURITY: when powered, program the
 * controller's authentication-enable setting via a raw HCI command;
 * when powered off, just toggle the flag.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Completion is driven by the HCI command-complete path. */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1795 
/* Completion callback for Set SSP: reconcile the SSP/HS flags with the
 * outcome, answer every pending Set SSP command, broadcast the change,
 * and refresh the EIR data.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Back out the optimistic flag changes on failure. */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* Disabling SSP also disables High Speed. */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1844 
/* hci_cmd_sync callback: optimistically set HCI_SSP_ENABLED, write the
 * SSP mode to the controller, and revert the flag if the write fails.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* Only undo the flag if we were the ones who set it. */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1862 
/* Handle MGMT_OP_SET_SSP (Secure Simple Pairing): flag-only when
 * powered off, otherwise queue the controller update.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also disables High Speed. */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1942 
/* Handle MGMT_OP_SET_HS (High Speed / AMP): flag-only change that
 * depends on CONFIG_BT_HS, BR/EDR and SSP all being available.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP may flip HCI_HS_ENABLED underneath us. */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2003 
/* Completion callback for Set LE: answer every pending Set LE command
 * (status on failure, current settings on success) and broadcast the
 * new settings.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
2024 
/* hci_cmd_sync callback: enable or disable LE host support on the
 * controller, tearing down advertising on disable and refreshing the
 * advertising defaults on enable.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Stop (extended) advertising before LE goes away. */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2066 
/* Handle MGMT_OP_SET_LE: validate, refuse turning LE off on LE-only
 * controllers, handle powered-off/no-op cases in software, otherwise
 * queue the controller update.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE invalidates all advertising instances. */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2158 
2159 /* This is a helper function to test for pending mgmt commands that can
2160  * cause CoD or EIR HCI commands. We can only allow one such pending
2161  * mgmt command at a time since otherwise we cannot easily track what
2162  * the current values are, will be, and based on that calculate if a new
2163  * HCI command needs to be sent and if yes with what value.
2164  */
2165 static bool pending_eir_or_class(struct hci_dev *hdev)
2166 {
2167 	struct mgmt_pending_cmd *cmd;
2168 
2169 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2170 		switch (cmd->opcode) {
2171 		case MGMT_OP_ADD_UUID:
2172 		case MGMT_OP_REMOVE_UUID:
2173 		case MGMT_OP_SET_DEV_CLASS:
2174 		case MGMT_OP_SET_POWERED:
2175 			return true;
2176 		}
2177 	}
2178 
2179 	return false;
2180 }
2181 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. 16-bit and 32-bit UUIDs only differ from it
 * in bytes 12-15 (see get_uuid_size() below).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2186 
2187 static u8 get_uuid_size(const u8 *uuid)
2188 {
2189 	u32 val;
2190 
2191 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2192 		return 128;
2193 
2194 	val = get_unaligned_le32(&uuid[12]);
2195 	if (val > 0xffff)
2196 		return 32;
2197 
2198 	return 16;
2199 }
2200 
2201 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2202 {
2203 	struct mgmt_pending_cmd *cmd = data;
2204 
2205 	bt_dev_dbg(hdev, "err %d", err);
2206 
2207 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2208 			  mgmt_status(err), hdev->dev_class, 3);
2209 
2210 	mgmt_pending_free(cmd);
2211 }
2212 
/* Synchronous worker for MGMT_OP_ADD_UUID: refresh the Class of Device
 * first and, only on success, the EIR data.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int ret = hci_update_class_sync(hdev);

	return ret ? ret : hci_update_eir_sync(hdev);
}
2223 
/* Handle MGMT_OP_ADD_UUID: register a new service UUID and queue an
 * update of the Class of Device and EIR data. The response is sent
 * from mgmt_class_complete() once the update has run.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	/* Success falls through; the response is deferred */
failed:
	hci_dev_unlock(hdev);
	return err;
}
2269 
2270 static bool enable_service_cache(struct hci_dev *hdev)
2271 {
2272 	if (!hdev_is_powered(hdev))
2273 		return false;
2274 
2275 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2276 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2277 				   CACHE_TIMEOUT);
2278 		return true;
2279 	}
2280 
2281 	return false;
2282 }
2283 
/* Synchronous worker for MGMT_OP_REMOVE_UUID: refresh the Class of
 * Device first and, only on success, the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int ret = hci_update_class_sync(hdev);

	return ret ? ret : hci_update_eir_sync(hdev);
}
2294 
/* Handle MGMT_OP_REMOVE_UUID: remove one service UUID, or all of them
 * when the all-zero wildcard UUID is given, then queue a Class of
 * Device/EIR refresh.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache was (re)armed, the controller
		 * update is deferred to the cache timeout; answer
		 * immediately in that case.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Response is sent from mgmt_class_complete() */
	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2362 
2363 static int set_class_sync(struct hci_dev *hdev, void *data)
2364 {
2365 	int err = 0;
2366 
2367 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2368 		cancel_delayed_work_sync(&hdev->service_cache);
2369 		err = hci_update_eir_sync(hdev);
2370 	}
2371 
2372 	if (err)
2373 		return err;
2374 
2375 	return hci_update_class_sync(hdev);
2376 }
2377 
/* Handle MGMT_OP_SET_DEV_CLASS: update the major/minor device class
 * and push the new Class of Device to the controller when powered.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reject values with reserved bits set (low two bits of minor,
	 * high three bits of major, per the Class of Device format).
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off just store the values; no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Response is sent from mgmt_class_complete() */
	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2429 
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace the complete set of stored
 * BR/EDR link keys with the ones supplied by userspace.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound keeping the total command length within u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* Command length must exactly match the advertised key count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* debug_keys is a boolean flag */
	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate all entries before modifying any state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the block list must never be stored */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2518 
2519 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2520 			   u8 addr_type, struct sock *skip_sk)
2521 {
2522 	struct mgmt_ev_device_unpaired ev;
2523 
2524 	bacpy(&ev.addr.bdaddr, bdaddr);
2525 	ev.addr.type = addr_type;
2526 
2527 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2528 			  skip_sk);
2529 }
2530 
/* Handle MGMT_OP_UNPAIR_DEVICE: remove all stored keys for a device
 * and optionally terminate an existing connection to it. When a link
 * termination is requested the response is deferred to the pending
 * command's completion.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean flag */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Terminate the link; the response is sent on disconnection */
	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2658 
/* Handle MGMT_OP_DISCONNECT: terminate a BR/EDR or LE connection on
 * user request. The response is deferred until the disconnect
 * completes (via the pending command's generic_cmd_complete).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only a single disconnect may be in flight at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2724 
2725 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2726 {
2727 	switch (link_type) {
2728 	case LE_LINK:
2729 		switch (addr_type) {
2730 		case ADDR_LE_DEV_PUBLIC:
2731 			return BDADDR_LE_PUBLIC;
2732 
2733 		default:
2734 			/* Fallback to LE Random address type */
2735 			return BDADDR_LE_RANDOM;
2736 		}
2737 
2738 	default:
2739 		/* Fallback to BR/EDR type */
2740 		return BDADDR_BREDR;
2741 	}
2742 }
2743 
2744 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2745 			   u16 data_len)
2746 {
2747 	struct mgmt_rp_get_connections *rp;
2748 	struct hci_conn *c;
2749 	int err;
2750 	u16 i;
2751 
2752 	bt_dev_dbg(hdev, "sock %p", sk);
2753 
2754 	hci_dev_lock(hdev);
2755 
2756 	if (!hdev_is_powered(hdev)) {
2757 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2758 				      MGMT_STATUS_NOT_POWERED);
2759 		goto unlock;
2760 	}
2761 
2762 	i = 0;
2763 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2764 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2765 			i++;
2766 	}
2767 
2768 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2769 	if (!rp) {
2770 		err = -ENOMEM;
2771 		goto unlock;
2772 	}
2773 
2774 	i = 0;
2775 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2776 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2777 			continue;
2778 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2779 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2780 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2781 			continue;
2782 		i++;
2783 	}
2784 
2785 	rp->conn_count = cpu_to_le16(i);
2786 
2787 	/* Recalculate length in case of filtered SCO connections, etc */
2788 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2789 				struct_size(rp, addr, i));
2790 
2791 	kfree(rp);
2792 
2793 unlock:
2794 	hci_dev_unlock(hdev);
2795 	return err;
2796 }
2797 
2798 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2799 				   struct mgmt_cp_pin_code_neg_reply *cp)
2800 {
2801 	struct mgmt_pending_cmd *cmd;
2802 	int err;
2803 
2804 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2805 			       sizeof(*cp));
2806 	if (!cmd)
2807 		return -ENOMEM;
2808 
2809 	cmd->cmd_complete = addr_cmd_complete;
2810 
2811 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2812 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2813 	if (err < 0)
2814 		mgmt_pending_remove(cmd);
2815 
2816 	return err;
2817 }
2818 
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user supplied PIN code to
 * the controller. When the pending security level demands a full
 * 16-byte PIN and a shorter one was given, a negative reply is sent
 * instead and the command rejected.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16-byte PIN; reject shorter ones */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2880 
2881 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2882 			     u16 len)
2883 {
2884 	struct mgmt_cp_set_io_capability *cp = data;
2885 
2886 	bt_dev_dbg(hdev, "sock %p", sk);
2887 
2888 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2889 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2890 				       MGMT_STATUS_INVALID_PARAMS);
2891 
2892 	hci_dev_lock(hdev);
2893 
2894 	hdev->io_capability = cp->io_capability;
2895 
2896 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2897 
2898 	hci_dev_unlock(hdev);
2899 
2900 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2901 				 NULL, 0);
2902 }
2903 
2904 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2905 {
2906 	struct hci_dev *hdev = conn->hdev;
2907 	struct mgmt_pending_cmd *cmd;
2908 
2909 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2910 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2911 			continue;
2912 
2913 		if (cmd->user_data != conn)
2914 			continue;
2915 
2916 		return cmd;
2917 	}
2918 
2919 	return NULL;
2920 }
2921 
/* Finalize a pending MGMT_OP_PAIR_DEVICE command: send the response,
 * detach the pairing callbacks and drop the connection references
 * taken when the command was set up.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the hold taken when the connection was created */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Release the reference stored in cmd->user_data */
	hci_conn_put(conn);

	return err;
}
2950 
2951 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2952 {
2953 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2954 	struct mgmt_pending_cmd *cmd;
2955 
2956 	cmd = find_pairing(conn);
2957 	if (cmd) {
2958 		cmd->cmd_complete(cmd, status);
2959 		mgmt_pending_remove(cmd);
2960 	}
2961 }
2962 
2963 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2964 {
2965 	struct mgmt_pending_cmd *cmd;
2966 
2967 	BT_DBG("status %u", status);
2968 
2969 	cmd = find_pairing(conn);
2970 	if (!cmd) {
2971 		BT_DBG("Unable to find a pending command");
2972 		return;
2973 	}
2974 
2975 	cmd->cmd_complete(cmd, mgmt_status(status));
2976 	mgmt_pending_remove(cmd);
2977 }
2978 
2979 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2980 {
2981 	struct mgmt_pending_cmd *cmd;
2982 
2983 	BT_DBG("status %u", status);
2984 
2985 	if (!status)
2986 		return;
2987 
2988 	cmd = find_pairing(conn);
2989 	if (!cmd) {
2990 		BT_DBG("Unable to find a pending command");
2991 		return;
2992 	}
2993 
2994 	cmd->cmd_complete(cmd, mgmt_status(status));
2995 	mgmt_pending_remove(cmd);
2996 }
2997 
2998 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2999 		       u16 len)
3000 {
3001 	struct mgmt_cp_pair_device *cp = data;
3002 	struct mgmt_rp_pair_device rp;
3003 	struct mgmt_pending_cmd *cmd;
3004 	u8 sec_level, auth_type;
3005 	struct hci_conn *conn;
3006 	int err;
3007 
3008 	bt_dev_dbg(hdev, "sock %p", sk);
3009 
3010 	memset(&rp, 0, sizeof(rp));
3011 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3012 	rp.addr.type = cp->addr.type;
3013 
3014 	if (!bdaddr_type_is_valid(cp->addr.type))
3015 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3016 					 MGMT_STATUS_INVALID_PARAMS,
3017 					 &rp, sizeof(rp));
3018 
3019 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3020 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3021 					 MGMT_STATUS_INVALID_PARAMS,
3022 					 &rp, sizeof(rp));
3023 
3024 	hci_dev_lock(hdev);
3025 
3026 	if (!hdev_is_powered(hdev)) {
3027 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3028 					MGMT_STATUS_NOT_POWERED, &rp,
3029 					sizeof(rp));
3030 		goto unlock;
3031 	}
3032 
3033 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3034 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3035 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3036 					sizeof(rp));
3037 		goto unlock;
3038 	}
3039 
3040 	sec_level = BT_SECURITY_MEDIUM;
3041 	auth_type = HCI_AT_DEDICATED_BONDING;
3042 
3043 	if (cp->addr.type == BDADDR_BREDR) {
3044 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3045 				       auth_type, CONN_REASON_PAIR_DEVICE);
3046 	} else {
3047 		u8 addr_type = le_addr_type(cp->addr.type);
3048 		struct hci_conn_params *p;
3049 
3050 		/* When pairing a new device, it is expected to remember
3051 		 * this device for future connections. Adding the connection
3052 		 * parameter information ahead of time allows tracking
3053 		 * of the peripheral preferred values and will speed up any
3054 		 * further connection establishment.
3055 		 *
3056 		 * If connection parameters already exist, then they
3057 		 * will be kept and this function does nothing.
3058 		 */
3059 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3060 
3061 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3062 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3063 
3064 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3065 					   sec_level, HCI_LE_CONN_TIMEOUT,
3066 					   CONN_REASON_PAIR_DEVICE);
3067 	}
3068 
3069 	if (IS_ERR(conn)) {
3070 		int status;
3071 
3072 		if (PTR_ERR(conn) == -EBUSY)
3073 			status = MGMT_STATUS_BUSY;
3074 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3075 			status = MGMT_STATUS_NOT_SUPPORTED;
3076 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3077 			status = MGMT_STATUS_REJECTED;
3078 		else
3079 			status = MGMT_STATUS_CONNECT_FAILED;
3080 
3081 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3082 					status, &rp, sizeof(rp));
3083 		goto unlock;
3084 	}
3085 
3086 	if (conn->connect_cfm_cb) {
3087 		hci_conn_drop(conn);
3088 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3089 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3090 		goto unlock;
3091 	}
3092 
3093 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3094 	if (!cmd) {
3095 		err = -ENOMEM;
3096 		hci_conn_drop(conn);
3097 		goto unlock;
3098 	}
3099 
3100 	cmd->cmd_complete = pairing_complete;
3101 
3102 	/* For LE, just connecting isn't a proof that the pairing finished */
3103 	if (cp->addr.type == BDADDR_BREDR) {
3104 		conn->connect_cfm_cb = pairing_complete_cb;
3105 		conn->security_cfm_cb = pairing_complete_cb;
3106 		conn->disconn_cfm_cb = pairing_complete_cb;
3107 	} else {
3108 		conn->connect_cfm_cb = le_pairing_complete_cb;
3109 		conn->security_cfm_cb = le_pairing_complete_cb;
3110 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3111 	}
3112 
3113 	conn->io_capability = cp->io_cap;
3114 	cmd->user_data = hci_conn_get(conn);
3115 
3116 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3117 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3118 		cmd->cmd_complete(cmd, 0);
3119 		mgmt_pending_remove(cmd);
3120 	}
3121 
3122 	err = 0;
3123 
3124 unlock:
3125 	hci_dev_unlock(hdev);
3126 	return err;
3127 }
3128 
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort an in-progress pairing that
 * was started with MGMT_OP_PAIR_DEVICE for the given address.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the pairing currently in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Complete the pair command as cancelled before responding */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3185 
/* Common handler for the user pairing response commands (PIN code,
 * user confirm and passkey replies and their negative variants). For
 * LE the response is handed to SMP directly; for BR/EDR the matching
 * HCI command @hci_op is sent and the mgmt response deferred to
 * addr_cmd_complete().
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled entirely by SMP */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3256 
3257 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3258 			      void *data, u16 len)
3259 {
3260 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3261 
3262 	bt_dev_dbg(hdev, "sock %p", sk);
3263 
3264 	return user_pairing_resp(sk, hdev, &cp->addr,
3265 				MGMT_OP_PIN_CODE_NEG_REPLY,
3266 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3267 }
3268 
3269 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3270 			      u16 len)
3271 {
3272 	struct mgmt_cp_user_confirm_reply *cp = data;
3273 
3274 	bt_dev_dbg(hdev, "sock %p", sk);
3275 
3276 	if (len != sizeof(*cp))
3277 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3278 				       MGMT_STATUS_INVALID_PARAMS);
3279 
3280 	return user_pairing_resp(sk, hdev, &cp->addr,
3281 				 MGMT_OP_USER_CONFIRM_REPLY,
3282 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3283 }
3284 
3285 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3286 				  void *data, u16 len)
3287 {
3288 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3289 
3290 	bt_dev_dbg(hdev, "sock %p", sk);
3291 
3292 	return user_pairing_resp(sk, hdev, &cp->addr,
3293 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3294 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3295 }
3296 
3297 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3298 			      u16 len)
3299 {
3300 	struct mgmt_cp_user_passkey_reply *cp = data;
3301 
3302 	bt_dev_dbg(hdev, "sock %p", sk);
3303 
3304 	return user_pairing_resp(sk, hdev, &cp->addr,
3305 				 MGMT_OP_USER_PASSKEY_REPLY,
3306 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3307 }
3308 
3309 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3310 				  void *data, u16 len)
3311 {
3312 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3313 
3314 	bt_dev_dbg(hdev, "sock %p", sk);
3315 
3316 	return user_pairing_resp(sk, hdev, &cp->addr,
3317 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3318 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3319 }
3320 
3321 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3322 {
3323 	struct adv_info *adv_instance;
3324 
3325 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3326 	if (!adv_instance)
3327 		return 0;
3328 
3329 	/* stop if current instance doesn't need to be changed */
3330 	if (!(adv_instance->flags & flags))
3331 		return 0;
3332 
3333 	cancel_adv_timeout(hdev);
3334 
3335 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3336 	if (!adv_instance)
3337 		return 0;
3338 
3339 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3340 
3341 	return 0;
3342 }
3343 
/* Deferred (hci_cmd_sync) handler: expire advertising instances that
 * include the local name after it has changed.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3348 
/* Completion callback for MGMT_OP_SET_LOCAL_NAME. Replies to the
 * pending mgmt command and, on success while advertising, queues the
 * expiry of name-carrying advertising instances.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if this is no longer the pending command */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Refresh advertising instances that include the name */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3373 
/* hci_cmd_sync handler: push the updated local name to the controller
 * (BR/EDR name + EIR, and the LE scan response when advertising).
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3389 
/* Handle MGMT_OP_SET_LOCAL_NAME.
 *
 * Records the new short name immediately; the complete name is either
 * stored directly (when powered off, with a LOCAL_NAME_CHANGED event)
 * or committed once the controller update has been queued.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Powered off: nothing to send to the controller */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Only commit the complete name once the update is queued */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3452 
/* Deferred (hci_cmd_sync) handler: expire advertising instances that
 * include the appearance value after it has changed.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3457 
3458 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3459 			  u16 len)
3460 {
3461 	struct mgmt_cp_set_appearance *cp = data;
3462 	u16 appearance;
3463 	int err;
3464 
3465 	bt_dev_dbg(hdev, "sock %p", sk);
3466 
3467 	if (!lmp_le_capable(hdev))
3468 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3469 				       MGMT_STATUS_NOT_SUPPORTED);
3470 
3471 	appearance = le16_to_cpu(cp->appearance);
3472 
3473 	hci_dev_lock(hdev);
3474 
3475 	if (hdev->appearance != appearance) {
3476 		hdev->appearance = appearance;
3477 
3478 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3479 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3480 					   NULL);
3481 
3482 		ext_info_changed(hdev, sk);
3483 	}
3484 
3485 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3486 				0);
3487 
3488 	hci_dev_unlock(hdev);
3489 
3490 	return err;
3491 }
3492 
3493 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3494 				 void *data, u16 len)
3495 {
3496 	struct mgmt_rp_get_phy_configuration rp;
3497 
3498 	bt_dev_dbg(hdev, "sock %p", sk);
3499 
3500 	hci_dev_lock(hdev);
3501 
3502 	memset(&rp, 0, sizeof(rp));
3503 
3504 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3505 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3506 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3507 
3508 	hci_dev_unlock(hdev);
3509 
3510 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3511 				 &rp, sizeof(rp));
3512 }
3513 
3514 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3515 {
3516 	struct mgmt_ev_phy_configuration_changed ev;
3517 
3518 	memset(&ev, 0, sizeof(ev));
3519 
3520 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3521 
3522 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3523 			  sizeof(ev), skip);
3524 }
3525 
/* Completion callback for MGMT_OP_SET_PHY_CONFIGURATION. Derives the
 * final status from err and from the HCI response skb stored by
 * set_default_phy_sync(), then replies and notifies other sockets.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if this is no longer the pending command */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	if (!status) {
		/* err was fine; inspect the HCI response itself */
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3562 
3563 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3564 {
3565 	struct mgmt_pending_cmd *cmd = data;
3566 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3567 	struct hci_cp_le_set_default_phy cp_phy;
3568 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3569 
3570 	memset(&cp_phy, 0, sizeof(cp_phy));
3571 
3572 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3573 		cp_phy.all_phys |= 0x01;
3574 
3575 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3576 		cp_phy.all_phys |= 0x02;
3577 
3578 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3579 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3580 
3581 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3582 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3583 
3584 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3585 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3586 
3587 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3588 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3589 
3590 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3591 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3592 
3593 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3594 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3595 
3596 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3597 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3598 
3599 	return 0;
3600 }
3601 
3602 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3603 				 void *data, u16 len)
3604 {
3605 	struct mgmt_cp_set_phy_configuration *cp = data;
3606 	struct mgmt_pending_cmd *cmd;
3607 	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3608 	u16 pkt_type = (HCI_DH1 | HCI_DM1);
3609 	bool changed = false;
3610 	int err;
3611 
3612 	bt_dev_dbg(hdev, "sock %p", sk);
3613 
3614 	configurable_phys = get_configurable_phys(hdev);
3615 	supported_phys = get_supported_phys(hdev);
3616 	selected_phys = __le32_to_cpu(cp->selected_phys);
3617 
3618 	if (selected_phys & ~supported_phys)
3619 		return mgmt_cmd_status(sk, hdev->id,
3620 				       MGMT_OP_SET_PHY_CONFIGURATION,
3621 				       MGMT_STATUS_INVALID_PARAMS);
3622 
3623 	unconfigure_phys = supported_phys & ~configurable_phys;
3624 
3625 	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3626 		return mgmt_cmd_status(sk, hdev->id,
3627 				       MGMT_OP_SET_PHY_CONFIGURATION,
3628 				       MGMT_STATUS_INVALID_PARAMS);
3629 
3630 	if (selected_phys == get_selected_phys(hdev))
3631 		return mgmt_cmd_complete(sk, hdev->id,
3632 					 MGMT_OP_SET_PHY_CONFIGURATION,
3633 					 0, NULL, 0);
3634 
3635 	hci_dev_lock(hdev);
3636 
3637 	if (!hdev_is_powered(hdev)) {
3638 		err = mgmt_cmd_status(sk, hdev->id,
3639 				      MGMT_OP_SET_PHY_CONFIGURATION,
3640 				      MGMT_STATUS_REJECTED);
3641 		goto unlock;
3642 	}
3643 
3644 	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3645 		err = mgmt_cmd_status(sk, hdev->id,
3646 				      MGMT_OP_SET_PHY_CONFIGURATION,
3647 				      MGMT_STATUS_BUSY);
3648 		goto unlock;
3649 	}
3650 
3651 	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3652 		pkt_type |= (HCI_DH3 | HCI_DM3);
3653 	else
3654 		pkt_type &= ~(HCI_DH3 | HCI_DM3);
3655 
3656 	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3657 		pkt_type |= (HCI_DH5 | HCI_DM5);
3658 	else
3659 		pkt_type &= ~(HCI_DH5 | HCI_DM5);
3660 
3661 	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3662 		pkt_type &= ~HCI_2DH1;
3663 	else
3664 		pkt_type |= HCI_2DH1;
3665 
3666 	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3667 		pkt_type &= ~HCI_2DH3;
3668 	else
3669 		pkt_type |= HCI_2DH3;
3670 
3671 	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3672 		pkt_type &= ~HCI_2DH5;
3673 	else
3674 		pkt_type |= HCI_2DH5;
3675 
3676 	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3677 		pkt_type &= ~HCI_3DH1;
3678 	else
3679 		pkt_type |= HCI_3DH1;
3680 
3681 	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3682 		pkt_type &= ~HCI_3DH3;
3683 	else
3684 		pkt_type |= HCI_3DH3;
3685 
3686 	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3687 		pkt_type &= ~HCI_3DH5;
3688 	else
3689 		pkt_type |= HCI_3DH5;
3690 
3691 	if (pkt_type != hdev->pkt_type) {
3692 		hdev->pkt_type = pkt_type;
3693 		changed = true;
3694 	}
3695 
3696 	if ((selected_phys & MGMT_PHY_LE_MASK) ==
3697 	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3698 		if (changed)
3699 			mgmt_phy_configuration_changed(hdev, sk);
3700 
3701 		err = mgmt_cmd_complete(sk, hdev->id,
3702 					MGMT_OP_SET_PHY_CONFIGURATION,
3703 					0, NULL, 0);
3704 
3705 		goto unlock;
3706 	}
3707 
3708 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3709 			       len);
3710 	if (!cmd)
3711 		err = -ENOMEM;
3712 	else
3713 		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
3714 					 set_default_phy_complete);
3715 
3716 	if (err < 0) {
3717 		err = mgmt_cmd_status(sk, hdev->id,
3718 				      MGMT_OP_SET_PHY_CONFIGURATION,
3719 				      MGMT_STATUS_FAILED);
3720 
3721 		if (cmd)
3722 			mgmt_pending_remove(cmd);
3723 	}
3724 
3725 unlock:
3726 	hci_dev_unlock(hdev);
3727 
3728 	return err;
3729 }
3730 
3731 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3732 			    u16 len)
3733 {
3734 	int err = MGMT_STATUS_SUCCESS;
3735 	struct mgmt_cp_set_blocked_keys *keys = data;
3736 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3737 				   sizeof(struct mgmt_blocked_key_info));
3738 	u16 key_count, expected_len;
3739 	int i;
3740 
3741 	bt_dev_dbg(hdev, "sock %p", sk);
3742 
3743 	key_count = __le16_to_cpu(keys->key_count);
3744 	if (key_count > max_key_count) {
3745 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3746 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3747 				       MGMT_STATUS_INVALID_PARAMS);
3748 	}
3749 
3750 	expected_len = struct_size(keys, keys, key_count);
3751 	if (expected_len != len) {
3752 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3753 			   expected_len, len);
3754 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3755 				       MGMT_STATUS_INVALID_PARAMS);
3756 	}
3757 
3758 	hci_dev_lock(hdev);
3759 
3760 	hci_blocked_keys_clear(hdev);
3761 
3762 	for (i = 0; i < keys->key_count; ++i) {
3763 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3764 
3765 		if (!b) {
3766 			err = MGMT_STATUS_NO_RESOURCES;
3767 			break;
3768 		}
3769 
3770 		b->type = keys->keys[i].type;
3771 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3772 		list_add_rcu(&b->list, &hdev->blocked_keys);
3773 	}
3774 	hci_dev_unlock(hdev);
3775 
3776 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3777 				err, NULL, 0);
3778 }
3779 
3780 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3781 			       void *data, u16 len)
3782 {
3783 	struct mgmt_mode *cp = data;
3784 	int err;
3785 	bool changed = false;
3786 
3787 	bt_dev_dbg(hdev, "sock %p", sk);
3788 
3789 	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3790 		return mgmt_cmd_status(sk, hdev->id,
3791 				       MGMT_OP_SET_WIDEBAND_SPEECH,
3792 				       MGMT_STATUS_NOT_SUPPORTED);
3793 
3794 	if (cp->val != 0x00 && cp->val != 0x01)
3795 		return mgmt_cmd_status(sk, hdev->id,
3796 				       MGMT_OP_SET_WIDEBAND_SPEECH,
3797 				       MGMT_STATUS_INVALID_PARAMS);
3798 
3799 	hci_dev_lock(hdev);
3800 
3801 	if (hdev_is_powered(hdev) &&
3802 	    !!cp->val != hci_dev_test_flag(hdev,
3803 					   HCI_WIDEBAND_SPEECH_ENABLED)) {
3804 		err = mgmt_cmd_status(sk, hdev->id,
3805 				      MGMT_OP_SET_WIDEBAND_SPEECH,
3806 				      MGMT_STATUS_REJECTED);
3807 		goto unlock;
3808 	}
3809 
3810 	if (cp->val)
3811 		changed = !hci_dev_test_and_set_flag(hdev,
3812 						   HCI_WIDEBAND_SPEECH_ENABLED);
3813 	else
3814 		changed = hci_dev_test_and_clear_flag(hdev,
3815 						   HCI_WIDEBAND_SPEECH_ENABLED);
3816 
3817 	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3818 	if (err < 0)
3819 		goto unlock;
3820 
3821 	if (changed)
3822 		err = new_settings(hdev, sk);
3823 
3824 unlock:
3825 	hci_dev_unlock(hdev);
3826 	return err;
3827 }
3828 
/* Handle MGMT_OP_READ_CONTROLLER_CAP: report security capabilities in
 * EIR-style TLV form (security flags, max encryption key sizes and,
 * when available, the LE TX power range).
 *
 * buf must be large enough for the response header plus all appended
 * TLVs (2 + 3 + 4 + 4 + 4 = 17 bytes at most with the current fields).
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3895 
/* Experimental feature UUIDs. The byte arrays are stored little-endian,
 * i.e. byte-reversed relative to the canonical UUID string quoted above
 * each table.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3927 
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental
 * features available on this controller (or globally when hdev is
 * NULL) together with their enabled/settings flags.
 *
 * Each feature entry is 20 bytes (16-byte UUID + 4-byte flags); buf
 * must therefore hold 2 + 20 * <max features> bytes.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[102];   /* Enough space for 5 features: 2 + 20 * 5 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature is global, i.e. only on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) indicates that toggling also changes the
		 * supported settings.
		 */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
4006 
4007 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4008 					  struct sock *skip)
4009 {
4010 	struct mgmt_ev_exp_feature_changed ev;
4011 
4012 	memset(&ev, 0, sizeof(ev));
4013 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
4014 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4015 
4016 	// Do we need to be atomic with the conn_flags?
4017 	if (enabled && privacy_mode_capable(hdev))
4018 		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4019 	else
4020 		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4021 
4022 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4023 				  &ev, sizeof(ev),
4024 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4025 
4026 }
4027 
4028 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4029 			       bool enabled, struct sock *skip)
4030 {
4031 	struct mgmt_ev_exp_feature_changed ev;
4032 
4033 	memset(&ev, 0, sizeof(ev));
4034 	memcpy(ev.uuid, uuid, 16);
4035 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4036 
4037 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4038 				  &ev, sizeof(ev),
4039 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4040 }
4041 
/* Declare an entry in the experimental-features dispatch table,
 * mapping a feature UUID to its set handler.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4047 
/* The zero key uuid is special. Multiple exp features are set through it. */
/* Writing to the zero UUID disables all experimental features: the
 * global debug feature (non-controller index) and, when the controller
 * is powered off, LL privacy. Replies with an all-zero feature entry.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be toggled while powered off */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4084 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggle the global debug experimental feature (bt_dbg). Only valid on
 * the non-controller index with a single boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other sockets only when the value actually flipped */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4131 
4132 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4133 				   struct mgmt_cp_set_exp_feature *cp,
4134 				   u16 data_len)
4135 {
4136 	struct mgmt_rp_set_exp_feature rp;
4137 	bool val, changed;
4138 	int err;
4139 	u32 flags;
4140 
4141 	/* Command requires to use the controller index */
4142 	if (!hdev)
4143 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4144 				       MGMT_OP_SET_EXP_FEATURE,
4145 				       MGMT_STATUS_INVALID_INDEX);
4146 
4147 	/* Changes can only be made when controller is powered down */
4148 	if (hdev_is_powered(hdev))
4149 		return mgmt_cmd_status(sk, hdev->id,
4150 				       MGMT_OP_SET_EXP_FEATURE,
4151 				       MGMT_STATUS_REJECTED);
4152 
4153 	/* Parameters are limited to a single octet */
4154 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4155 		return mgmt_cmd_status(sk, hdev->id,
4156 				       MGMT_OP_SET_EXP_FEATURE,
4157 				       MGMT_STATUS_INVALID_PARAMS);
4158 
4159 	/* Only boolean on/off is supported */
4160 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4161 		return mgmt_cmd_status(sk, hdev->id,
4162 				       MGMT_OP_SET_EXP_FEATURE,
4163 				       MGMT_STATUS_INVALID_PARAMS);
4164 
4165 	val = !!cp->param[0];
4166 
4167 	if (val) {
4168 		changed = !hci_dev_test_and_set_flag(hdev,
4169 						     HCI_ENABLE_LL_PRIVACY);
4170 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4171 
4172 		/* Enable LL privacy + supported settings changed */
4173 		flags = BIT(0) | BIT(1);
4174 	} else {
4175 		changed = hci_dev_test_and_clear_flag(hdev,
4176 						      HCI_ENABLE_LL_PRIVACY);
4177 
4178 		/* Disable LL privacy + supported settings changed */
4179 		flags = BIT(1);
4180 	}
4181 
4182 	memcpy(rp.uuid, rpa_resolution_uuid, 16);
4183 	rp.flags = cpu_to_le32(flags);
4184 
4185 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4186 
4187 	err = mgmt_cmd_complete(sk, hdev->id,
4188 				MGMT_OP_SET_EXP_FEATURE, 0,
4189 				&rp, sizeof(rp));
4190 
4191 	if (changed)
4192 		exp_ll_privacy_feature_changed(val, hdev, sk);
4193 
4194 	return err;
4195 }
4196 
/* Toggle the quality report experimental feature, via either the
 * driver's set_quality_report hook or the AOSP vendor extension.
 * Takes one boolean parameter octet on a controller index.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Serialize against other request processing */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Prefer the driver hook over the AOSP extension */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4270 
4271 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4272 				  struct mgmt_cp_set_exp_feature *cp,
4273 				  u16 data_len)
4274 {
4275 	bool val, changed;
4276 	int err;
4277 	struct mgmt_rp_set_exp_feature rp;
4278 
4279 	/* Command requires to use a valid controller index */
4280 	if (!hdev)
4281 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4282 				       MGMT_OP_SET_EXP_FEATURE,
4283 				       MGMT_STATUS_INVALID_INDEX);
4284 
4285 	/* Parameters are limited to a single octet */
4286 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4287 		return mgmt_cmd_status(sk, hdev->id,
4288 				       MGMT_OP_SET_EXP_FEATURE,
4289 				       MGMT_STATUS_INVALID_PARAMS);
4290 
4291 	/* Only boolean on/off is supported */
4292 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4293 		return mgmt_cmd_status(sk, hdev->id,
4294 				       MGMT_OP_SET_EXP_FEATURE,
4295 				       MGMT_STATUS_INVALID_PARAMS);
4296 
4297 	val = !!cp->param[0];
4298 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4299 
4300 	if (!hdev->get_data_path_id) {
4301 		return mgmt_cmd_status(sk, hdev->id,
4302 				       MGMT_OP_SET_EXP_FEATURE,
4303 				       MGMT_STATUS_NOT_SUPPORTED);
4304 	}
4305 
4306 	if (changed) {
4307 		if (val)
4308 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4309 		else
4310 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4311 	}
4312 
4313 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4314 		    val, changed);
4315 
4316 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4317 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4318 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4319 	err = mgmt_cmd_complete(sk, hdev->id,
4320 				MGMT_OP_SET_EXP_FEATURE, 0,
4321 				&rp, sizeof(rp));
4322 
4323 	if (changed)
4324 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4325 
4326 	return err;
4327 }
4328 
4329 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4330 					  struct mgmt_cp_set_exp_feature *cp,
4331 					  u16 data_len)
4332 {
4333 	bool val, changed;
4334 	int err;
4335 	struct mgmt_rp_set_exp_feature rp;
4336 
4337 	/* Command requires to use a valid controller index */
4338 	if (!hdev)
4339 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4340 				       MGMT_OP_SET_EXP_FEATURE,
4341 				       MGMT_STATUS_INVALID_INDEX);
4342 
4343 	/* Parameters are limited to a single octet */
4344 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4345 		return mgmt_cmd_status(sk, hdev->id,
4346 				       MGMT_OP_SET_EXP_FEATURE,
4347 				       MGMT_STATUS_INVALID_PARAMS);
4348 
4349 	/* Only boolean on/off is supported */
4350 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4351 		return mgmt_cmd_status(sk, hdev->id,
4352 				       MGMT_OP_SET_EXP_FEATURE,
4353 				       MGMT_STATUS_INVALID_PARAMS);
4354 
4355 	val = !!cp->param[0];
4356 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4357 
4358 	if (!hci_dev_le_state_simultaneous(hdev)) {
4359 		return mgmt_cmd_status(sk, hdev->id,
4360 				       MGMT_OP_SET_EXP_FEATURE,
4361 				       MGMT_STATUS_NOT_SUPPORTED);
4362 	}
4363 
4364 	if (changed) {
4365 		if (val)
4366 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4367 		else
4368 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4369 	}
4370 
4371 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4372 		    val, changed);
4373 
4374 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4375 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4376 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4377 	err = mgmt_cmd_complete(sk, hdev->id,
4378 				MGMT_OP_SET_EXP_FEATURE, 0,
4379 				&rp, sizeof(rp));
4380 
4381 	if (changed)
4382 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4383 
4384 	return err;
4385 }
4386 
/* Dispatch table mapping experimental feature UUIDs to their setter
 * functions; consumed by set_exp_feature().  Terminated by a NULL
 * entry so it can be iterated without a length.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	/* ZERO_KEY is presumably the all-zero UUID used to reset
	 * experimental features - confirm against set_zero_key_func.
	 */
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4404 
4405 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4406 			   void *data, u16 data_len)
4407 {
4408 	struct mgmt_cp_set_exp_feature *cp = data;
4409 	size_t i = 0;
4410 
4411 	bt_dev_dbg(hdev, "sock %p", sk);
4412 
4413 	for (i = 0; exp_features[i].uuid; i++) {
4414 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4415 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4416 	}
4417 
4418 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4419 			       MGMT_OP_SET_EXP_FEATURE,
4420 			       MGMT_STATUS_NOT_SUPPORTED);
4421 }
4422 
4423 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4424 			    u16 data_len)
4425 {
4426 	struct mgmt_cp_get_device_flags *cp = data;
4427 	struct mgmt_rp_get_device_flags rp;
4428 	struct bdaddr_list_with_flags *br_params;
4429 	struct hci_conn_params *params;
4430 	u32 supported_flags;
4431 	u32 current_flags = 0;
4432 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4433 
4434 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4435 		   &cp->addr.bdaddr, cp->addr.type);
4436 
4437 	hci_dev_lock(hdev);
4438 
4439 	supported_flags = hdev->conn_flags;
4440 
4441 	memset(&rp, 0, sizeof(rp));
4442 
4443 	if (cp->addr.type == BDADDR_BREDR) {
4444 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4445 							      &cp->addr.bdaddr,
4446 							      cp->addr.type);
4447 		if (!br_params)
4448 			goto done;
4449 
4450 		current_flags = br_params->flags;
4451 	} else {
4452 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4453 						le_addr_type(cp->addr.type));
4454 
4455 		if (!params)
4456 			goto done;
4457 
4458 		current_flags = params->flags;
4459 	}
4460 
4461 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4462 	rp.addr.type = cp->addr.type;
4463 	rp.supported_flags = cpu_to_le32(supported_flags);
4464 	rp.current_flags = cpu_to_le32(current_flags);
4465 
4466 	status = MGMT_STATUS_SUCCESS;
4467 
4468 done:
4469 	hci_dev_unlock(hdev);
4470 
4471 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4472 				&rp, sizeof(rp));
4473 }
4474 
/* Broadcast a Device Flags Changed event with the new flag state.
 * @sk is the socket that triggered the change; mgmt_event()
 * presumably skips it so the originator doesn't receive its own
 * notification - matches the sk_skip usage elsewhere in this file.
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	/* Flag words go out in little-endian wire order */
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
4488 
4489 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4490 			    u16 len)
4491 {
4492 	struct mgmt_cp_set_device_flags *cp = data;
4493 	struct bdaddr_list_with_flags *br_params;
4494 	struct hci_conn_params *params;
4495 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4496 	u32 supported_flags;
4497 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4498 
4499 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4500 		   &cp->addr.bdaddr, cp->addr.type,
4501 		   __le32_to_cpu(current_flags));
4502 
4503 	// We should take hci_dev_lock() early, I think.. conn_flags can change
4504 	supported_flags = hdev->conn_flags;
4505 
4506 	if ((supported_flags | current_flags) != supported_flags) {
4507 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4508 			    current_flags, supported_flags);
4509 		goto done;
4510 	}
4511 
4512 	hci_dev_lock(hdev);
4513 
4514 	if (cp->addr.type == BDADDR_BREDR) {
4515 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4516 							      &cp->addr.bdaddr,
4517 							      cp->addr.type);
4518 
4519 		if (br_params) {
4520 			br_params->flags = current_flags;
4521 			status = MGMT_STATUS_SUCCESS;
4522 		} else {
4523 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4524 				    &cp->addr.bdaddr, cp->addr.type);
4525 		}
4526 	} else {
4527 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4528 						le_addr_type(cp->addr.type));
4529 		if (params) {
4530 			/* Devices using RPAs can only be programmed in the
4531 			 * acceptlist LL Privacy has been enable otherwise they
4532 			 * cannot mark HCI_CONN_FLAG_REMOTE_WAKEUP.
4533 			 */
4534 			if ((current_flags & HCI_CONN_FLAG_REMOTE_WAKEUP) &&
4535 			    !use_ll_privacy(hdev) &&
4536 			    hci_find_irk_by_addr(hdev, &params->addr,
4537 						 params->addr_type)) {
4538 				bt_dev_warn(hdev,
4539 					    "Cannot set wakeable for RPA");
4540 				goto unlock;
4541 			}
4542 
4543 			params->flags = current_flags;
4544 			status = MGMT_STATUS_SUCCESS;
4545 
4546 			/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4547 			 * has been set.
4548 			 */
4549 			if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
4550 				hci_update_passive_scan(hdev);
4551 		} else {
4552 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4553 				    &cp->addr.bdaddr,
4554 				    le_addr_type(cp->addr.type));
4555 		}
4556 	}
4557 
4558 unlock:
4559 	hci_dev_unlock(hdev);
4560 
4561 done:
4562 	if (status == MGMT_STATUS_SUCCESS)
4563 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4564 				     supported_flags, current_flags);
4565 
4566 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4567 				 &cp->addr, sizeof(cp->addr));
4568 }
4569 
/* Emit an Adv Monitor Added event for @handle.  @sk is the socket
 * that issued the add; mgmt_event() presumably skips it - confirm
 * against mgmt_event()'s skip-socket semantics.
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
4579 
4580 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4581 {
4582 	struct mgmt_ev_adv_monitor_removed ev;
4583 	struct mgmt_pending_cmd *cmd;
4584 	struct sock *sk_skip = NULL;
4585 	struct mgmt_cp_remove_adv_monitor *cp;
4586 
4587 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4588 	if (cmd) {
4589 		cp = cmd->param;
4590 
4591 		if (cp->monitor_handle)
4592 			sk_skip = cmd->sk;
4593 	}
4594 
4595 	ev.monitor_handle = cpu_to_le16(handle);
4596 
4597 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4598 }
4599 
/* Handle MGMT_OP_READ_ADV_MONITOR_FEATURES: report the supported and
 * enabled monitor features plus the handles of all registered
 * monitors.  The reply is variable-sized (fixed header + one u16 per
 * handle) and heap-allocated.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot the registered handles under the lock.
	 * NOTE(review): assumes the idr never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES entries - verify against the
	 * monitor registration path.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4648 
4649 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4650 {
4651 	struct mgmt_rp_add_adv_patterns_monitor rp;
4652 	struct mgmt_pending_cmd *cmd;
4653 	struct adv_monitor *monitor;
4654 	int err = 0;
4655 
4656 	hci_dev_lock(hdev);
4657 
4658 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4659 	if (!cmd) {
4660 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4661 		if (!cmd)
4662 			goto done;
4663 	}
4664 
4665 	monitor = cmd->user_data;
4666 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4667 
4668 	if (!status) {
4669 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4670 		hdev->adv_monitors_cnt++;
4671 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4672 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4673 		hci_update_passive_scan(hdev);
4674 	}
4675 
4676 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4677 				mgmt_status(status), &rp, sizeof(rp));
4678 	mgmt_pending_remove(cmd);
4679 
4680 done:
4681 	hci_dev_unlock(hdev);
4682 	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4683 		   rp.monitor_handle, status);
4684 
4685 	return err;
4686 }
4687 
/* Common tail for the Add Adv Patterns Monitor commands: register the
 * already-parsed monitor @m and reply to the caller.  A non-zero
 * @status from the caller (parse error) aborts before registration.
 * On every failure path the monitor is freed here; on success its
 * ownership passes to the monitor registry.  If the registration
 * needs a controller round-trip, the reply is deferred to
 * mgmt_add_adv_patterns_monitor_complete().
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Only one monitor/LE state-changing operation at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	/* pending == true means a controller round-trip is outstanding
	 * and the completion callback will send the reply later.
	 */
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	if (!pending) {
		/* Registered synchronously; reply right away */
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4750 
4751 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4752 				   struct mgmt_adv_rssi_thresholds *rssi)
4753 {
4754 	if (rssi) {
4755 		m->rssi.low_threshold = rssi->low_threshold;
4756 		m->rssi.low_threshold_timeout =
4757 		    __le16_to_cpu(rssi->low_threshold_timeout);
4758 		m->rssi.high_threshold = rssi->high_threshold;
4759 		m->rssi.high_threshold_timeout =
4760 		    __le16_to_cpu(rssi->high_threshold_timeout);
4761 		m->rssi.sampling_period = rssi->sampling_period;
4762 	} else {
4763 		/* Default values. These numbers are the least constricting
4764 		 * parameters for MSFT API to work, so it behaves as if there
4765 		 * are no rssi parameter to consider. May need to be changed
4766 		 * if other API are to be supported.
4767 		 */
4768 		m->rssi.low_threshold = -127;
4769 		m->rssi.low_threshold_timeout = 60;
4770 		m->rssi.high_threshold = -127;
4771 		m->rssi.high_threshold_timeout = 0;
4772 		m->rssi.sampling_period = 0;
4773 	}
4774 }
4775 
4776 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4777 				    struct mgmt_adv_pattern *patterns)
4778 {
4779 	u8 offset = 0, length = 0;
4780 	struct adv_pattern *p = NULL;
4781 	int i;
4782 
4783 	for (i = 0; i < pattern_count; i++) {
4784 		offset = patterns[i].offset;
4785 		length = patterns[i].length;
4786 		if (offset >= HCI_MAX_AD_LENGTH ||
4787 		    length > HCI_MAX_AD_LENGTH ||
4788 		    (offset + length) > HCI_MAX_AD_LENGTH)
4789 			return MGMT_STATUS_INVALID_PARAMS;
4790 
4791 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4792 		if (!p)
4793 			return MGMT_STATUS_NO_RESOURCES;
4794 
4795 		p->ad_type = patterns[i].ad_type;
4796 		p->offset = patterns[i].offset;
4797 		p->length = patterns[i].length;
4798 		memcpy(p->value, patterns[i].value, p->length);
4799 
4800 		INIT_LIST_HEAD(&p->list);
4801 		list_add(&p->list, &m->patterns);
4802 	}
4803 
4804 	return MGMT_STATUS_SUCCESS;
4805 }
4806 
4807 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4808 				    void *data, u16 len)
4809 {
4810 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4811 	struct adv_monitor *m = NULL;
4812 	u8 status = MGMT_STATUS_SUCCESS;
4813 	size_t expected_size = sizeof(*cp);
4814 
4815 	BT_DBG("request for %s", hdev->name);
4816 
4817 	if (len <= sizeof(*cp)) {
4818 		status = MGMT_STATUS_INVALID_PARAMS;
4819 		goto done;
4820 	}
4821 
4822 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4823 	if (len != expected_size) {
4824 		status = MGMT_STATUS_INVALID_PARAMS;
4825 		goto done;
4826 	}
4827 
4828 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4829 	if (!m) {
4830 		status = MGMT_STATUS_NO_RESOURCES;
4831 		goto done;
4832 	}
4833 
4834 	INIT_LIST_HEAD(&m->patterns);
4835 
4836 	parse_adv_monitor_rssi(m, NULL);
4837 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4838 
4839 done:
4840 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4841 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4842 }
4843 
4844 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4845 					 void *data, u16 len)
4846 {
4847 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4848 	struct adv_monitor *m = NULL;
4849 	u8 status = MGMT_STATUS_SUCCESS;
4850 	size_t expected_size = sizeof(*cp);
4851 
4852 	BT_DBG("request for %s", hdev->name);
4853 
4854 	if (len <= sizeof(*cp)) {
4855 		status = MGMT_STATUS_INVALID_PARAMS;
4856 		goto done;
4857 	}
4858 
4859 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4860 	if (len != expected_size) {
4861 		status = MGMT_STATUS_INVALID_PARAMS;
4862 		goto done;
4863 	}
4864 
4865 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4866 	if (!m) {
4867 		status = MGMT_STATUS_NO_RESOURCES;
4868 		goto done;
4869 	}
4870 
4871 	INIT_LIST_HEAD(&m->patterns);
4872 
4873 	parse_adv_monitor_rssi(m, &cp->rssi);
4874 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4875 
4876 done:
4877 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4878 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4879 }
4880 
4881 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4882 {
4883 	struct mgmt_rp_remove_adv_monitor rp;
4884 	struct mgmt_cp_remove_adv_monitor *cp;
4885 	struct mgmt_pending_cmd *cmd;
4886 	int err = 0;
4887 
4888 	hci_dev_lock(hdev);
4889 
4890 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4891 	if (!cmd)
4892 		goto done;
4893 
4894 	cp = cmd->param;
4895 	rp.monitor_handle = cp->monitor_handle;
4896 
4897 	if (!status)
4898 		hci_update_passive_scan(hdev);
4899 
4900 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4901 				mgmt_status(status), &rp, sizeof(rp));
4902 	mgmt_pending_remove(cmd);
4903 
4904 done:
4905 	hci_dev_unlock(hdev);
4906 	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4907 		   rp.monitor_handle, status);
4908 
4909 	return err;
4910 }
4911 
/* Handle MGMT_OP_REMOVE_ADV_MONITOR: remove a single monitor
 * (non-zero handle) or all monitors (handle 0).  If the removal needs
 * a controller round-trip, the reply is deferred to
 * mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Only one monitor/LE state-changing operation at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Handle 0 is the wildcard meaning "remove all monitors" */
	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4976 
/* hci_cmd_sync_queue() completion for Read Local OOB Data: translate
 * the controller reply (legacy or extended format, depending on
 * Secure Connections support) into the mgmt reply and free the
 * pending command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* A successful request may still carry a failing controller
	 * status in the first reply byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 hash/rand only; the unused P-256
		 * fields are trimmed from the reply size below.
		 */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5043 
5044 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5045 {
5046 	struct mgmt_pending_cmd *cmd = data;
5047 
5048 	if (bredr_sc_enabled(hdev))
5049 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5050 	else
5051 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5052 
5053 	if (IS_ERR(cmd->skb))
5054 		return PTR_ERR(cmd->skb);
5055 	else
5056 		return 0;
5057 }
5058 
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: queue the synchronous HCI
 * request; the reply is sent from read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB pairing data only exists for SSP-capable controllers */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	/* On queueing failure, report FAILED and drop the pending cmd */
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5100 
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA.  Two wire formats are accepted:
 * the legacy size carrying P-192 hash/rand only, and the extended
 * size that additionally carries P-256 values.  Zeroed key material
 * disables the corresponding OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy format is BR/EDR-only */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5208 
5209 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5210 				  void *data, u16 len)
5211 {
5212 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5213 	u8 status;
5214 	int err;
5215 
5216 	bt_dev_dbg(hdev, "sock %p", sk);
5217 
5218 	if (cp->addr.type != BDADDR_BREDR)
5219 		return mgmt_cmd_complete(sk, hdev->id,
5220 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5221 					 MGMT_STATUS_INVALID_PARAMS,
5222 					 &cp->addr, sizeof(cp->addr));
5223 
5224 	hci_dev_lock(hdev);
5225 
5226 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5227 		hci_remote_oob_data_clear(hdev);
5228 		status = MGMT_STATUS_SUCCESS;
5229 		goto done;
5230 	}
5231 
5232 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5233 	if (err < 0)
5234 		status = MGMT_STATUS_INVALID_PARAMS;
5235 	else
5236 		status = MGMT_STATUS_SUCCESS;
5237 
5238 done:
5239 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5240 				status, &cp->addr, sizeof(cp->addr));
5241 
5242 	hci_dev_unlock(hdev);
5243 	return err;
5244 }
5245 
5246 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5247 {
5248 	struct mgmt_pending_cmd *cmd;
5249 
5250 	bt_dev_dbg(hdev, "status %u", status);
5251 
5252 	hci_dev_lock(hdev);
5253 
5254 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5255 	if (!cmd)
5256 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5257 
5258 	if (!cmd)
5259 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5260 
5261 	if (cmd) {
5262 		cmd->cmd_complete(cmd, mgmt_status(status));
5263 		mgmt_pending_remove(cmd);
5264 	}
5265 
5266 	hci_dev_unlock(hdev);
5267 }
5268 
5269 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5270 				    uint8_t *mgmt_status)
5271 {
5272 	switch (type) {
5273 	case DISCOV_TYPE_LE:
5274 		*mgmt_status = mgmt_le_support(hdev);
5275 		if (*mgmt_status)
5276 			return false;
5277 		break;
5278 	case DISCOV_TYPE_INTERLEAVED:
5279 		*mgmt_status = mgmt_le_support(hdev);
5280 		if (*mgmt_status)
5281 			return false;
5282 		fallthrough;
5283 	case DISCOV_TYPE_BREDR:
5284 		*mgmt_status = mgmt_bredr_support(hdev);
5285 		if (*mgmt_status)
5286 			return false;
5287 		break;
5288 	default:
5289 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5290 		return false;
5291 	}
5292 
5293 	return true;
5294 }
5295 
5296 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5297 {
5298 	struct mgmt_pending_cmd *cmd = data;
5299 
5300 	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5301 	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5302 	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5303 		return;
5304 
5305 	bt_dev_dbg(hdev, "err %d", err);
5306 
5307 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5308 			  cmd->param, 1);
5309 	mgmt_pending_remove(cmd);
5310 
5311 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5312 				DISCOVERY_FINDING);
5313 }
5314 
/* hci_cmd_sync_queue() work function: kick off the actual discovery
 * procedure; @data (the pending cmd) is unused here.
 */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5319 
/* Common implementation behind the three discovery-start commands.
 * @op selects the mgmt opcode used in replies and whether limited
 * discovery is requested.  The HCI work is queued via
 * hci_cmd_sync_queue() and completed in start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Busy if discovery already runs or periodic inquiry is active */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5390 
5391 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5392 			   void *data, u16 len)
5393 {
5394 	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5395 					data, len);
5396 }
5397 
5398 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5399 				   void *data, u16 len)
5400 {
5401 	return start_discovery_internal(sk, hdev,
5402 					MGMT_OP_START_LIMITED_DISCOVERY,
5403 					data, len);
5404 }
5405 
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like Start Discovery
 * but with result filtering by RSSI threshold and an optional list of
 * 16-byte service UUIDs appended to the command parameters.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Cap so that sizeof(*cp) + uuid_count * 16 cannot exceed U16_MAX */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time and not while periodic
	 * inquiry is active.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The variable-length UUID list (16 bytes per entry) must make the
	 * total command length match exactly.
	 */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Copy the caller-supplied UUID list; freed again by
		 * hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5517 
5518 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5519 {
5520 	struct mgmt_pending_cmd *cmd;
5521 
5522 	bt_dev_dbg(hdev, "status %u", status);
5523 
5524 	hci_dev_lock(hdev);
5525 
5526 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5527 	if (cmd) {
5528 		cmd->cmd_complete(cmd, mgmt_status(status));
5529 		mgmt_pending_remove(cmd);
5530 	}
5531 
5532 	hci_dev_unlock(hdev);
5533 }
5534 
/* Completion callback for the work queued by stop_discovery(): reply
 * to the pending Stop Discovery command and, on success, mark the
 * discovery state machine as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* The pending command may already have been handled elsewhere */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply with the first stored parameter byte (presumably the
	 * discovery type - see stop_discovery()).
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
5551 
/* hci_cmd_sync work function for Stop Discovery; @data (the pending
 * cmd) is not needed by the sync helper.
 */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_stop_discovery_sync(hdev);

	return err;
}
5556 
/* Handler for MGMT_OP_STOP_DISCOVERY: validate that a discovery of
 * the given type is actually running, then queue the HCI work to stop
 * it; the reply is sent from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Nothing to stop if no discovery is in progress */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the running discovery's type */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5601 
/* Handler for MGMT_OP_CONFIRM_NAME: user space tells us whether the
 * remote name of an inquiry-cache entry is already known, which
 * decides whether a name-resolution round is still needed for it.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Confirm Name only makes sense while discovery is running */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* The address must refer to a cache entry with unknown name state */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolution needed, drop the entry
		 * from the resolve list.
		 */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Name still needed: reposition the entry for resolving */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
5643 
5644 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5645 			u16 len)
5646 {
5647 	struct mgmt_cp_block_device *cp = data;
5648 	u8 status;
5649 	int err;
5650 
5651 	bt_dev_dbg(hdev, "sock %p", sk);
5652 
5653 	if (!bdaddr_type_is_valid(cp->addr.type))
5654 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5655 					 MGMT_STATUS_INVALID_PARAMS,
5656 					 &cp->addr, sizeof(cp->addr));
5657 
5658 	hci_dev_lock(hdev);
5659 
5660 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5661 				  cp->addr.type);
5662 	if (err < 0) {
5663 		status = MGMT_STATUS_FAILED;
5664 		goto done;
5665 	}
5666 
5667 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5668 		   sk);
5669 	status = MGMT_STATUS_SUCCESS;
5670 
5671 done:
5672 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5673 				&cp->addr, sizeof(cp->addr));
5674 
5675 	hci_dev_unlock(hdev);
5676 
5677 	return err;
5678 }
5679 
5680 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5681 			  u16 len)
5682 {
5683 	struct mgmt_cp_unblock_device *cp = data;
5684 	u8 status;
5685 	int err;
5686 
5687 	bt_dev_dbg(hdev, "sock %p", sk);
5688 
5689 	if (!bdaddr_type_is_valid(cp->addr.type))
5690 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5691 					 MGMT_STATUS_INVALID_PARAMS,
5692 					 &cp->addr, sizeof(cp->addr));
5693 
5694 	hci_dev_lock(hdev);
5695 
5696 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5697 				  cp->addr.type);
5698 	if (err < 0) {
5699 		status = MGMT_STATUS_INVALID_PARAMS;
5700 		goto done;
5701 	}
5702 
5703 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5704 		   sk);
5705 	status = MGMT_STATUS_SUCCESS;
5706 
5707 done:
5708 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5709 				&cp->addr, sizeof(cp->addr));
5710 
5711 	hci_dev_unlock(hdev);
5712 
5713 	return err;
5714 }
5715 
/* hci_cmd_sync work function queued by set_device_id(): refresh the
 * EIR data so it reflects the new Device ID values.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
5720 
5721 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5722 			 u16 len)
5723 {
5724 	struct mgmt_cp_set_device_id *cp = data;
5725 	int err;
5726 	__u16 source;
5727 
5728 	bt_dev_dbg(hdev, "sock %p", sk);
5729 
5730 	source = __le16_to_cpu(cp->source);
5731 
5732 	if (source > 0x0002)
5733 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5734 				       MGMT_STATUS_INVALID_PARAMS);
5735 
5736 	hci_dev_lock(hdev);
5737 
5738 	hdev->devid_source = source;
5739 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5740 	hdev->devid_product = __le16_to_cpu(cp->product);
5741 	hdev->devid_version = __le16_to_cpu(cp->version);
5742 
5743 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5744 				NULL, 0);
5745 
5746 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
5747 
5748 	hci_dev_unlock(hdev);
5749 
5750 	return err;
5751 }
5752 
/* Log the outcome of re-enabling an advertising instance; nothing to
 * undo on failure, so only the severity of the message differs.
 */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
5760 
/* Completion callback for set_adv_sync(): mirror the controller's
 * advertising state in the HCI_ADVERTISING flag, answer all pending
 * Set Advertising commands and, if advertising was just disabled
 * while advertising instances exist, re-arm instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp stored a referenced socket in match; drop it */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance; fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
5808 
/* hci_cmd_sync work for Set Advertising: apply the requested mode
 * (0x00 = off, 0x01 = on, 0x02 = on + connectable flag) to the
 * controller.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
5842 
/* Handler for MGMT_OP_SET_ADVERTISING: toggle LE advertising
 * (0x00 = off, 0x01 = on, 0x02 = on + connectable). Where no HCI
 * traffic is needed, only the flags are updated and user space is
 * answered directly; otherwise the work is queued via
 * hci_cmd_sync_queue() and finished in set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Don't touch advertising while it is temporarily paused */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Broadcast New Settings only if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject while another Set Advertising or Set LE is in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5926 
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: store a static random
 * address for the LE controller. Only allowed while powered off;
 * BDADDR_ANY resets the stored address.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* The address may only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		/* BDADDR_NONE is not a usable address */
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5970 
5971 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5972 			   void *data, u16 len)
5973 {
5974 	struct mgmt_cp_set_scan_params *cp = data;
5975 	__u16 interval, window;
5976 	int err;
5977 
5978 	bt_dev_dbg(hdev, "sock %p", sk);
5979 
5980 	if (!lmp_le_capable(hdev))
5981 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5982 				       MGMT_STATUS_NOT_SUPPORTED);
5983 
5984 	interval = __le16_to_cpu(cp->interval);
5985 
5986 	if (interval < 0x0004 || interval > 0x4000)
5987 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5988 				       MGMT_STATUS_INVALID_PARAMS);
5989 
5990 	window = __le16_to_cpu(cp->window);
5991 
5992 	if (window < 0x0004 || window > 0x4000)
5993 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5994 				       MGMT_STATUS_INVALID_PARAMS);
5995 
5996 	if (window > interval)
5997 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5998 				       MGMT_STATUS_INVALID_PARAMS);
5999 
6000 	hci_dev_lock(hdev);
6001 
6002 	hdev->le_scan_interval = interval;
6003 	hdev->le_scan_window = window;
6004 
6005 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6006 				NULL, 0);
6007 
6008 	/* If background scan is running, restart it so new parameters are
6009 	 * loaded.
6010 	 */
6011 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6012 	    hdev->discovery.state == DISCOVERY_STOPPED)
6013 		hci_update_passive_scan(hdev);
6014 
6015 	hci_dev_unlock(hdev);
6016 
6017 	return err;
6018 }
6019 
/* Completion callback for write_fast_connectable_sync(): update the
 * HCI_FAST_CONNECTABLE flag and report the result to user space.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* cmd came from mgmt_pending_new(), so free (not remove) it */
	mgmt_pending_free(cmd);
}
6043 
6044 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6045 {
6046 	struct mgmt_pending_cmd *cmd = data;
6047 	struct mgmt_mode *cp = cmd->param;
6048 
6049 	return hci_write_fast_connectable_sync(hdev, cp->val);
6050 }
6051 
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: toggle the BR/EDR fast
 * connectable mode. While powered off only the flag is flipped; when
 * powered, the page-scan change is queued via hci_cmd_sync_queue()
 * and completed in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Needs BR/EDR enabled and at least Bluetooth 1.2 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just confirm the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: flip the flag only, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6107 
/* Completion callback for set_bredr_sync(): report the result to
 * user space, rolling back the optimistically-set HCI_BREDR_ENABLED
 * flag if the HCI work failed (set_bredr() flips it before queuing).
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* cmd came from mgmt_pending_new(), so free (not remove) it */
	mgmt_pending_free(cmd);
}
6130 
6131 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6132 {
6133 	int status;
6134 
6135 	status = hci_write_fast_connectable_sync(hdev, false);
6136 
6137 	if (!status)
6138 		status = hci_update_scan_sync(hdev);
6139 
6140 	/* Since only the advertising data flags will change, there
6141 	 * is no need to update the scan response data.
6142 	 */
6143 	if (!status)
6144 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6145 
6146 	return status;
6147 }
6148 
/* Handler for MGMT_OP_SET_BREDR: enable or disable BR/EDR support on
 * a dual-mode controller. While powered off this only flips flags;
 * when powered, disabling is rejected and enabling queues HCI work
 * finished in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only meaningful on dual-mode (BR/EDR + LE) controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled, so BR/EDR can only be toggled on top of it */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just confirm the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6249 
6250 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6251 {
6252 	struct mgmt_pending_cmd *cmd = data;
6253 	struct mgmt_mode *cp;
6254 
6255 	bt_dev_dbg(hdev, "err %d", err);
6256 
6257 	if (err) {
6258 		u8 mgmt_err = mgmt_status(err);
6259 
6260 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6261 		goto done;
6262 	}
6263 
6264 	cp = cmd->param;
6265 
6266 	switch (cp->val) {
6267 	case 0x00:
6268 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6269 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6270 		break;
6271 	case 0x01:
6272 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6273 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6274 		break;
6275 	case 0x02:
6276 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6277 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
6278 		break;
6279 	}
6280 
6281 	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6282 	new_settings(hdev, cmd->sk);
6283 
6284 done:
6285 	mgmt_pending_free(cmd);
6286 }
6287 
6288 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6289 {
6290 	struct mgmt_pending_cmd *cmd = data;
6291 	struct mgmt_mode *cp = cmd->param;
6292 	u8 val = !!cp->val;
6293 
6294 	/* Force write of val */
6295 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6296 
6297 	return hci_write_sc_support_sync(hdev, val);
6298 }
6299 
/* Handler for MGMT_OP_SET_SECURE_CONN: configure Secure Connections
 * (0x00 = off, 0x01 = on, 0x02 = SC Only). If no controller write is
 * needed (powered off, no SC support, or BR/EDR disabled), only the
 * flags are updated; otherwise HCI work is queued and finished in
 * set_secure_conn_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Needs either controller SC support or LE enabled */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR enabled on an SC-capable controller, SSP must be
	 * enabled first.
	 */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: no controller write is possible or needed */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change requested: just confirm the current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6380 
/* Handler for MGMT_OP_SET_DEBUG_KEYS: 0x00 = do not keep debug keys,
 * 0x01 = keep debug keys, 0x02 = additionally set HCI_USE_DEBUG_KEYS
 * and enable the controller's SSP debug mode while powered.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Sync the controller's SSP debug mode with HCI_USE_DEBUG_KEYS,
	 * but only when powered, the flag actually changed and SSP is on.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	/* Broadcast New Settings only if HCI_KEEP_DEBUG_KEYS changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6427 
/* MGMT_OP_SET_PRIVACY handler.
 *
 * cp->privacy: 0x00 disables privacy, 0x01 enables privacy, 0x02
 * enables limited privacy. The supplied IRK replaces hdev->irk. The
 * command is only accepted while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The identity key cannot be swapped while powered up */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the RPA expired so a fresh one gets generated */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the stored IRK when privacy is disabled */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6484 
6485 static bool irk_is_valid(struct mgmt_irk_info *irk)
6486 {
6487 	switch (irk->addr.type) {
6488 	case BDADDR_LE_PUBLIC:
6489 		return true;
6490 
6491 	case BDADDR_LE_RANDOM:
6492 		/* Two most significant bits shall be set */
6493 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6494 			return false;
6495 		return true;
6496 	}
6497 
6498 	return false;
6499 }
6500 
/* MGMT_OP_LOAD_IRKS handler.
 *
 * Replaces the entire set of stored Identity Resolving Keys with the
 * list supplied by user space. An invalid identity address rejects the
 * whole command; individual blocked keys are skipped with a warning.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound keeping the total command size within u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared key count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry up front so the existing IRKs are only
	 * cleared once the whole list is known to be acceptable.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not fatal */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space handling IRKs implies it can deal with RPAs */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6571 
6572 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6573 {
6574 	if (key->initiator != 0x00 && key->initiator != 0x01)
6575 		return false;
6576 
6577 	switch (key->addr.type) {
6578 	case BDADDR_LE_PUBLIC:
6579 		return true;
6580 
6581 	case BDADDR_LE_RANDOM:
6582 		/* Two most significant bits shall be set */
6583 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6584 			return false;
6585 		return true;
6586 	}
6587 
6588 	return false;
6589 }
6590 
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 *
 * Replaces the entire set of stored SMP Long Term Keys with the list
 * supplied by user space. Any invalid entry rejects the whole command;
 * blocked keys, debug keys and unknown key types are skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound keeping the total command size within u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before clearing the existing keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped, not fatal */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto SMP key type and the
		 * authenticated (MITM) property.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys are deliberately not loaded: fall
			 * through into the default case and skip the
			 * entry, discarding the values assigned above.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6686 
/* Completion callback for Get Connection Information: build the reply
 * from the values cached on the connection, release the connection
 * reference taken by get_conn_info() and free the pending command.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* NOTE(review): copies sizeof(rp.addr) bytes starting at the
	 * bdaddr member, i.e. relies on bdaddr being the first member of
	 * mgmt_addr_info so that the type byte is copied along with it —
	 * confirm against the mgmt.h struct layout.
	 */
	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* Report sentinel values on failure */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	/* Drop the hold/get taken when the command was queued. conn may
	 * already be NULL if get_conn_info_sync() released it.
	 */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	mgmt_pending_free(cmd);
}
6720 
/* hci_cmd_sync callback for Get Connection Information: refresh the
 * RSSI and, where still unknown, the TX power values cached on the
 * connection by issuing the corresponding HCI commands synchronously.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
		/* Release the reference taken by get_conn_info() here,
		 * since the completion callback only sees the NULLed
		 * user_data afterwards.
		 */
		if (cmd->user_data) {
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
		return MGMT_STATUS_NOT_CONNECTED;
	}

	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
6763 
/* MGMT_OP_GET_CONN_INFO handler.
 *
 * Replies with RSSI/TX power for the given connection. Cached values
 * are used while they are fresh; otherwise a sync command is queued to
 * re-read them from the controller, and the reply is sent from
 * get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look the connection up on the transport matching the address
	 * type.
	 */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd)
			err = -ENOMEM;
		else
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		/* Keep the connection alive until the completion callback
		 * runs; the hold/get pair is released there (or in
		 * get_conn_info_sync() on disconnect).
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6857 
6858 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
6859 {
6860 	struct mgmt_pending_cmd *cmd = data;
6861 	struct mgmt_cp_get_clock_info *cp = cmd->param;
6862 	struct mgmt_rp_get_clock_info rp;
6863 	struct hci_conn *conn = cmd->user_data;
6864 	u8 status = mgmt_status(err);
6865 
6866 	bt_dev_dbg(hdev, "err %d", err);
6867 
6868 	memset(&rp, 0, sizeof(rp));
6869 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6870 	rp.addr.type = cp->addr.type;
6871 
6872 	if (err)
6873 		goto complete;
6874 
6875 	rp.local_clock = cpu_to_le32(hdev->clock);
6876 
6877 	if (conn) {
6878 		rp.piconet_clock = cpu_to_le32(conn->clock);
6879 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6880 		hci_conn_drop(conn);
6881 		hci_conn_put(conn);
6882 	}
6883 
6884 complete:
6885 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6886 			  sizeof(rp));
6887 
6888 	mgmt_pending_free(cmd);
6889 }
6890 
/* hci_cmd_sync callback for Get Clock Information: read the local
 * clock (zeroed hci_cp, i.e. which = 0x00) and, when a connection was
 * supplied and is still up, the piconet clock for that connection.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	memset(&hci_cp, 0, sizeof(hci_cp));
	err = hci_read_clock_sync(hdev, &hci_cp);

	if (conn) {
		/* Make sure connection still exists */
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);

		if (conn && conn == cmd->user_data &&
		    conn->state == BT_CONNECTED) {
			hci_cp.handle = cpu_to_le16(conn->handle);
			hci_cp.which = 0x01; /* Piconet clock */
			err = hci_read_clock_sync(hdev, &hci_cp);
		} else if (cmd->user_data) {
			/* Connection went away: release the reference
			 * taken by get_clock_info() and clear user_data
			 * so the completion callback does not touch it.
			 */
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
	}

	return err;
}
6921 
/* MGMT_OP_GET_CLOCK_INFO handler.
 *
 * Queues a sync request to read the local clock and, when a BR/EDR
 * peer address is given, the piconet clock of that connection. The
 * reply is sent from get_clock_info_complete(). BDADDR_ANY requests
 * only the local clock.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects a specific connection's piconet
	 * clock and therefore requires an active connection.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);

	} else if (conn) {
		/* Keep the connection alive until the completion callback
		 * runs; the hold/get pair is released there.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
6989 
6990 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6991 {
6992 	struct hci_conn *conn;
6993 
6994 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6995 	if (!conn)
6996 		return false;
6997 
6998 	if (conn->dst_type != type)
6999 		return false;
7000 
7001 	if (conn->state != BT_CONNECTED)
7002 		return false;
7003 
7004 	return true;
7005 }
7006 
/* This function requires the caller holds hdev->lock.
 *
 * Create (or look up) connection parameters for the given identity
 * address and set their auto-connect policy, re-filing the params on
 * the matching pending-connection or pending-report list.
 * Returns 0 on success or -EIO if the params could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the params are currently on
	 * before re-filing them below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7051 
7052 static void device_added(struct sock *sk, struct hci_dev *hdev,
7053 			 bdaddr_t *bdaddr, u8 type, u8 action)
7054 {
7055 	struct mgmt_ev_device_added ev;
7056 
7057 	bacpy(&ev.addr.bdaddr, bdaddr);
7058 	ev.addr.type = type;
7059 	ev.action = action;
7060 
7061 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7062 }
7063 
/* hci_cmd_sync callback: re-evaluate passive scanning after a device
 * has been added to the connection parameter lists.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7068 
/* MGMT_OP_ADD_DEVICE handler.
 *
 * cp->action: 0x00 = background scan for device (report only),
 * 0x01 = allow incoming connection, 0x02 = auto-connect remote device.
 * BR/EDR addresses go on the accept list (action 0x01 only); LE
 * identity addresses get connection parameters with the matching
 * auto-connect policy.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Page scan may need enabling for the new entry */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	/* Notify other mgmt sockets and report the device flags */
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7170 
7171 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7172 			   bdaddr_t *bdaddr, u8 type)
7173 {
7174 	struct mgmt_ev_device_removed ev;
7175 
7176 	bacpy(&ev.addr.bdaddr, bdaddr);
7177 	ev.addr.type = type;
7178 
7179 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7180 }
7181 
/* hci_cmd_sync callback: re-evaluate passive scanning after a device
 * has been removed from the connection parameter lists.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7186 
/* MGMT_OP_REMOVE_DEVICE handler.
 *
 * A specific address removes that device from the BR/EDR accept list
 * or deletes its LE connection parameters. BDADDR_ANY (with address
 * type 0) flushes the whole accept list and all non-disabled LE
 * connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Page scan may need disabling now */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Params that were never added via Add Device cannot be
		 * removed through it either.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Flush the entire BR/EDR accept list */
		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		/* Remove all LE connection parameters except disabled
		 * ones; in-flight explicit connects are downgraded to
		 * HCI_AUTO_CONN_EXPLICIT instead of being freed.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7314 
/* MGMT_OP_LOAD_CONN_PARAM handler.
 *
 * Replaces all currently-disabled connection parameters with the list
 * supplied by user space. Individual invalid entries are logged and
 * skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound keeping the total command size within u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types carry connection parameters */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7399 
/* MGMT_OP_SET_EXTERNAL_CONFIG handler.
 *
 * Marks whether the controller has received its configuration from an
 * external source. Only valid while powered off and only on devices
 * with the external-config quirk. A change may move the controller
 * between the configured and unconfigured index lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state now disagrees with HCI_UNCONFIGURED,
	 * re-register the index under the correct list.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: bring it up via power_on */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose it as a raw device */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7455 
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS.
 *
 * Stores the requested public address in hdev->public_addr; the driver's
 * set_bdaddr callback programs it into the controller on power-up.  Only
 * permitted while powered off, with a non-zero address, on controllers
 * providing set_bdaddr.
 *
 * Returns 0 or a negative errno; the mgmt reply is sent to @sk.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY (all zero) is not a valid public address. */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Requires driver support for programming the address. */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If the new address completes the configuration, transition the
	 * index from unconfigured to configured: remove it, clear the flag
	 * and power it up through the config stage.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7507 
/* Completion handler for the HCI Read Local OOB (Extended) Data request
 * issued on behalf of MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Builds an EIR-encoded reply holding the P-192 and/or P-256 hash and
 * randomizer values taken from the controller's HCI reply in cmd->skb,
 * sends the command complete, and on success broadcasts a Local OOB Data
 * Updated event to sockets that enabled HCI_MGMT_OOB_DATA_EVENTS.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if this command is no longer the pending one. */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			/* First byte of the HCI reply is the HCI status. */
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status already holds a mgmt status here, so
		 * mapping it through mgmt_status() again looks redundant —
		 * confirm before relying on the exact value.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy (P-192 only) OOB reply. */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class-of-dev TLV (5) + two 16-byte value TLVs. */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended (P-192 + P-256) OOB reply. */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only: suppress the P-192 values. */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	/* Notify other interested sockets, excluding the requester. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7630 
7631 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7632 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7633 {
7634 	struct mgmt_pending_cmd *cmd;
7635 	int err;
7636 
7637 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7638 			       cp, sizeof(*cp));
7639 	if (!cmd)
7640 		return -ENOMEM;
7641 
7642 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7643 				 read_local_oob_ext_data_complete);
7644 
7645 	if (err < 0) {
7646 		mgmt_pending_remove(cmd);
7647 		return err;
7648 	}
7649 
7650 	return 0;
7651 }
7652 
/* Handle MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Returns local out-of-band pairing data for either the BR/EDR transport
 * or the LE transport, encoded as EIR in the reply.  For BR/EDR with SSP
 * enabled the data comes from the controller asynchronously via
 * read_local_ssp_oob_req(); for LE it is assembled synchronously from
 * local state (address, role, SC confirm/random, flags).
 *
 * Returns 0 or a negative errno; the reply carries a mgmt status.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pick the worst-case eir_len for the requested transport so the
	 * reply buffer below is large enough.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Rebuild eir_len from scratch as the data is appended. */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Controller provides the OOB data asynchronously;
			 * the reply is sent from the completion handler.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] is the address type: 0x01 random, 0x00 public. */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred. */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify other interested sockets, excluding the requester. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7813 
7814 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7815 {
7816 	u32 flags = 0;
7817 
7818 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7819 	flags |= MGMT_ADV_FLAG_DISCOV;
7820 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7821 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7822 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7823 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7824 	flags |= MGMT_ADV_PARAM_DURATION;
7825 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7826 	flags |= MGMT_ADV_PARAM_INTERVALS;
7827 	flags |= MGMT_ADV_PARAM_TX_POWER;
7828 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7829 
7830 	/* In extended adv TX_POWER returned from Set Adv Param
7831 	 * will be always valid.
7832 	 */
7833 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7834 	    ext_adv_capable(hdev))
7835 		flags |= MGMT_ADV_FLAG_TX_POWER;
7836 
7837 	if (ext_adv_capable(hdev)) {
7838 		flags |= MGMT_ADV_FLAG_SEC_1M;
7839 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7840 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7841 
7842 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7843 			flags |= MGMT_ADV_FLAG_SEC_2M;
7844 
7845 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7846 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7847 	}
7848 
7849 	return flags;
7850 }
7851 
7852 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7853 			     void *data, u16 data_len)
7854 {
7855 	struct mgmt_rp_read_adv_features *rp;
7856 	size_t rp_len;
7857 	int err;
7858 	struct adv_info *adv_instance;
7859 	u32 supported_flags;
7860 	u8 *instance;
7861 
7862 	bt_dev_dbg(hdev, "sock %p", sk);
7863 
7864 	if (!lmp_le_capable(hdev))
7865 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7866 				       MGMT_STATUS_REJECTED);
7867 
7868 	hci_dev_lock(hdev);
7869 
7870 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7871 	rp = kmalloc(rp_len, GFP_ATOMIC);
7872 	if (!rp) {
7873 		hci_dev_unlock(hdev);
7874 		return -ENOMEM;
7875 	}
7876 
7877 	supported_flags = get_supported_adv_flags(hdev);
7878 
7879 	rp->supported_flags = cpu_to_le32(supported_flags);
7880 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7881 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7882 	rp->max_instances = hdev->le_num_of_adv_sets;
7883 	rp->num_instances = hdev->adv_instance_cnt;
7884 
7885 	instance = rp->instance;
7886 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7887 		*instance = adv_instance->instance;
7888 		instance++;
7889 	}
7890 
7891 	hci_dev_unlock(hdev);
7892 
7893 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7894 				MGMT_STATUS_SUCCESS, rp, rp_len);
7895 
7896 	kfree(rp);
7897 
7898 	return err;
7899 }
7900 
7901 static u8 calculate_name_len(struct hci_dev *hdev)
7902 {
7903 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7904 
7905 	return eir_append_local_name(hdev, buf, 0);
7906 }
7907 
7908 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7909 			   bool is_adv_data)
7910 {
7911 	u8 max_len = HCI_MAX_AD_LENGTH;
7912 
7913 	if (is_adv_data) {
7914 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7915 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7916 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7917 			max_len -= 3;
7918 
7919 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7920 			max_len -= 3;
7921 	} else {
7922 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7923 			max_len -= calculate_name_len(hdev);
7924 
7925 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7926 			max_len -= 4;
7927 	}
7928 
7929 	return max_len;
7930 }
7931 
7932 static bool flags_managed(u32 adv_flags)
7933 {
7934 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7935 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7936 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7937 }
7938 
7939 static bool tx_power_managed(u32 adv_flags)
7940 {
7941 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7942 }
7943 
7944 static bool name_managed(u32 adv_flags)
7945 {
7946 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7947 }
7948 
7949 static bool appearance_managed(u32 adv_flags)
7950 {
7951 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7952 }
7953 
7954 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7955 			      u8 len, bool is_adv_data)
7956 {
7957 	int i, cur_len;
7958 	u8 max_len;
7959 
7960 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7961 
7962 	if (len > max_len)
7963 		return false;
7964 
7965 	/* Make sure that the data is correctly formatted. */
7966 	for (i = 0; i < len; i += (cur_len + 1)) {
7967 		cur_len = data[i];
7968 
7969 		if (!cur_len)
7970 			continue;
7971 
7972 		if (data[i + 1] == EIR_FLAGS &&
7973 		    (!is_adv_data || flags_managed(adv_flags)))
7974 			return false;
7975 
7976 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7977 			return false;
7978 
7979 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7980 			return false;
7981 
7982 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7983 			return false;
7984 
7985 		if (data[i + 1] == EIR_APPEARANCE &&
7986 		    appearance_managed(adv_flags))
7987 			return false;
7988 
7989 		/* If the current field length would exceed the total data
7990 		 * length, then it's invalid.
7991 		 */
7992 		if (i + cur_len >= len)
7993 			return false;
7994 	}
7995 
7996 	return true;
7997 }
7998 
7999 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8000 {
8001 	u32 supported_flags, phy_flags;
8002 
8003 	/* The current implementation only supports a subset of the specified
8004 	 * flags. Also need to check mutual exclusiveness of sec flags.
8005 	 */
8006 	supported_flags = get_supported_adv_flags(hdev);
8007 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8008 	if (adv_flags & ~supported_flags ||
8009 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8010 		return false;
8011 
8012 	return true;
8013 }
8014 
8015 static bool adv_busy(struct hci_dev *hdev)
8016 {
8017 	return pending_find(MGMT_OP_SET_LE, hdev);
8018 }
8019 
8020 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8021 			     int err)
8022 {
8023 	struct adv_info *adv, *n;
8024 
8025 	bt_dev_dbg(hdev, "err %d", err);
8026 
8027 	hci_dev_lock(hdev);
8028 
8029 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8030 		u8 instance;
8031 
8032 		if (!adv->pending)
8033 			continue;
8034 
8035 		if (!err) {
8036 			adv->pending = false;
8037 			continue;
8038 		}
8039 
8040 		instance = adv->instance;
8041 
8042 		if (hdev->cur_adv_instance == instance)
8043 			cancel_adv_timeout(hdev);
8044 
8045 		hci_remove_adv_instance(hdev, instance);
8046 		mgmt_advertising_removed(sk, hdev, instance);
8047 	}
8048 
8049 	hci_dev_unlock(hdev);
8050 }
8051 
8052 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8053 {
8054 	struct mgmt_pending_cmd *cmd = data;
8055 	struct mgmt_cp_add_advertising *cp = cmd->param;
8056 	struct mgmt_rp_add_advertising rp;
8057 
8058 	memset(&rp, 0, sizeof(rp));
8059 
8060 	rp.instance = cp->instance;
8061 
8062 	if (err)
8063 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8064 				mgmt_status(err));
8065 	else
8066 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8067 				  mgmt_status(err), &rp, sizeof(rp));
8068 
8069 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8070 
8071 	mgmt_pending_free(cmd);
8072 }
8073 
8074 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8075 {
8076 	struct mgmt_pending_cmd *cmd = data;
8077 	struct mgmt_cp_add_advertising *cp = cmd->param;
8078 
8079 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8080 }
8081 
/* Handle MGMT_OP_ADD_ADVERTISING.
 *
 * Registers (or replaces) an advertising instance with the supplied
 * flags, data, scan response, timeout and duration, then schedules it if
 * the controller state allows.  The reply is deferred to
 * add_advertising_complete() when HCI work is queued.
 *
 * Returns 0 or a negative errno.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance identifiers are 1-based and bounded by the controller. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The trailing buffer must hold exactly adv data + scan rsp. */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while the controller is off. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Scan response data follows the adv data in cp->data. */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8216 
8217 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8218 					int err)
8219 {
8220 	struct mgmt_pending_cmd *cmd = data;
8221 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8222 	struct mgmt_rp_add_ext_adv_params rp;
8223 	struct adv_info *adv;
8224 	u32 flags;
8225 
8226 	BT_DBG("%s", hdev->name);
8227 
8228 	hci_dev_lock(hdev);
8229 
8230 	adv = hci_find_adv_instance(hdev, cp->instance);
8231 	if (!adv)
8232 		goto unlock;
8233 
8234 	rp.instance = cp->instance;
8235 	rp.tx_power = adv->tx_power;
8236 
8237 	/* While we're at it, inform userspace of the available space for this
8238 	 * advertisement, given the flags that will be used.
8239 	 */
8240 	flags = __le32_to_cpu(cp->flags);
8241 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8242 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8243 
8244 	if (err) {
8245 		/* If this advertisement was previously advertising and we
8246 		 * failed to update it, we signal that it has been removed and
8247 		 * delete its structure
8248 		 */
8249 		if (!adv->pending)
8250 			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8251 
8252 		hci_remove_adv_instance(hdev, cp->instance);
8253 
8254 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8255 				mgmt_status(err));
8256 	} else {
8257 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8258 				  mgmt_status(err), &rp, sizeof(rp));
8259 	}
8260 
8261 unlock:
8262 	if (cmd)
8263 		mgmt_pending_free(cmd);
8264 
8265 	hci_dev_unlock(hdev);
8266 }
8267 
8268 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8269 {
8270 	struct mgmt_pending_cmd *cmd = data;
8271 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8272 
8273 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8274 }
8275 
8276 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8277 			      void *data, u16 data_len)
8278 {
8279 	struct mgmt_cp_add_ext_adv_params *cp = data;
8280 	struct mgmt_rp_add_ext_adv_params rp;
8281 	struct mgmt_pending_cmd *cmd = NULL;
8282 	u32 flags, min_interval, max_interval;
8283 	u16 timeout, duration;
8284 	u8 status;
8285 	s8 tx_power;
8286 	int err;
8287 
8288 	BT_DBG("%s", hdev->name);
8289 
8290 	status = mgmt_le_support(hdev);
8291 	if (status)
8292 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8293 				       status);
8294 
8295 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8296 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8297 				       MGMT_STATUS_INVALID_PARAMS);
8298 
8299 	/* The purpose of breaking add_advertising into two separate MGMT calls
8300 	 * for params and data is to allow more parameters to be added to this
8301 	 * structure in the future. For this reason, we verify that we have the
8302 	 * bare minimum structure we know of when the interface was defined. Any
8303 	 * extra parameters we don't know about will be ignored in this request.
8304 	 */
8305 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8306 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8307 				       MGMT_STATUS_INVALID_PARAMS);
8308 
8309 	flags = __le32_to_cpu(cp->flags);
8310 
8311 	if (!requested_adv_flags_are_valid(hdev, flags))
8312 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8313 				       MGMT_STATUS_INVALID_PARAMS);
8314 
8315 	hci_dev_lock(hdev);
8316 
8317 	/* In new interface, we require that we are powered to register */
8318 	if (!hdev_is_powered(hdev)) {
8319 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8320 				      MGMT_STATUS_REJECTED);
8321 		goto unlock;
8322 	}
8323 
8324 	if (adv_busy(hdev)) {
8325 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8326 				      MGMT_STATUS_BUSY);
8327 		goto unlock;
8328 	}
8329 
8330 	/* Parse defined parameters from request, use defaults otherwise */
8331 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8332 		  __le16_to_cpu(cp->timeout) : 0;
8333 
8334 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8335 		   __le16_to_cpu(cp->duration) :
8336 		   hdev->def_multi_adv_rotation_duration;
8337 
8338 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8339 		       __le32_to_cpu(cp->min_interval) :
8340 		       hdev->le_adv_min_interval;
8341 
8342 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8343 		       __le32_to_cpu(cp->max_interval) :
8344 		       hdev->le_adv_max_interval;
8345 
8346 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8347 		   cp->tx_power :
8348 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8349 
8350 	/* Create advertising instance with no advertising or response data */
8351 	err = hci_add_adv_instance(hdev, cp->instance, flags,
8352 				   0, NULL, 0, NULL, timeout, duration,
8353 				   tx_power, min_interval, max_interval);
8354 
8355 	if (err < 0) {
8356 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8357 				      MGMT_STATUS_FAILED);
8358 		goto unlock;
8359 	}
8360 
8361 	/* Submit request for advertising params if ext adv available */
8362 	if (ext_adv_capable(hdev)) {
8363 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8364 				       data, data_len);
8365 		if (!cmd) {
8366 			err = -ENOMEM;
8367 			hci_remove_adv_instance(hdev, cp->instance);
8368 			goto unlock;
8369 		}
8370 
8371 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8372 					 add_ext_adv_params_complete);
8373 		if (err < 0)
8374 			mgmt_pending_free(cmd);
8375 	} else {
8376 		rp.instance = cp->instance;
8377 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8378 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8379 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8380 		err = mgmt_cmd_complete(sk, hdev->id,
8381 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8382 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8383 	}
8384 
8385 unlock:
8386 	hci_dev_unlock(hdev);
8387 
8388 	return err;
8389 }
8390 
8391 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8392 {
8393 	struct mgmt_pending_cmd *cmd = data;
8394 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8395 	struct mgmt_rp_add_advertising rp;
8396 
8397 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8398 
8399 	memset(&rp, 0, sizeof(rp));
8400 
8401 	rp.instance = cp->instance;
8402 
8403 	if (err)
8404 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8405 				mgmt_status(err));
8406 	else
8407 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8408 				  mgmt_status(err), &rp, sizeof(rp));
8409 
8410 	mgmt_pending_free(cmd);
8411 }
8412 
8413 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8414 {
8415 	struct mgmt_pending_cmd *cmd = data;
8416 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8417 	int err;
8418 
8419 	if (ext_adv_capable(hdev)) {
8420 		err = hci_update_adv_data_sync(hdev, cp->instance);
8421 		if (err)
8422 			return err;
8423 
8424 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8425 		if (err)
8426 			return err;
8427 
8428 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8429 	}
8430 
8431 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8432 }
8433 
/* Handle MGMT_OP_ADD_EXT_ADV_DATA.
 *
 * Second half of the two-step extended advertising registration: fills in
 * the advertising and scan response data of an instance previously
 * created by add_ext_adv_params() and schedules it.  On validation or
 * queueing failure the half-built instance is removed again.
 *
 * Returns 0 or a negative errno.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by a prior params command. */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8552 
8553 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8554 					int err)
8555 {
8556 	struct mgmt_pending_cmd *cmd = data;
8557 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8558 	struct mgmt_rp_remove_advertising rp;
8559 
8560 	bt_dev_dbg(hdev, "err %d", err);
8561 
8562 	memset(&rp, 0, sizeof(rp));
8563 	rp.instance = cp->instance;
8564 
8565 	if (err)
8566 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8567 				mgmt_status(err));
8568 	else
8569 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8570 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8571 
8572 	mgmt_pending_free(cmd);
8573 }
8574 
8575 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8576 {
8577 	struct mgmt_pending_cmd *cmd = data;
8578 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8579 	int err;
8580 
8581 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8582 	if (err)
8583 		return err;
8584 
8585 	if (list_empty(&hdev->adv_instances))
8586 		err = hci_disable_advertising_sync(hdev);
8587 
8588 	return err;
8589 }
8590 
/* Handler for MGMT_OP_REMOVE_ADVERTISING: remove one advertising
 * instance (cp->instance != 0) or all instances (cp->instance == 0).
 * The actual removal runs from the cmd_sync queue; the response is
 * sent from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing advertising
	 * instance; instance 0 means "remove all" and needs no lookup.
	 */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Don't race with an in-flight Set LE command */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances are registered at all */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8638 
8639 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8640 			     void *data, u16 data_len)
8641 {
8642 	struct mgmt_cp_get_adv_size_info *cp = data;
8643 	struct mgmt_rp_get_adv_size_info rp;
8644 	u32 flags, supported_flags;
8645 
8646 	bt_dev_dbg(hdev, "sock %p", sk);
8647 
8648 	if (!lmp_le_capable(hdev))
8649 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8650 				       MGMT_STATUS_REJECTED);
8651 
8652 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8653 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8654 				       MGMT_STATUS_INVALID_PARAMS);
8655 
8656 	flags = __le32_to_cpu(cp->flags);
8657 
8658 	/* The current implementation only supports a subset of the specified
8659 	 * flags.
8660 	 */
8661 	supported_flags = get_supported_adv_flags(hdev);
8662 	if (flags & ~supported_flags)
8663 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8664 				       MGMT_STATUS_INVALID_PARAMS);
8665 
8666 	rp.instance = cp->instance;
8667 	rp.flags = cp->flags;
8668 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8669 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8670 
8671 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8672 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8673 }
8674 
/* Dispatch table for incoming management commands.  The array index is
 * the mgmt opcode, so entry order must match the opcode numbering (and
 * stay in sync with the mgmt_commands[] list at the top of this file).
 * The size field is the expected parameter length, or the minimum
 * length for entries flagged HCI_MGMT_VAR_LEN.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8802 
/* Emit the appropriate Index Added event(s) for a newly registered
 * controller.  Legacy listeners get (Unconfigured) Index Added, while
 * extended listeners additionally get Extended Index Added carrying a
 * type and bus field.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are invisible to the management interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		/* Unknown device types generate no events at all */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8834 
/* Emit the appropriate Index Removed event(s) when a controller is
 * unregistered.  For primary controllers all still-pending commands
 * are first completed with MGMT_STATUS_INVALID_INDEX.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are invisible to the management interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Fail every pending command (opcode 0 matches all) */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		/* Unknown device types generate no events at all */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8869 
/* Called once a power-on sequence has finished (err == 0 on success).
 * Restarts LE auto-connect actions, answers all pending Set Powered
 * commands and emits New Settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* settings_rsp records the first responding socket in match.sk
	 * (with a held reference) so it can be skipped for the event.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
8892 
/* Tear-down counterpart of mgmt_power_on(): answer pending Set Powered
 * commands, fail all other pending commands, announce the cleared
 * class of device and emit New Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce the class change if it was non-zero to begin with */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8926 
8927 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8928 {
8929 	struct mgmt_pending_cmd *cmd;
8930 	u8 status;
8931 
8932 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8933 	if (!cmd)
8934 		return;
8935 
8936 	if (err == -ERFKILL)
8937 		status = MGMT_STATUS_RFKILLED;
8938 	else
8939 		status = MGMT_STATUS_FAILED;
8940 
8941 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8942 
8943 	mgmt_pending_remove(cmd);
8944 }
8945 
8946 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8947 		       bool persistent)
8948 {
8949 	struct mgmt_ev_new_link_key ev;
8950 
8951 	memset(&ev, 0, sizeof(ev));
8952 
8953 	ev.store_hint = persistent;
8954 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8955 	ev.key.addr.type = BDADDR_BREDR;
8956 	ev.key.type = key->type;
8957 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8958 	ev.key.pin_len = key->pin_len;
8959 
8960 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8961 }
8962 
8963 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8964 {
8965 	switch (ltk->type) {
8966 	case SMP_LTK:
8967 	case SMP_LTK_RESPONDER:
8968 		if (ltk->authenticated)
8969 			return MGMT_LTK_AUTHENTICATED;
8970 		return MGMT_LTK_UNAUTHENTICATED;
8971 	case SMP_LTK_P256:
8972 		if (ltk->authenticated)
8973 			return MGMT_LTK_P256_AUTH;
8974 		return MGMT_LTK_P256_UNAUTH;
8975 	case SMP_LTK_P256_DEBUG:
8976 		return MGMT_LTK_P256_DEBUG;
8977 	}
8978 
8979 	return MGMT_LTK_UNAUTHENTICATED;
8980 }
8981 
8982 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8983 {
8984 	struct mgmt_ev_new_long_term_key ev;
8985 
8986 	memset(&ev, 0, sizeof(ev));
8987 
8988 	/* Devices using resolvable or non-resolvable random addresses
8989 	 * without providing an identity resolving key don't require
8990 	 * to store long term keys. Their addresses will change the
8991 	 * next time around.
8992 	 *
8993 	 * Only when a remote device provides an identity address
8994 	 * make sure the long term key is stored. If the remote
8995 	 * identity is known, the long term keys are internally
8996 	 * mapped to the identity address. So allow static random
8997 	 * and public addresses here.
8998 	 */
8999 	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9000 	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
9001 		ev.store_hint = 0x00;
9002 	else
9003 		ev.store_hint = persistent;
9004 
9005 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9006 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9007 	ev.key.type = mgmt_ltk_type(key);
9008 	ev.key.enc_size = key->enc_size;
9009 	ev.key.ediv = key->ediv;
9010 	ev.key.rand = key->rand;
9011 
9012 	if (key->type == SMP_LTK)
9013 		ev.key.initiator = 1;
9014 
9015 	/* Make sure we copy only the significant bytes based on the
9016 	 * encryption key size, and set the rest of the value to zeroes.
9017 	 */
9018 	memcpy(ev.key.val, key->val, key->enc_size);
9019 	memset(ev.key.val + key->enc_size, 0,
9020 	       sizeof(ev.key.val) - key->enc_size);
9021 
9022 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9023 }
9024 
9025 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9026 {
9027 	struct mgmt_ev_new_irk ev;
9028 
9029 	memset(&ev, 0, sizeof(ev));
9030 
9031 	ev.store_hint = persistent;
9032 
9033 	bacpy(&ev.rpa, &irk->rpa);
9034 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9035 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9036 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9037 
9038 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9039 }
9040 
9041 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9042 		   bool persistent)
9043 {
9044 	struct mgmt_ev_new_csrk ev;
9045 
9046 	memset(&ev, 0, sizeof(ev));
9047 
9048 	/* Devices using resolvable or non-resolvable random addresses
9049 	 * without providing an identity resolving key don't require
9050 	 * to store signature resolving keys. Their addresses will change
9051 	 * the next time around.
9052 	 *
9053 	 * Only when a remote device provides an identity address
9054 	 * make sure the signature resolving key is stored. So allow
9055 	 * static random and public addresses here.
9056 	 */
9057 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9058 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9059 		ev.store_hint = 0x00;
9060 	else
9061 		ev.store_hint = persistent;
9062 
9063 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9064 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9065 	ev.key.type = csrk->type;
9066 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9067 
9068 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9069 }
9070 
9071 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9072 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9073 			 u16 max_interval, u16 latency, u16 timeout)
9074 {
9075 	struct mgmt_ev_new_conn_param ev;
9076 
9077 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9078 		return;
9079 
9080 	memset(&ev, 0, sizeof(ev));
9081 	bacpy(&ev.addr.bdaddr, bdaddr);
9082 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9083 	ev.store_hint = store_hint;
9084 	ev.min_interval = cpu_to_le16(min_interval);
9085 	ev.max_interval = cpu_to_le16(max_interval);
9086 	ev.latency = cpu_to_le16(latency);
9087 	ev.timeout = cpu_to_le16(timeout);
9088 
9089 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9090 }
9091 
9092 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9093 			   u8 *name, u8 name_len)
9094 {
9095 	struct sk_buff *skb;
9096 	struct mgmt_ev_device_connected *ev;
9097 	u16 eir_len = 0;
9098 	u32 flags = 0;
9099 
9100 	/* allocate buff for LE or BR/EDR adv */
9101 	if (conn->le_adv_data_len > 0)
9102 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9103 				     sizeof(*ev) + conn->le_adv_data_len);
9104 	else
9105 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9106 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9107 				     eir_precalc_len(sizeof(conn->dev_class)));
9108 
9109 	ev = skb_put(skb, sizeof(*ev));
9110 	bacpy(&ev->addr.bdaddr, &conn->dst);
9111 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9112 
9113 	if (conn->out)
9114 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9115 
9116 	ev->flags = __cpu_to_le32(flags);
9117 
9118 	/* We must ensure that the EIR Data fields are ordered and
9119 	 * unique. Keep it simple for now and avoid the problem by not
9120 	 * adding any BR/EDR data to the LE adv.
9121 	 */
9122 	if (conn->le_adv_data_len > 0) {
9123 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9124 		eir_len = conn->le_adv_data_len;
9125 	} else {
9126 		if (name)
9127 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9128 
9129 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9130 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9131 						    conn->dev_class, sizeof(conn->dev_class));
9132 	}
9133 
9134 	ev->eir_len = cpu_to_le16(eir_len);
9135 
9136 	mgmt_event_skb(skb, NULL);
9137 }
9138 
9139 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9140 {
9141 	struct sock **sk = data;
9142 
9143 	cmd->cmd_complete(cmd, 0);
9144 
9145 	*sk = cmd->sk;
9146 	sock_hold(*sk);
9147 
9148 	mgmt_pending_remove(cmd);
9149 }
9150 
/* Complete one pending Unpair Device command: notify listeners that
 * the device was unpaired, then finish and free the command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9161 
9162 bool mgmt_powering_down(struct hci_dev *hdev)
9163 {
9164 	struct mgmt_pending_cmd *cmd;
9165 	struct mgmt_mode *cp;
9166 
9167 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9168 	if (!cmd)
9169 		return false;
9170 
9171 	cp = cmd->param;
9172 	if (!cp->val)
9173 		return true;
9174 
9175 	return false;
9176 }
9177 
/* Emit a Device Disconnected event and complete any related pending
 * Disconnect/Unpair Device commands.  Also expedites a pending
 * power-off once the last connection is gone.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections previously reported via Device Connected
	 * generate a Device Disconnected event.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores the first responding socket in sk (with
	 * a held reference) so it can be skipped for the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9217 
9218 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9219 			    u8 link_type, u8 addr_type, u8 status)
9220 {
9221 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9222 	struct mgmt_cp_disconnect *cp;
9223 	struct mgmt_pending_cmd *cmd;
9224 
9225 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9226 			     hdev);
9227 
9228 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9229 	if (!cmd)
9230 		return;
9231 
9232 	cp = cmd->param;
9233 
9234 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9235 		return;
9236 
9237 	if (cp->addr.type != bdaddr_type)
9238 		return;
9239 
9240 	cmd->cmd_complete(cmd, mgmt_status(status));
9241 	mgmt_pending_remove(cmd);
9242 }
9243 
9244 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9245 			 u8 addr_type, u8 status)
9246 {
9247 	struct mgmt_ev_connect_failed ev;
9248 
9249 	/* The connection is still in hci_conn_hash so test for 1
9250 	 * instead of 0 to know if this is the last one.
9251 	 */
9252 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9253 		cancel_delayed_work(&hdev->power_off);
9254 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
9255 	}
9256 
9257 	bacpy(&ev.addr.bdaddr, bdaddr);
9258 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9259 	ev.status = mgmt_status(status);
9260 
9261 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9262 }
9263 
9264 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9265 {
9266 	struct mgmt_ev_pin_code_request ev;
9267 
9268 	bacpy(&ev.addr.bdaddr, bdaddr);
9269 	ev.addr.type = BDADDR_BREDR;
9270 	ev.secure = secure;
9271 
9272 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9273 }
9274 
9275 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9276 				  u8 status)
9277 {
9278 	struct mgmt_pending_cmd *cmd;
9279 
9280 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9281 	if (!cmd)
9282 		return;
9283 
9284 	cmd->cmd_complete(cmd, mgmt_status(status));
9285 	mgmt_pending_remove(cmd);
9286 }
9287 
9288 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9289 				      u8 status)
9290 {
9291 	struct mgmt_pending_cmd *cmd;
9292 
9293 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9294 	if (!cmd)
9295 		return;
9296 
9297 	cmd->cmd_complete(cmd, mgmt_status(status));
9298 	mgmt_pending_remove(cmd);
9299 }
9300 
9301 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9302 			      u8 link_type, u8 addr_type, u32 value,
9303 			      u8 confirm_hint)
9304 {
9305 	struct mgmt_ev_user_confirm_request ev;
9306 
9307 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9308 
9309 	bacpy(&ev.addr.bdaddr, bdaddr);
9310 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9311 	ev.confirm_hint = confirm_hint;
9312 	ev.value = cpu_to_le32(value);
9313 
9314 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9315 			  NULL);
9316 }
9317 
9318 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9319 			      u8 link_type, u8 addr_type)
9320 {
9321 	struct mgmt_ev_user_passkey_request ev;
9322 
9323 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9324 
9325 	bacpy(&ev.addr.bdaddr, bdaddr);
9326 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9327 
9328 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9329 			  NULL);
9330 }
9331 
9332 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9333 				      u8 link_type, u8 addr_type, u8 status,
9334 				      u8 opcode)
9335 {
9336 	struct mgmt_pending_cmd *cmd;
9337 
9338 	cmd = pending_find(opcode, hdev);
9339 	if (!cmd)
9340 		return -ENOENT;
9341 
9342 	cmd->cmd_complete(cmd, mgmt_status(status));
9343 	mgmt_pending_remove(cmd);
9344 
9345 	return 0;
9346 }
9347 
/* HCI completion hook for MGMT_OP_USER_CONFIRM_REPLY. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9354 
/* HCI completion hook for MGMT_OP_USER_CONFIRM_NEG_REPLY. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9362 
/* HCI completion hook for MGMT_OP_USER_PASSKEY_REPLY. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9369 
/* HCI completion hook for MGMT_OP_USER_PASSKEY_NEG_REPLY. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9377 
9378 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9379 			     u8 link_type, u8 addr_type, u32 passkey,
9380 			     u8 entered)
9381 {
9382 	struct mgmt_ev_passkey_notify ev;
9383 
9384 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9385 
9386 	bacpy(&ev.addr.bdaddr, bdaddr);
9387 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9388 	ev.passkey = __cpu_to_le32(passkey);
9389 	ev.entered = entered;
9390 
9391 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9392 }
9393 
/* Notify listeners that authentication with the remote device failed
 * and complete any pending pairing command for the connection.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The socket that issued the pairing command gets the command
	 * response instead of the event, so skip it here.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9414 
/* Completion handler for enabling/disabling link level authentication.
 * Synchronizes the HCI_LINK_SECURITY flag with the controller state,
 * answers pending Set Link Security commands and emits New Settings
 * when the setting actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag and
	 * remember whether that was an actual change.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9441 
9442 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9443 {
9444 	struct cmd_lookup *match = data;
9445 
9446 	if (match->sk == NULL) {
9447 		match->sk = cmd->sk;
9448 		sock_hold(match->sk);
9449 	}
9450 }
9451 
/* Completion handler for class-of-device updates: on success announce
 * the new class to listeners, skipping the socket that triggered the
 * change via Set Device Class, Add UUID or Remove UUID.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Any of these three commands may have caused the update */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9470 
/* Handle completion of a local name change on the controller.
 *
 * Assumes @name points to a buffer of at least HCI_MAX_NAME_LENGTH
 * bytes (TODO confirm with callers). When no Set Local Name command is
 * pending the change originated outside mgmt, so hdev->dev_name is
 * synced here; if it happened as part of powering on, no events are
 * emitted at all.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	/* Non-zero HCI status means the name was not actually updated. */
	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending command: keep the cached name in sync. */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9498 
9499 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9500 {
9501 	int i;
9502 
9503 	for (i = 0; i < uuid_count; i++) {
9504 		if (!memcmp(uuid, uuids[i], 16))
9505 			return true;
9506 	}
9507 
9508 	return false;
9509 }
9510 
/* Walk the EIR/advertising data in @eir (@eir_len bytes) and return
 * true if any advertised service UUID (16-, 32- or 128-bit) matches an
 * entry in the @uuids filter list. Shorter UUIDs are expanded to full
 * 128-bit form on top of the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];	/* length excludes the length byte */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the significant data. */
		if (field_len == 0)
			break;

		/* Stop if the field claims more bytes than remain. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {	/* EIR data type */
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Each entry is 2 bytes; data starts at eir[2]. */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				/* Bytes 12-13 of the expanded UUID carry
				 * the 16-bit alias from the air (LE order).
				 */
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Each entry is 4 bytes, placed in bytes 12-15. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are copied verbatim. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance to the next length/type/data field. */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9565 
/* Schedule a restart of the LE scan so that, with strict duplicate
 * filtering, devices are re-reported with fresh RSSI values.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart when the current discovery window would end
	 * before the restart delay elapses anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
9580 
/* Apply the active service-discovery filter (RSSI threshold and UUID
 * list) to an advertising/inquiry result. Returns false when the result
 * should be dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
9625 
9626 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
9627 				  bdaddr_t *bdaddr, u8 addr_type)
9628 {
9629 	struct mgmt_ev_adv_monitor_device_lost ev;
9630 
9631 	ev.monitor_handle = cpu_to_le16(handle);
9632 	bacpy(&ev.addr.bdaddr, bdaddr);
9633 	ev.addr.type = addr_type;
9634 
9635 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
9636 		   NULL);
9637 }
9638 
/* Emit an ADV_MONITOR_DEVICE_FOUND event built from an existing
 * DEVICE_FOUND event skb plus the matched monitor's handle.
 *
 * @skb is only read, never consumed; the caller retains ownership.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* Size of the new event: the DEVICE_FOUND payload plus the extra
	 * fields that ADV_MONITOR_DEVICE_FOUND adds on top of it.
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
9668 
/* Route a DEVICE_FOUND event skb to the right recipients, depending on
 * whether the report stems from discovery/passive scanning and/or a
 * matched Advertisement Monitor.
 *
 * Consumes @skb on every path: it is either forwarded via
 * mgmt_event_skb() or freed with kfree_skb().
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		/* Fast path: no monitor notifications pending. */
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* First report for this monitored device: send
			 * the per-monitor event once.
			 */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		/* Remember if any device still awaits its notification. */
		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
9732 
/* Report a discovered remote device to userspace.
 *
 * The report is dropped early when the kernel did not initiate the
 * discovery (unless passive LE scanning or advertisement monitoring is
 * active), when it fails the service-discovery filter, or when limited
 * discovery is active and the device lacks the limited discoverable
 * flag. Otherwise the EIR data, an optional synthesized Class of Device
 * field and any scan response are packed into an skb that is handed off
 * (and consumed) by mgmt_adv_monitor_device_found().
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		/* Synthesize a Class of Device EIR field (2 header bytes
		 * plus the 3-byte class) when the data lacks one.
		 */
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
9818 
9819 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9820 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9821 {
9822 	struct sk_buff *skb;
9823 	struct mgmt_ev_device_found *ev;
9824 	u16 eir_len = 0;
9825 	u32 flags = 0;
9826 
9827 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
9828 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
9829 
9830 	ev = skb_put(skb, sizeof(*ev));
9831 	bacpy(&ev->addr.bdaddr, bdaddr);
9832 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9833 	ev->rssi = rssi;
9834 
9835 	if (name)
9836 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9837 	else
9838 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9839 
9840 	ev->eir_len = cpu_to_le16(eir_len);
9841 	ev->flags = cpu_to_le32(flags);
9842 
9843 	mgmt_event_skb(skb, NULL);
9844 }
9845 
9846 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9847 {
9848 	struct mgmt_ev_discovering ev;
9849 
9850 	bt_dev_dbg(hdev, "discovering %u", discovering);
9851 
9852 	memset(&ev, 0, sizeof(ev));
9853 	ev.type = hdev->discovery.type;
9854 	ev.discovering = discovering;
9855 
9856 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9857 }
9858 
9859 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9860 {
9861 	struct mgmt_ev_controller_suspend ev;
9862 
9863 	ev.suspend_state = state;
9864 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9865 }
9866 
9867 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9868 		   u8 addr_type)
9869 {
9870 	struct mgmt_ev_controller_resume ev;
9871 
9872 	ev.wake_reason = reason;
9873 	if (bdaddr) {
9874 		bacpy(&ev.addr.bdaddr, bdaddr);
9875 		ev.addr.type = addr_type;
9876 	} else {
9877 		memset(&ev.addr, 0, sizeof(ev.addr));
9878 	}
9879 
9880 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9881 }
9882 
/* Registration record for the HCI control channel: routes mgmt commands
 * to mgmt_handlers and initializes per-hdev mgmt state.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9889 
/* Register the mgmt control channel. Returns 0 on success or a negative
 * error code from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9894 
/* Unregister the mgmt control channel on module/subsystem teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9899