xref: /openbmc/linux/net/bluetooth/mgmt.c (revision aa74c44b)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
#define MGMT_VERSION	1
#define MGMT_REVISION	21

/* Management opcodes that a trusted control-channel socket may send.
 * Reported verbatim by Read Management Commands (read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};

/* Management events that may be delivered to a trusted socket. Also
 * reported verbatim by Read Management Commands.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};

/* Read-only subset of opcodes permitted for untrusted sockets. */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

/* Events that untrusted sockets are allowed to receive. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
206 
/* 2 second timeout, in jiffies */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 128-bit all-zero value */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table. Indexed directly by the HCI
 * status code; each entry is the closest matching MGMT status. Codes
 * beyond the end of the table map to MGMT_STATUS_FAILED (see
 * mgmt_status()).
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
279 
280 static u8 mgmt_errno_status(int err)
281 {
282 	switch (err) {
283 	case 0:
284 		return MGMT_STATUS_SUCCESS;
285 	case -EPERM:
286 		return MGMT_STATUS_REJECTED;
287 	case -EINVAL:
288 		return MGMT_STATUS_INVALID_PARAMS;
289 	case -EOPNOTSUPP:
290 		return MGMT_STATUS_NOT_SUPPORTED;
291 	case -EBUSY:
292 		return MGMT_STATUS_BUSY;
293 	case -ETIMEDOUT:
294 		return MGMT_STATUS_AUTH_FAILED;
295 	case -ENOMEM:
296 		return MGMT_STATUS_NO_RESOURCES;
297 	case -EISCONN:
298 		return MGMT_STATUS_ALREADY_CONNECTED;
299 	case -ENOTCONN:
300 		return MGMT_STATUS_DISCONNECTED;
301 	}
302 
303 	return MGMT_STATUS_FAILED;
304 }
305 
306 static u8 mgmt_status(int err)
307 {
308 	if (err < 0)
309 		return mgmt_errno_status(err);
310 
311 	if (err < ARRAY_SIZE(mgmt_status_table))
312 		return mgmt_status_table[err];
313 
314 	return MGMT_STATUS_FAILED;
315 }
316 
/* Broadcast an index-related event on the control channel, restricted
 * to sockets carrying the given flag. No socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

/* Like mgmt_index_event() but additionally skips one socket (typically
 * the originator of the triggering command).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

/* Broadcast an event to all trusted control-channel sockets except
 * skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

/* skb-based variant of mgmt_event() for pre-built event packets. */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
343 
344 static u8 le_addr_type(u8 mgmt_addr_type)
345 {
346 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
347 		return ADDR_LE_DEV_PUBLIC;
348 	else
349 		return ADDR_LE_DEV_RANDOM;
350 }
351 
/* Fill in the MGMT interface version/revision pair used by the Read
 * Management Version Information reply. The revision is encoded
 * little-endian on the wire.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
359 
360 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
361 			u16 data_len)
362 {
363 	struct mgmt_rp_read_version rp;
364 
365 	bt_dev_dbg(hdev, "sock %p", sk);
366 
367 	mgmt_fill_version_info(&rp);
368 
369 	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
370 				 &rp, sizeof(rp));
371 }
372 
373 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
374 			 u16 data_len)
375 {
376 	struct mgmt_rp_read_commands *rp;
377 	u16 num_commands, num_events;
378 	size_t rp_size;
379 	int i, err;
380 
381 	bt_dev_dbg(hdev, "sock %p", sk);
382 
383 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
384 		num_commands = ARRAY_SIZE(mgmt_commands);
385 		num_events = ARRAY_SIZE(mgmt_events);
386 	} else {
387 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
388 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
389 	}
390 
391 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
392 
393 	rp = kmalloc(rp_size, GFP_KERNEL);
394 	if (!rp)
395 		return -ENOMEM;
396 
397 	rp->num_commands = cpu_to_le16(num_commands);
398 	rp->num_events = cpu_to_le16(num_events);
399 
400 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
401 		__le16 *opcode = rp->opcodes;
402 
403 		for (i = 0; i < num_commands; i++, opcode++)
404 			put_unaligned_le16(mgmt_commands[i], opcode);
405 
406 		for (i = 0; i < num_events; i++, opcode++)
407 			put_unaligned_le16(mgmt_events[i], opcode);
408 	} else {
409 		__le16 *opcode = rp->opcodes;
410 
411 		for (i = 0; i < num_commands; i++, opcode++)
412 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
413 
414 		for (i = 0; i < num_events; i++, opcode++)
415 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
416 	}
417 
418 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
419 				rp, rp_size);
420 	kfree(rp);
421 
422 	return err;
423 }
424 
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the indexes of all
 * configured primary controllers. Controllers that are unconfigured,
 * still setting up, in config state, bound to a user channel or
 * raw-only are excluded.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries, used to size
	 * the reply buffer.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* Each index is a 16-bit value. GFP_ATOMIC because the device
	 * list read lock is still held.
	 */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, applying the additional
	 * exclusion criteria.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* Recompute the reply length: the second pass may have added
	 * fewer entries than the first pass counted.
	 */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
484 
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: mirror image of
 * read_index_list() — reply with the indexes of all primary
 * controllers that ARE unconfigured, using the same two-pass
 * count-then-fill scheme and the same exclusions (setup, config,
 * user channel, raw-only).
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: size the reply. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: the device list read lock is held. */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the qualifying indexes. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* Recompute length with the final (possibly smaller) count. */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
544 
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: reply with one entry per
 * controller (primary and AMP), each carrying a type byte
 * (0x00 configured primary, 0x01 unconfigured primary, 0x02 AMP),
 * the bus type and the index. As a side effect, switches the calling
 * socket over to extended index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for sizing the reply. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: the device list read lock is held. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill the entries, skipping controllers in setup,
	 * config, user-channel or raw-only state.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
618 
619 static bool is_configured(struct hci_dev *hdev)
620 {
621 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
622 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
623 		return false;
624 
625 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
626 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
627 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
628 		return false;
629 
630 	return true;
631 }
632 
633 static __le32 get_missing_options(struct hci_dev *hdev)
634 {
635 	u32 options = 0;
636 
637 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
638 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
639 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
640 
641 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
642 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
643 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
644 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
645 
646 	return cpu_to_le32(options);
647 }
648 
/* Broadcast a New Configuration Options event carrying the currently
 * missing options, to sockets that opted in via
 * HCI_MGMT_OPTION_EVENTS, skipping the given socket.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

/* Complete a configuration command with the current missing-options
 * bitmask as the reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
664 
665 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
666 			    void *data, u16 data_len)
667 {
668 	struct mgmt_rp_read_config_info rp;
669 	u32 options = 0;
670 
671 	bt_dev_dbg(hdev, "sock %p", sk);
672 
673 	hci_dev_lock(hdev);
674 
675 	memset(&rp, 0, sizeof(rp));
676 	rp.manufacturer = cpu_to_le16(hdev->manufacturer);
677 
678 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
679 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
680 
681 	if (hdev->set_bdaddr)
682 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
683 
684 	rp.supported_options = cpu_to_le32(options);
685 	rp.missing_options = get_missing_options(hdev);
686 
687 	hci_dev_unlock(hdev);
688 
689 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
690 				 &rp, sizeof(rp));
691 }
692 
/* Build the bitmask of PHYs this controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature page. Nesting mirrors the
 * feature dependencies: EDR 3M and slot variants only exist when EDR 2M
 * does.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is mandatory for any BR/EDR controller. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for any LE controller. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
744 
/* Build the bitmask of currently selected PHYs. For BR/EDR this is
 * derived from hdev->pkt_type; note the EDR packet-type bits are
 * inverted ("don't use" semantics), hence the !(pkt_type & ...) tests.
 * For LE it is derived from the default TX/RX PHY preference masks.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is always in use. */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits in pkt_type are inverted: a set bit
			 * means the packet type is disabled.
			 */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
807 
808 static u32 get_configurable_phys(struct hci_dev *hdev)
809 {
810 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
811 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
812 }
813 
/* Build the bitmask of settings this controller can support, based on
 * its LMP/LE capabilities and quirks. Returned host-endian; callers
 * convert for the wire.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of transport. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs at least Bluetooth 1.2. */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs the HS config
			 * option compiled in.
			 */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration is only possible with external config support
	 * or a driver-provided set_bdaddr callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
860 
/* Build the bitmask of settings currently active on the controller,
 * derived from the hdev flag bits. Returned host-endian; callers
 * convert for the wire.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
931 
/* Look up a pending management command for this opcode/controller on
 * the control channel.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
936 
937 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
938 {
939 	struct mgmt_pending_cmd *cmd;
940 
941 	/* If there's a pending mgmt command the flags will not yet have
942 	 * their final values, so check for this first.
943 	 */
944 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
945 	if (cmd) {
946 		struct mgmt_mode *cp = cmd->param;
947 		if (cp->val == 0x01)
948 			return LE_AD_GENERAL;
949 		else if (cp->val == 0x02)
950 			return LE_AD_LIMITED;
951 	} else {
952 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
953 			return LE_AD_LIMITED;
954 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
955 			return LE_AD_GENERAL;
956 	}
957 
958 	return 0;
959 }
960 
961 bool mgmt_get_connectable(struct hci_dev *hdev)
962 {
963 	struct mgmt_pending_cmd *cmd;
964 
965 	/* If there's a pending mgmt command the flag will not yet have
966 	 * it's final value, so check for this first.
967 	 */
968 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
969 	if (cmd) {
970 		struct mgmt_mode *cp = cmd->param;
971 
972 		return cp->val;
973 	}
974 
975 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
976 }
977 
/* hci_cmd_sync callback: refresh the EIR data and class of device on
 * the controller.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

/* Delayed work that flushes the service cache: if HCI_SERVICE_CACHE is
 * set it is cleared and an EIR/class update is queued; otherwise this
 * is a no-op.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
996 
/* hci_cmd_sync callback: restart advertising so a fresh RPA gets
 * programmed into the controller.
 */
static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

/* Delayed work run when the resolvable private address times out:
 * marks the RPA expired and, if advertising is active, queues a
 * restart so a new RPA is used.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1023 
/* One-time per-controller mgmt initialization, performed the first
 * time a mgmt command targets @hdev.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* HCI_MGMT doubles as the "already initialized" marker. */
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1039 
/* Handle MGMT_OP_READ_INFO: reply with the controller's address,
 * HCI version, manufacturer, settings, class of device and names.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Hold hdev->lock so the snapshot below is self-consistent. */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1069 
/* Append class of device, appearance and both device names to @eir
 * in EIR type-length-value format. Returns the number of bytes
 * written. The caller must supply a buffer large enough for all
 * fields (callers here use 512 bytes).
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	/* Class of device is only meaningful when BR/EDR is enabled. */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	/* Appearance is an LE-only attribute. */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1093 
/* Handle MGMT_OP_READ_EXT_INFO: like Read Info but with the class,
 * appearance and names packed as EIR data after the fixed header.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* 512 bytes must be enough for the fixed reply header plus the
	 * EIR fields appended by append_eir_data_to_buf() -- TODO
	 * confirm against the maximum name lengths.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1133 
/* Broadcast an Extended Controller Information Changed event (with
 * freshly built EIR data) to sockets that opted into extended info
 * events, skipping @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	/* Same sizing assumption as read_ext_controller_info(): the
	 * event header plus EIR fields must fit in 512 bytes.
	 */
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1149 
/* Complete @opcode on @sk with the controller's current settings
 * bitmask (little-endian) as the response payload.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1157 
1158 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1159 {
1160 	struct mgmt_ev_advertising_added ev;
1161 
1162 	ev.instance = instance;
1163 
1164 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1165 }
1166 
1167 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1168 			      u8 instance)
1169 {
1170 	struct mgmt_ev_advertising_removed ev;
1171 
1172 	ev.instance = instance;
1173 
1174 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1175 }
1176 
/* Disarm a pending advertising-instance expiry timer, if one is set. */
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
1184 
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Re-sort every connection-parameter entry onto the pending
	 * list matching its auto-connect policy after power on.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			/* Other policies stay off both pending lists. */
			break;
		}
	}
}
1209 
/* Broadcast a New Settings event with the current settings bitmask
 * to all sockets subscribed to setting events, skipping @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1217 
/* Completion handler for the Set Powered cmd_sync work: report the
 * result to the requester and, on power on, restore LE actions and
 * broadcast the settings change.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* restart_le_actions() requires hdev->lock. */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_free(cmd);
}
1247 
1248 static int set_powered_sync(struct hci_dev *hdev, void *data)
1249 {
1250 	struct mgmt_pending_cmd *cmd = data;
1251 	struct mgmt_mode *cp = cmd->param;
1252 
1253 	BT_DBG("%s", hdev->name);
1254 
1255 	return hci_set_powered_sync(hdev, cp->val);
1256 }
1257 
/* Handle MGMT_OP_SET_POWERED: validate the request, short-circuit
 * no-op and duplicate requests, then queue the actual power change
 * on the cmd_sync queue.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered may be in flight at a time. */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just confirm the settings. */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1297 
/* Public wrapper: broadcast a New Settings event to every subscribed
 * mgmt socket (no socket is skipped).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1302 
/* Context for mgmt_pending_foreach() callbacks that answer pending
 * commands and remember one requester socket for later event
 * delivery (the remembered socket holds a reference; the caller
 * must sock_put() it).
 */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket, or NULL */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1308 
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings, unlink and free it, and stash the first
 * requester's socket (with a held reference) in the lookup context.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);	/* caller releases via sock_put() */
	}

	mgmt_pending_free(cmd);
}
1324 
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status passed via @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1332 
1333 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1334 {
1335 	if (cmd->cmd_complete) {
1336 		u8 *status = data;
1337 
1338 		cmd->cmd_complete(cmd, *status);
1339 		mgmt_pending_remove(cmd);
1340 
1341 		return;
1342 	}
1343 
1344 	cmd_status_rsp(cmd, data);
1345 }
1346 
/* Generic cmd_complete handler: echo the original command parameters
 * back as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1352 
/* cmd_complete handler for commands whose parameters begin with a
 * struct mgmt_addr_info: echo only that address back in the reply.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1358 
1359 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1360 {
1361 	if (!lmp_bredr_capable(hdev))
1362 		return MGMT_STATUS_NOT_SUPPORTED;
1363 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1364 		return MGMT_STATUS_REJECTED;
1365 	else
1366 		return MGMT_STATUS_SUCCESS;
1367 }
1368 
1369 static u8 mgmt_le_support(struct hci_dev *hdev)
1370 {
1371 	if (!lmp_le_capable(hdev))
1372 		return MGMT_STATUS_NOT_SUPPORTED;
1373 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1374 		return MGMT_STATUS_REJECTED;
1375 	else
1376 		return MGMT_STATUS_SUCCESS;
1377 }
1378 
/* Completion handler for the Set Discoverable cmd_sync work: report
 * the result, arm the discoverable timeout on success, and roll back
 * the limited-discoverable flag on failure.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Undo the optimistic flag update done in set_discoverable(). */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the discoverable timeout now that the mode is active;
	 * set_discoverable() deliberately deferred this to here.
	 */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
	hci_dev_unlock(hdev);
}
1408 
/* Sync-queue callback for Set Discoverable: program the controller
 * with the discoverable flags already updated by set_discoverable().
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	/* Use bt_dev_dbg() for consistency with the rest of this file
	 * instead of the legacy BT_DBG("%s", hdev->name) form.
	 */
	bt_dev_dbg(hdev, "");

	return hci_update_discoverable_sync(hdev);
}
1415 
/* Handle MGMT_OP_SET_DISCOVERABLE: validate the mode/timeout combo,
 * handle the powered-off and timeout-only cases inline, otherwise
 * update the flags optimistically and queue the HCI update. The
 * discoverable timeout itself is armed in the completion handler.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	/* 0x00 = off, 0x01 = general, 0x02 = limited discoverable. */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first. */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1545 
/* Completion handler for the Set Connectable cmd_sync work: report
 * the result to the requester and broadcast the settings change on
 * success.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
	hci_dev_unlock(hdev);
}
1568 
/* Flag-only path of Set Connectable, used when the controller is
 * powered off: update the flags, confirm to the requester, and
 * broadcast/refresh scanning only if something actually changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Discoverable depends on connectable, so it must be
		 * cleared alongside it.
		 */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1597 
/* Sync-queue callback for Set Connectable: program the controller
 * with the connectable flags already updated by set_connectable().
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	/* Use bt_dev_dbg() for consistency with the rest of this file
	 * instead of the legacy BT_DBG("%s", hdev->name) form.
	 */
	bt_dev_dbg(hdev, "");

	return hci_update_connectable_sync(hdev);
}
1604 
/* Handle MGMT_OP_SET_CONNECTABLE: validate, take the flag-only path
 * when powered off, otherwise update the flags optimistically and
 * queue the HCI update on the cmd_sync queue.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverability,
		 * so stop its timeout and clear both related flags.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1661 
/* Handle MGMT_OP_SET_BONDABLE: toggle the HCI_BONDABLE flag and
 * notify listeners if it changed. This is a pure flag update with no
 * HCI traffic of its own.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear tells us whether the flag actually flipped. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1699 
/* Handle MGMT_OP_SET_LINK_SECURITY: when powered off just flip the
 * flag; when powered, send HCI Write Auth Enable and answer from the
 * command's completion path.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Link security is a BR/EDR-only concept. */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: just confirm the settings. */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* The pending command is answered when the HCI command
	 * completes; on send failure it is dropped here.
	 */
	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1768 
/* Completion handler for the Set SSP cmd_sync work: settle the
 * SSP/HS flags based on the outcome, answer every pending Set SSP
 * command and broadcast New Settings if anything changed.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the optimistic enable and the dependent
		 * High Speed flag.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* High Speed requires SSP, so disabling SSP must also
		 * take down HS; "changed" stays true if either flipped.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1813 
/* Sync-queue callback for Set SSP: write the requested SSP mode to
 * the controller.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	/* Temporarily set HCI_SSP_ENABLED so the sync write proceeds. */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* On success the temporary set is reverted here; the
	 * authoritative flag transition (and New Settings broadcast)
	 * happens in set_ssp_complete() -- NOTE(review): intentional-
	 * looking but subtle, confirm against set_ssp_complete().
	 */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1831 
/* Handle MGMT_OP_SET_SSP: when powered off just adjust the flags;
 * when powered, queue the SSP mode write on the cmd_sync queue.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also takes down High Speed,
			 * which depends on it.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just confirm the settings. */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1911 
/* Handle MGMT_OP_SET_HS: toggle the High Speed flag. HS requires
 * CONFIG_BT_HS, BR/EDR and SSP; it is a flag-only setting with no
 * HCI traffic of its own.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP could change the HS preconditions. */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS on a powered controller is rejected. */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1972 
/* Completion handler for the Set LE cmd_sync work: answer every
 * pending Set LE command with the outcome and broadcast New Settings
 * on success.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() held a reference on the first socket. */
	if (match.sk)
		sock_put(match.sk);
}
1993 
/* Sync-queue callback for Set LE: tear down advertising when LE goes
 * off, write LE Host Support, and refresh advertising data and
 * passive scanning when LE ends up enabled.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Stop any running advertising before LE support goes
		 * away; ext adv instance 0 is removed as well.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2035 
/* Handle MGMT_OP_SET_LE: validate, refuse disabling LE on LE-only
 * controllers, handle the flag-only cases inline, otherwise queue
 * the LE host-support update on the cmd_sync queue.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE removes all advertising instances first. */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Set Advertising races with Set LE over the same state. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2127 
2128 /* This is a helper function to test for pending mgmt commands that can
2129  * cause CoD or EIR HCI commands. We can only allow one such pending
2130  * mgmt command at a time since otherwise we cannot easily track what
2131  * the current values are, will be, and based on that calculate if a new
2132  * HCI command needs to be sent and if yes with what value.
2133  */
2134 static bool pending_eir_or_class(struct hci_dev *hdev)
2135 {
2136 	struct mgmt_pending_cmd *cmd;
2137 
2138 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2139 		switch (cmd->opcode) {
2140 		case MGMT_OP_ADD_UUID:
2141 		case MGMT_OP_REMOVE_UUID:
2142 		case MGMT_OP_SET_DEV_CLASS:
2143 		case MGMT_OP_SET_POWERED:
2144 			return true;
2145 		}
2146 	}
2147 
2148 	return false;
2149 }
2150 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. Bytes 12-15 are zero here so that the alias
 * value of a 16/32-bit UUID can be compared/extracted at that offset.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2155 
2156 static u8 get_uuid_size(const u8 *uuid)
2157 {
2158 	u32 val;
2159 
2160 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2161 		return 128;
2162 
2163 	val = get_unaligned_le32(&uuid[12]);
2164 	if (val > 0xffff)
2165 		return 32;
2166 
2167 	return 16;
2168 }
2169 
/* Completion callback shared by the UUID/class related commands queued
 * via hci_cmd_sync_queue(). Replies to the originating socket with the
 * current Class of Device (3 bytes) and releases the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* dev_class is reported regardless of err; the status carries
	 * the actual outcome.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2181 
/* Sync work for MGMT_OP_ADD_UUID: refresh the Class of Device first and,
 * only if that succeeded, refresh the EIR data.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int ret = hci_update_class_sync(hdev);

	return ret ? ret : hci_update_eir_sync(hdev);
}
2192 
/* Handler for MGMT_OP_ADD_UUID: record a new service UUID on the device
 * and queue the synchronous work that updates Class of Device and EIR.
 * The reply is sent from mgmt_class_complete() once that work finishes.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting command may be in flight at a time
	 * (see pending_eir_or_class() above).
	 */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	/* NOTE(review): if allocation of the pending command fails below,
	 * the UUID stays on hdev->uuids even though the caller is told the
	 * command failed — confirm this is intended.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2238 
2239 static bool enable_service_cache(struct hci_dev *hdev)
2240 {
2241 	if (!hdev_is_powered(hdev))
2242 		return false;
2243 
2244 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2245 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2246 				   CACHE_TIMEOUT);
2247 		return true;
2248 	}
2249 
2250 	return false;
2251 }
2252 
/* Sync work for MGMT_OP_REMOVE_UUID: update the Class of Device, then
 * the EIR data, stopping at the first failure.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int ret = hci_update_class_sync(hdev);

	return ret ? ret : hci_update_eir_sync(hdev);
}
2263 
/* Handler for MGMT_OP_REMOVE_UUID: remove one service UUID (or all of
 * them, when the all-zero wildcard UUID is given) and queue the work
 * that refreshes Class of Device and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The all-zero UUID acts as a wildcard clearing every stored UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was armed, the class/EIR
		 * update is deferred until it fires; reply immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2329 
/* Sync work for MGMT_OP_SET_DEV_CLASS. If the service cache was armed,
 * it is cancelled and the EIR data flushed first, before the class is
 * written — keep this ordering.
 */
static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}
2344 
/* Handler for MGMT_OP_SET_DEV_CLASS: store the major/minor Class of
 * Device and, if powered, queue the work writing it to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device only exists for BR/EDR capable controllers */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off just remember the values; they are applied on
	 * power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2396 
/* Handler for MGMT_OP_LOAD_LINK_KEYS: replace the whole set of stored
 * BR/EDR link keys with the list supplied by userspace (typically done
 * once at daemon startup). Blocked and debug keys are silently skipped.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count such that the total parameter size
	 * still fits in a u16 length field.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the advertised key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before touching any state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The supplied list fully replaces the existing keys */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2485 
2486 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2487 			   u8 addr_type, struct sock *skip_sk)
2488 {
2489 	struct mgmt_ev_device_unpaired ev;
2490 
2491 	bacpy(&ev.addr.bdaddr, bdaddr);
2492 	ev.addr.type = addr_type;
2493 
2494 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2495 			  skip_sk);
2496 }
2497 
/* Handler for MGMT_OP_UNPAIR_DEVICE: delete the pairing information for
 * a device (link key for BR/EDR, SMP keys for LE) and, when requested
 * via cp->disconnect, also terminate an existing connection. If a link
 * termination is started, the reply is deferred to addr_cmd_complete().
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		/* No stored link key means the device was never paired */
		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* No active LE connection: drop the connection parameters now */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Reply is sent from the pending command once the link is gone */
	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2625 
/* Handler for MGMT_OP_DISCONNECT: terminate an existing BR/EDR or LE
 * connection. The reply is deferred until the disconnect completes
 * (via the pending command's generic_cmd_complete).
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in progress at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED means there is no established link to drop */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2691 
2692 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2693 {
2694 	switch (link_type) {
2695 	case LE_LINK:
2696 		switch (addr_type) {
2697 		case ADDR_LE_DEV_PUBLIC:
2698 			return BDADDR_LE_PUBLIC;
2699 
2700 		default:
2701 			/* Fallback to LE Random address type */
2702 			return BDADDR_LE_RANDOM;
2703 		}
2704 
2705 	default:
2706 		/* Fallback to BR/EDR type */
2707 		return BDADDR_BREDR;
2708 	}
2709 }
2710 
/* Handler for MGMT_OP_GET_CONNECTIONS: report the addresses of all
 * mgmt-visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count connections to size the response buffer */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the addresses. For SCO/eSCO links the slot
	 * is written but the index is not advanced, so the next entry
	 * overwrites it — this filters them out of the final reply.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2764 
2765 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2766 				   struct mgmt_cp_pin_code_neg_reply *cp)
2767 {
2768 	struct mgmt_pending_cmd *cmd;
2769 	int err;
2770 
2771 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2772 			       sizeof(*cp));
2773 	if (!cmd)
2774 		return -ENOMEM;
2775 
2776 	cmd->cmd_complete = addr_cmd_complete;
2777 
2778 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2779 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2780 	if (err < 0)
2781 		mgmt_pending_remove(cmd);
2782 
2783 	return err;
2784 }
2785 
/* Handler for MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code
 * to the controller. If high security is required but the PIN is not a
 * full 16 bytes, a negative reply is sent instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 digit PIN; reject anything shorter
	 * by sending a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2847 
2848 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2849 			     u16 len)
2850 {
2851 	struct mgmt_cp_set_io_capability *cp = data;
2852 
2853 	bt_dev_dbg(hdev, "sock %p", sk);
2854 
2855 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2856 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2857 				       MGMT_STATUS_INVALID_PARAMS);
2858 
2859 	hci_dev_lock(hdev);
2860 
2861 	hdev->io_capability = cp->io_capability;
2862 
2863 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2864 
2865 	hci_dev_unlock(hdev);
2866 
2867 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2868 				 NULL, 0);
2869 }
2870 
2871 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2872 {
2873 	struct hci_dev *hdev = conn->hdev;
2874 	struct mgmt_pending_cmd *cmd;
2875 
2876 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2877 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2878 			continue;
2879 
2880 		if (cmd->user_data != conn)
2881 			continue;
2882 
2883 		return cmd;
2884 	}
2885 
2886 	return NULL;
2887 }
2888 
/* Finalize a MGMT_OP_PAIR_DEVICE command: reply to userspace, detach
 * the pairing callbacks and release both references this command holds
 * on the connection (the hci_conn_drop from the connect and the
 * hci_conn_get taken when cmd->user_data was set).
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
2917 
2918 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2919 {
2920 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2921 	struct mgmt_pending_cmd *cmd;
2922 
2923 	cmd = find_pairing(conn);
2924 	if (cmd) {
2925 		cmd->cmd_complete(cmd, status);
2926 		mgmt_pending_remove(cmd);
2927 	}
2928 }
2929 
2930 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2931 {
2932 	struct mgmt_pending_cmd *cmd;
2933 
2934 	BT_DBG("status %u", status);
2935 
2936 	cmd = find_pairing(conn);
2937 	if (!cmd) {
2938 		BT_DBG("Unable to find a pending command");
2939 		return;
2940 	}
2941 
2942 	cmd->cmd_complete(cmd, mgmt_status(status));
2943 	mgmt_pending_remove(cmd);
2944 }
2945 
2946 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2947 {
2948 	struct mgmt_pending_cmd *cmd;
2949 
2950 	BT_DBG("status %u", status);
2951 
2952 	if (!status)
2953 		return;
2954 
2955 	cmd = find_pairing(conn);
2956 	if (!cmd) {
2957 		BT_DBG("Unable to find a pending command");
2958 		return;
2959 	}
2960 
2961 	cmd->cmd_complete(cmd, mgmt_status(status));
2962 	mgmt_pending_remove(cmd);
2963 }
2964 
2965 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2966 		       u16 len)
2967 {
2968 	struct mgmt_cp_pair_device *cp = data;
2969 	struct mgmt_rp_pair_device rp;
2970 	struct mgmt_pending_cmd *cmd;
2971 	u8 sec_level, auth_type;
2972 	struct hci_conn *conn;
2973 	int err;
2974 
2975 	bt_dev_dbg(hdev, "sock %p", sk);
2976 
2977 	memset(&rp, 0, sizeof(rp));
2978 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2979 	rp.addr.type = cp->addr.type;
2980 
2981 	if (!bdaddr_type_is_valid(cp->addr.type))
2982 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2983 					 MGMT_STATUS_INVALID_PARAMS,
2984 					 &rp, sizeof(rp));
2985 
2986 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2987 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2988 					 MGMT_STATUS_INVALID_PARAMS,
2989 					 &rp, sizeof(rp));
2990 
2991 	hci_dev_lock(hdev);
2992 
2993 	if (!hdev_is_powered(hdev)) {
2994 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2995 					MGMT_STATUS_NOT_POWERED, &rp,
2996 					sizeof(rp));
2997 		goto unlock;
2998 	}
2999 
3000 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3001 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3002 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3003 					sizeof(rp));
3004 		goto unlock;
3005 	}
3006 
3007 	sec_level = BT_SECURITY_MEDIUM;
3008 	auth_type = HCI_AT_DEDICATED_BONDING;
3009 
3010 	if (cp->addr.type == BDADDR_BREDR) {
3011 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3012 				       auth_type, CONN_REASON_PAIR_DEVICE);
3013 	} else {
3014 		u8 addr_type = le_addr_type(cp->addr.type);
3015 		struct hci_conn_params *p;
3016 
3017 		/* When pairing a new device, it is expected to remember
3018 		 * this device for future connections. Adding the connection
3019 		 * parameter information ahead of time allows tracking
3020 		 * of the peripheral preferred values and will speed up any
3021 		 * further connection establishment.
3022 		 *
3023 		 * If connection parameters already exist, then they
3024 		 * will be kept and this function does nothing.
3025 		 */
3026 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3027 
3028 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3029 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3030 
3031 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3032 					   sec_level, HCI_LE_CONN_TIMEOUT,
3033 					   CONN_REASON_PAIR_DEVICE);
3034 	}
3035 
3036 	if (IS_ERR(conn)) {
3037 		int status;
3038 
3039 		if (PTR_ERR(conn) == -EBUSY)
3040 			status = MGMT_STATUS_BUSY;
3041 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3042 			status = MGMT_STATUS_NOT_SUPPORTED;
3043 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3044 			status = MGMT_STATUS_REJECTED;
3045 		else
3046 			status = MGMT_STATUS_CONNECT_FAILED;
3047 
3048 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3049 					status, &rp, sizeof(rp));
3050 		goto unlock;
3051 	}
3052 
3053 	if (conn->connect_cfm_cb) {
3054 		hci_conn_drop(conn);
3055 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3056 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3057 		goto unlock;
3058 	}
3059 
3060 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3061 	if (!cmd) {
3062 		err = -ENOMEM;
3063 		hci_conn_drop(conn);
3064 		goto unlock;
3065 	}
3066 
3067 	cmd->cmd_complete = pairing_complete;
3068 
3069 	/* For LE, just connecting isn't a proof that the pairing finished */
3070 	if (cp->addr.type == BDADDR_BREDR) {
3071 		conn->connect_cfm_cb = pairing_complete_cb;
3072 		conn->security_cfm_cb = pairing_complete_cb;
3073 		conn->disconn_cfm_cb = pairing_complete_cb;
3074 	} else {
3075 		conn->connect_cfm_cb = le_pairing_complete_cb;
3076 		conn->security_cfm_cb = le_pairing_complete_cb;
3077 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3078 	}
3079 
3080 	conn->io_capability = cp->io_cap;
3081 	cmd->user_data = hci_conn_get(conn);
3082 
3083 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3084 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3085 		cmd->cmd_complete(cmd, 0);
3086 		mgmt_pending_remove(cmd);
3087 	}
3088 
3089 	err = 0;
3090 
3091 unlock:
3092 	hci_dev_unlock(hdev);
3093 	return err;
3094 }
3095 
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE: abort a pairing previously
 * started via MGMT_OP_PAIR_DEVICE. The pending pair command is completed
 * with MGMT_STATUS_CANCELLED and any keys/link created for it removed.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be an ongoing Pair Device command to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the pairing that is in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3152 
/* Common implementation for the user confirmation / passkey / PIN reply
 * commands. For LE connections the response is routed through SMP and
 * answered immediately; for BR/EDR the corresponding HCI command hci_op
 * is sent and the reply deferred via a pending command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP rather than the HCI op */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3223 
/* Handler for MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with the matching HCI opcode (no passkey).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3235 
/* Handler for MGMT_OP_USER_CONFIRM_REPLY: validate the fixed-size
 * parameters and delegate to user_pairing_resp() (no passkey).
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3251 
3252 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3253 				  void *data, u16 len)
3254 {
3255 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3256 
3257 	bt_dev_dbg(hdev, "sock %p", sk);
3258 
3259 	return user_pairing_resp(sk, hdev, &cp->addr,
3260 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3261 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3262 }
3263 
3264 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3265 			      u16 len)
3266 {
3267 	struct mgmt_cp_user_passkey_reply *cp = data;
3268 
3269 	bt_dev_dbg(hdev, "sock %p", sk);
3270 
3271 	return user_pairing_resp(sk, hdev, &cp->addr,
3272 				 MGMT_OP_USER_PASSKEY_REPLY,
3273 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3274 }
3275 
3276 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3277 				  void *data, u16 len)
3278 {
3279 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3280 
3281 	bt_dev_dbg(hdev, "sock %p", sk);
3282 
3283 	return user_pairing_resp(sk, hdev, &cp->addr,
3284 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3285 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3286 }
3287 
3288 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3289 {
3290 	struct adv_info *adv_instance;
3291 
3292 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3293 	if (!adv_instance)
3294 		return 0;
3295 
3296 	/* stop if current instance doesn't need to be changed */
3297 	if (!(adv_instance->flags & flags))
3298 		return 0;
3299 
3300 	cancel_adv_timeout(hdev);
3301 
3302 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3303 	if (!adv_instance)
3304 		return 0;
3305 
3306 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3307 
3308 	return 0;
3309 }
3310 
/* hci_cmd_sync callback: expire the current advertising instance when
 * it embeds the local name (queued after the name changed).
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3315 
3316 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3317 {
3318 	struct mgmt_pending_cmd *cmd = data;
3319 	struct mgmt_cp_set_local_name *cp = cmd->param;
3320 	u8 status = mgmt_status(err);
3321 
3322 	bt_dev_dbg(hdev, "err %d", err);
3323 
3324 	if (status) {
3325 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3326 				status);
3327 	} else {
3328 		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3329 				  cp, sizeof(*cp));
3330 
3331 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3332 			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3333 	}
3334 
3335 	mgmt_pending_remove(cmd);
3336 }
3337 
/* hci_cmd_sync work: push the updated local name to the controller.
 * For BR/EDR this means the controller name plus the EIR data that
 * embeds it; for LE only the scan response data needs refreshing.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3353 
/* Handler for MGMT_OP_SET_LOCAL_NAME: store the new device name and
 * short name, and push them to the controller when it is powered.
 * Returns 0 or a negative errno; mgmt status is reported via the
 * command complete/status replies.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name is only stored; no HCI command is issued for it. */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Powered off: just store the name and notify listeners;
		 * it will be programmed into the controller on power on.
		 */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	/* Powered on: queue the HCI update; the reply is sent from
	 * set_name_complete once the controller has been updated.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Only commit the new name once the update was queued successfully. */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3416 
/* hci_cmd_sync callback: expire the current advertising instance when
 * it embeds the appearance value (queued after the appearance changed).
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3421 
3422 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3423 			  u16 len)
3424 {
3425 	struct mgmt_cp_set_appearance *cp = data;
3426 	u16 appearance;
3427 	int err;
3428 
3429 	bt_dev_dbg(hdev, "sock %p", sk);
3430 
3431 	if (!lmp_le_capable(hdev))
3432 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3433 				       MGMT_STATUS_NOT_SUPPORTED);
3434 
3435 	appearance = le16_to_cpu(cp->appearance);
3436 
3437 	hci_dev_lock(hdev);
3438 
3439 	if (hdev->appearance != appearance) {
3440 		hdev->appearance = appearance;
3441 
3442 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3443 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3444 					   NULL);
3445 
3446 		ext_info_changed(hdev, sk);
3447 	}
3448 
3449 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3450 				0);
3451 
3452 	hci_dev_unlock(hdev);
3453 
3454 	return err;
3455 }
3456 
3457 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3458 				 void *data, u16 len)
3459 {
3460 	struct mgmt_rp_get_phy_configuration rp;
3461 
3462 	bt_dev_dbg(hdev, "sock %p", sk);
3463 
3464 	hci_dev_lock(hdev);
3465 
3466 	memset(&rp, 0, sizeof(rp));
3467 
3468 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3469 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3470 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3471 
3472 	hci_dev_unlock(hdev);
3473 
3474 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3475 				 &rp, sizeof(rp));
3476 }
3477 
/* Broadcast a PHY Configuration Changed event with the currently
 * selected PHYs to all mgmt sockets except @skip (normally the socket
 * that triggered the change).
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3489 
/* Completion callback for the Set PHY Configuration command. Derives
 * the final status from both the sync-queue error and the HCI command
 * response skb, then reports it back to the mgmt socket.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even if the work itself succeeded, the HCI response (stored
	 * in cmd->skb by set_default_phy_sync) may carry a failure.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Let other mgmt sockets know about the new selection. */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	/* skb may be NULL or an ERR_PTR; only free a real buffer. */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3523 
/* hci_cmd_sync work: translate the mgmt PHY selection bits into an
 * HCI LE Set Default PHY command and issue it synchronously. The
 * response skb is stashed in cmd->skb for set_default_phy_complete.
 */
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bit 0/1 tell the controller we have no TX/RX
	 * preference, in which case tx_phys/rx_phys are ignored.
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}
3562 
/* Handler for MGMT_OP_SET_PHY_CONFIGURATION: validate the requested
 * PHY selection, apply the BR/EDR part by adjusting hdev->pkt_type,
 * and queue an HCI LE Set Default PHY command for the LE part.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject selections containing PHYs the controller lacks. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* PHYs that are supported but not configurable must always
	 * remain selected; they cannot be turned off.
	 */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY bits onto the ACL packet type mask. Note
	 * that the EDR bits are inverted: setting HCI_2DHx/HCI_3DHx
	 * means "do NOT use" that packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE selection is unchanged, no HCI command is needed;
	 * reply directly (and notify if the BR/EDR part changed).
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3691 
3692 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3693 			    u16 len)
3694 {
3695 	int err = MGMT_STATUS_SUCCESS;
3696 	struct mgmt_cp_set_blocked_keys *keys = data;
3697 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3698 				   sizeof(struct mgmt_blocked_key_info));
3699 	u16 key_count, expected_len;
3700 	int i;
3701 
3702 	bt_dev_dbg(hdev, "sock %p", sk);
3703 
3704 	key_count = __le16_to_cpu(keys->key_count);
3705 	if (key_count > max_key_count) {
3706 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3707 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3708 				       MGMT_STATUS_INVALID_PARAMS);
3709 	}
3710 
3711 	expected_len = struct_size(keys, keys, key_count);
3712 	if (expected_len != len) {
3713 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3714 			   expected_len, len);
3715 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3716 				       MGMT_STATUS_INVALID_PARAMS);
3717 	}
3718 
3719 	hci_dev_lock(hdev);
3720 
3721 	hci_blocked_keys_clear(hdev);
3722 
3723 	for (i = 0; i < keys->key_count; ++i) {
3724 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3725 
3726 		if (!b) {
3727 			err = MGMT_STATUS_NO_RESOURCES;
3728 			break;
3729 		}
3730 
3731 		b->type = keys->keys[i].type;
3732 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3733 		list_add_rcu(&b->list, &hdev->blocked_keys);
3734 	}
3735 	hci_dev_unlock(hdev);
3736 
3737 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3738 				err, NULL, 0);
3739 }
3740 
/* Handler for MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband speech
 * (mSBC over SCO) setting. Only allowed while the quirk advertises
 * support, and the value can only change while the device is powered
 * off (or left as-is while powered on).
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* While powered, the setting cannot be flipped; only requests
	 * matching the current state are accepted.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings when the flag actually changed. */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3796 
/* Handler for MGMT_OP_READ_CONTROLLER_CAP: build a TLV-style capability
 * list (security flags, encryption key sizes, LE TX power range) and
 * return it. NOTE(review): buf[20] must stay large enough for the
 * header plus every eir_append_* below — verify when adding entries.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	/* Reply length is the fixed header plus the bytes actually used. */
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3863 
/* UUIDs identifying experimental features over the mgmt interface.
 * Each array stores the bytes of the textual UUID in reversed
 * (little-endian) order, as used on the wire.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3895 
/* Handler for MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental
 * features available on this controller (or globally when hdev is
 * NULL) along with their current enabled/supported flag bits.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[102];   /* Enough space for 5 features: 2 + 20 * 5 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* The debug feature is global and therefore only listed on the
	 * non-controller (NULL hdev) index.
	 */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(1) marks that toggling this feature also changes
		 * the supported settings.
		 */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3974 
3975 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3976 					  struct sock *skip)
3977 {
3978 	struct mgmt_ev_exp_feature_changed ev;
3979 
3980 	memset(&ev, 0, sizeof(ev));
3981 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
3982 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3983 
3984 	if (enabled && privacy_mode_capable(hdev))
3985 		set_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
3986 	else
3987 		clear_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
3988 
3989 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3990 				  &ev, sizeof(ev),
3991 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3992 
3993 }
3994 
3995 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
3996 			       bool enabled, struct sock *skip)
3997 {
3998 	struct mgmt_ev_exp_feature_changed ev;
3999 
4000 	memset(&ev, 0, sizeof(ev));
4001 	memcpy(ev.uuid, uuid, 16);
4002 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4003 
4004 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4005 				  &ev, sizeof(ev),
4006 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4007 }
4008 
/* Table-entry initializer mapping an experimental feature UUID to the
 * handler that sets it.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4014 
/* The zero key uuid is special. Multiple exp features are set through it.
 * Setting it disables every experimental feature it governs: the debug
 * feature on the global index, and LL privacy on a powered-off
 * controller index.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Global (NULL hdev) index: turn off the debug feature. */
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* Controller index: clear LL privacy, but only while powered off. */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4051 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Set-handler for the debug experimental feature: toggles the global
 * Bluetooth debug logging state. Only valid on the non-controller
 * (NULL hdev) index with a single boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* hdev is known to be NULL here, so this notifies on the
	 * global index.
	 */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4098 
/* Set-handler for the LL privacy (RPA resolution) experimental
 * feature. Only allowed on a powered-off controller with a single
 * boolean parameter octet.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* Advertising gets cleared when enabling LL privacy. */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4163 
/* Set-handler for the quality report experimental feature. Uses the
 * vendor set_quality_report callback when available, falling back to
 * the AOSP extension otherwise. Runs under the request sync lock
 * since the callbacks issue HCI traffic.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Vendor callback takes precedence over the AOSP path. */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only update the flag after the callback succeeded. */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4237 
/* Set-handler for the offload codecs experimental feature: toggles the
 * HCI_OFFLOAD_CODECS_ENABLED flag. Requires a controller that provides
 * the get_data_path_id callback.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* Offload codecs are only usable when the driver exposes a
	 * data path id callback.
	 */
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
4295 
4296 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4297 					  struct mgmt_cp_set_exp_feature *cp,
4298 					  u16 data_len)
4299 {
4300 	bool val, changed;
4301 	int err;
4302 	struct mgmt_rp_set_exp_feature rp;
4303 
4304 	/* Command requires to use a valid controller index */
4305 	if (!hdev)
4306 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4307 				       MGMT_OP_SET_EXP_FEATURE,
4308 				       MGMT_STATUS_INVALID_INDEX);
4309 
4310 	/* Parameters are limited to a single octet */
4311 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4312 		return mgmt_cmd_status(sk, hdev->id,
4313 				       MGMT_OP_SET_EXP_FEATURE,
4314 				       MGMT_STATUS_INVALID_PARAMS);
4315 
4316 	/* Only boolean on/off is supported */
4317 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4318 		return mgmt_cmd_status(sk, hdev->id,
4319 				       MGMT_OP_SET_EXP_FEATURE,
4320 				       MGMT_STATUS_INVALID_PARAMS);
4321 
4322 	val = !!cp->param[0];
4323 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4324 
4325 	if (!hci_dev_le_state_simultaneous(hdev)) {
4326 		return mgmt_cmd_status(sk, hdev->id,
4327 				       MGMT_OP_SET_EXP_FEATURE,
4328 				       MGMT_STATUS_NOT_SUPPORTED);
4329 	}
4330 
4331 	if (changed) {
4332 		if (val)
4333 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4334 		else
4335 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4336 	}
4337 
4338 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4339 		    val, changed);
4340 
4341 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4342 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4343 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4344 	err = mgmt_cmd_complete(sk, hdev->id,
4345 				MGMT_OP_SET_EXP_FEATURE, 0,
4346 				&rp, sizeof(rp));
4347 
4348 	if (changed)
4349 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4350 
4351 	return err;
4352 }
4353 
/* Dispatch table for MGMT_OP_SET_EXP_FEATURE.  Each entry pairs an
 * experimental feature's 128-bit UUID with the handler that validates
 * and applies the request.  Lookup walks the table in order and stops
 * at the NULL sentinel.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	/* All-zero UUID; handled by set_zero_key_func (presumably a
	 * global reset of experimental features — see that handler).
	 */
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4371 
4372 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4373 			   void *data, u16 data_len)
4374 {
4375 	struct mgmt_cp_set_exp_feature *cp = data;
4376 	size_t i = 0;
4377 
4378 	bt_dev_dbg(hdev, "sock %p", sk);
4379 
4380 	for (i = 0; exp_features[i].uuid; i++) {
4381 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4382 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4383 	}
4384 
4385 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4386 			       MGMT_OP_SET_EXP_FEATURE,
4387 			       MGMT_STATUS_NOT_SUPPORTED);
4388 }
4389 
/* Handler for MGMT_OP_GET_DEVICE_FLAGS.
 *
 * Looks up the per-device flag bitmap of a BR/EDR accept-list entry or
 * an LE connection-parameter entry and returns it together with the
 * controller-wide supported-flags mask.  Unknown devices get
 * MGMT_STATUS_INVALID_PARAMS (the default status).
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	/* Flatten the controller's supported connection-flag bitmap into
	 * the 32-bit mask used on the wire.
	 */
	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
			__HCI_CONN_NUM_FLAGS);

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		bitmap_to_arr32(&current_flags, br_params->flags,
				__HCI_CONN_NUM_FLAGS);
	} else {
		/* Non-BR/EDR addresses are treated as LE. */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));

		if (!params)
			goto done;

		bitmap_to_arr32(&current_flags, params->flags,
				__HCI_CONN_NUM_FLAGS);
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				&rp, sizeof(rp));
}
4444 
4445 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4446 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4447 				 u32 supported_flags, u32 current_flags)
4448 {
4449 	struct mgmt_ev_device_flags_changed ev;
4450 
4451 	bacpy(&ev.addr.bdaddr, bdaddr);
4452 	ev.addr.type = bdaddr_type;
4453 	ev.supported_flags = cpu_to_le32(supported_flags);
4454 	ev.current_flags = cpu_to_le32(current_flags);
4455 
4456 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4457 }
4458 
4459 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4460 			    u16 len)
4461 {
4462 	struct mgmt_cp_set_device_flags *cp = data;
4463 	struct bdaddr_list_with_flags *br_params;
4464 	struct hci_conn_params *params;
4465 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4466 	u32 supported_flags;
4467 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4468 
4469 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4470 		   &cp->addr.bdaddr, cp->addr.type,
4471 		   __le32_to_cpu(current_flags));
4472 
4473 	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
4474 			__HCI_CONN_NUM_FLAGS);
4475 
4476 	if ((supported_flags | current_flags) != supported_flags) {
4477 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4478 			    current_flags, supported_flags);
4479 		goto done;
4480 	}
4481 
4482 	hci_dev_lock(hdev);
4483 
4484 	if (cp->addr.type == BDADDR_BREDR) {
4485 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4486 							      &cp->addr.bdaddr,
4487 							      cp->addr.type);
4488 
4489 		if (br_params) {
4490 			bitmap_from_u64(br_params->flags, current_flags);
4491 			status = MGMT_STATUS_SUCCESS;
4492 		} else {
4493 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4494 				    &cp->addr.bdaddr, cp->addr.type);
4495 		}
4496 	} else {
4497 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4498 						le_addr_type(cp->addr.type));
4499 		if (params) {
4500 			bitmap_from_u64(params->flags, current_flags);
4501 			status = MGMT_STATUS_SUCCESS;
4502 
4503 			/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4504 			 * has been set.
4505 			 */
4506 			if (test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY,
4507 				     params->flags))
4508 				hci_update_passive_scan(hdev);
4509 		} else {
4510 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4511 				    &cp->addr.bdaddr,
4512 				    le_addr_type(cp->addr.type));
4513 		}
4514 	}
4515 
4516 done:
4517 	hci_dev_unlock(hdev);
4518 
4519 	if (status == MGMT_STATUS_SUCCESS)
4520 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4521 				     supported_flags, current_flags);
4522 
4523 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4524 				 &cp->addr, sizeof(cp->addr));
4525 }
4526 
4527 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4528 				   u16 handle)
4529 {
4530 	struct mgmt_ev_adv_monitor_added ev;
4531 
4532 	ev.monitor_handle = cpu_to_le16(handle);
4533 
4534 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4535 }
4536 
/* Send an Advertisement Monitor Removed event for @handle.
 *
 * If the removal came from a pending MGMT_OP_REMOVE_ADV_MONITOR request
 * for a specific (non-zero) handle, the requesting socket is skipped so
 * it does not also receive an event for its own command.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* A zero handle requests removing all monitors; in that
		 * case every socket, including the requester, is notified.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
4556 
/* Handler for MGMT_OP_READ_ADV_MONITOR_FEATURES.
 *
 * Reports the supported/enabled monitor feature masks, the controller
 * limits, and the handles of all currently registered advertisement
 * monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	/* NOTE(review): assumes the IDR never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES monitors — the snapshot loop
	 * below has no bounds check.  Verify registration enforces this
	 * limit elsewhere.
	 */
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot all registered monitor handles while holding the lock. */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	/* Reply has a variable-length trailer of 16-bit handles. */
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4605 
4606 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4607 {
4608 	struct mgmt_rp_add_adv_patterns_monitor rp;
4609 	struct mgmt_pending_cmd *cmd;
4610 	struct adv_monitor *monitor;
4611 	int err = 0;
4612 
4613 	hci_dev_lock(hdev);
4614 
4615 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4616 	if (!cmd) {
4617 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4618 		if (!cmd)
4619 			goto done;
4620 	}
4621 
4622 	monitor = cmd->user_data;
4623 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4624 
4625 	if (!status) {
4626 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4627 		hdev->adv_monitors_cnt++;
4628 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4629 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4630 		hci_update_passive_scan(hdev);
4631 	}
4632 
4633 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4634 				mgmt_status(status), &rp, sizeof(rp));
4635 	mgmt_pending_remove(cmd);
4636 
4637 done:
4638 	hci_dev_unlock(hdev);
4639 	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4640 		   rp.monitor_handle, status);
4641 
4642 	return err;
4643 }
4644 
/* Common helper for the Add Adv Patterns Monitor commands.
 *
 * Takes ownership of monitor @m: on every error path @m is released
 * via hci_free_adv_monitor().  @status carries a pre-computed MGMT
 * error from the caller's parsing stage (0 on success).
 *
 * Returns the result of an immediate mgmt reply, or 0 when the
 * registration was forwarded to the controller and the reply will be
 * sent from mgmt_add_adv_patterns_monitor_complete().
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	/* The caller already failed parsing; just free @m and report. */
	if (status)
		goto unlock;

	/* Only one monitor- or LE-state-changing operation at a time. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	/* pending == true means the request was forwarded to the
	 * controller and completes asynchronously.
	 */
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	if (!pending) {
		/* Registered without controller interaction; reply now. */
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4708 
4709 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4710 				   struct mgmt_adv_rssi_thresholds *rssi)
4711 {
4712 	if (rssi) {
4713 		m->rssi.low_threshold = rssi->low_threshold;
4714 		m->rssi.low_threshold_timeout =
4715 		    __le16_to_cpu(rssi->low_threshold_timeout);
4716 		m->rssi.high_threshold = rssi->high_threshold;
4717 		m->rssi.high_threshold_timeout =
4718 		    __le16_to_cpu(rssi->high_threshold_timeout);
4719 		m->rssi.sampling_period = rssi->sampling_period;
4720 	} else {
4721 		/* Default values. These numbers are the least constricting
4722 		 * parameters for MSFT API to work, so it behaves as if there
4723 		 * are no rssi parameter to consider. May need to be changed
4724 		 * if other API are to be supported.
4725 		 */
4726 		m->rssi.low_threshold = -127;
4727 		m->rssi.low_threshold_timeout = 60;
4728 		m->rssi.high_threshold = -127;
4729 		m->rssi.high_threshold_timeout = 0;
4730 		m->rssi.sampling_period = 0;
4731 	}
4732 }
4733 
/* Validate @pattern_count advertisement patterns and copy them onto
 * monitor @m's pattern list.
 *
 * On failure, patterns copied so far remain linked on m->patterns and
 * are released when the caller frees the monitor.
 *
 * Returns an MGMT status code.
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
		/* Each pattern must lie entirely within a maximum-size
		 * advertising data payload.
		 */
		if (offset >= HCI_MAX_AD_LENGTH ||
		    length > HCI_MAX_AD_LENGTH ||
		    (offset + length) > HCI_MAX_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}
4764 
4765 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4766 				    void *data, u16 len)
4767 {
4768 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4769 	struct adv_monitor *m = NULL;
4770 	u8 status = MGMT_STATUS_SUCCESS;
4771 	size_t expected_size = sizeof(*cp);
4772 
4773 	BT_DBG("request for %s", hdev->name);
4774 
4775 	if (len <= sizeof(*cp)) {
4776 		status = MGMT_STATUS_INVALID_PARAMS;
4777 		goto done;
4778 	}
4779 
4780 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4781 	if (len != expected_size) {
4782 		status = MGMT_STATUS_INVALID_PARAMS;
4783 		goto done;
4784 	}
4785 
4786 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4787 	if (!m) {
4788 		status = MGMT_STATUS_NO_RESOURCES;
4789 		goto done;
4790 	}
4791 
4792 	INIT_LIST_HEAD(&m->patterns);
4793 
4794 	parse_adv_monitor_rssi(m, NULL);
4795 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4796 
4797 done:
4798 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4799 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4800 }
4801 
4802 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4803 					 void *data, u16 len)
4804 {
4805 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4806 	struct adv_monitor *m = NULL;
4807 	u8 status = MGMT_STATUS_SUCCESS;
4808 	size_t expected_size = sizeof(*cp);
4809 
4810 	BT_DBG("request for %s", hdev->name);
4811 
4812 	if (len <= sizeof(*cp)) {
4813 		status = MGMT_STATUS_INVALID_PARAMS;
4814 		goto done;
4815 	}
4816 
4817 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4818 	if (len != expected_size) {
4819 		status = MGMT_STATUS_INVALID_PARAMS;
4820 		goto done;
4821 	}
4822 
4823 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4824 	if (!m) {
4825 		status = MGMT_STATUS_NO_RESOURCES;
4826 		goto done;
4827 	}
4828 
4829 	INIT_LIST_HEAD(&m->patterns);
4830 
4831 	parse_adv_monitor_rssi(m, &cp->rssi);
4832 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4833 
4834 done:
4835 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4836 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4837 }
4838 
4839 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4840 {
4841 	struct mgmt_rp_remove_adv_monitor rp;
4842 	struct mgmt_cp_remove_adv_monitor *cp;
4843 	struct mgmt_pending_cmd *cmd;
4844 	int err = 0;
4845 
4846 	hci_dev_lock(hdev);
4847 
4848 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4849 	if (!cmd)
4850 		goto done;
4851 
4852 	cp = cmd->param;
4853 	rp.monitor_handle = cp->monitor_handle;
4854 
4855 	if (!status)
4856 		hci_update_passive_scan(hdev);
4857 
4858 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4859 				mgmt_status(status), &rp, sizeof(rp));
4860 	mgmt_pending_remove(cmd);
4861 
4862 done:
4863 	hci_dev_unlock(hdev);
4864 	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4865 		   rp.monitor_handle, status);
4866 
4867 	return err;
4868 }
4869 
/* Handler for MGMT_OP_REMOVE_ADV_MONITOR.
 *
 * A zero monitor_handle removes all registered monitors.  When the
 * removal needs controller interaction the reply is deferred to
 * mgmt_remove_adv_monitor_complete(); otherwise it is sent directly.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Only one monitor- or LE-state-changing operation at a time. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		/* NOTE(review): -ENOENT (unknown handle) is mapped to
		 * MGMT_STATUS_INVALID_INDEX rather than INVALID_PARAMS;
		 * confirm this matches the mgmt API expectation.
		 */
		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	/* Reply will be sent from mgmt_remove_adv_monitor_complete(). */
	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4934 
/* Completion callback for the queued Read Local OOB Data request.
 *
 * Translates the HCI reply carried in cmd->skb into a mgmt reply.
 * Without BR/EDR Secure Connections only the P-192 hash/randomizer are
 * present and the reply is shortened accordingly.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (!status) {
		/* err == 0: derive the status from the skb, which may be
		 * absent, an error pointer, or carry an HCI status byte.
		 */
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 values only. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the reply size. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* Only free a real skb; error pointers must not reach kfree_skb. */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5001 
5002 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5003 {
5004 	struct mgmt_pending_cmd *cmd = data;
5005 
5006 	if (bredr_sc_enabled(hdev))
5007 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5008 	else
5009 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5010 
5011 	if (IS_ERR(cmd->skb))
5012 		return PTR_ERR(cmd->skb);
5013 	else
5014 		return 0;
5015 }
5016 
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA.
 *
 * Queues an asynchronous HCI read of the local OOB data; the reply is
 * sent from read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data requires an SSP-capable controller. */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one outstanding read per controller. */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	/* On failure to queue, reply immediately and free the command. */
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5064 
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA.
 *
 * Accepts two payload sizes: the legacy form with only P-192
 * hash/randomizer (BR/EDR only) and the extended form that also
 * carries P-256 values.  All-zero hash/randomizer pairs disable the
 * corresponding OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy payload: P-192 values only, BR/EDR addresses only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended payload: P-192 and P-256 values. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5172 
5173 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5174 				  void *data, u16 len)
5175 {
5176 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5177 	u8 status;
5178 	int err;
5179 
5180 	bt_dev_dbg(hdev, "sock %p", sk);
5181 
5182 	if (cp->addr.type != BDADDR_BREDR)
5183 		return mgmt_cmd_complete(sk, hdev->id,
5184 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5185 					 MGMT_STATUS_INVALID_PARAMS,
5186 					 &cp->addr, sizeof(cp->addr));
5187 
5188 	hci_dev_lock(hdev);
5189 
5190 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5191 		hci_remote_oob_data_clear(hdev);
5192 		status = MGMT_STATUS_SUCCESS;
5193 		goto done;
5194 	}
5195 
5196 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5197 	if (err < 0)
5198 		status = MGMT_STATUS_INVALID_PARAMS;
5199 	else
5200 		status = MGMT_STATUS_SUCCESS;
5201 
5202 done:
5203 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5204 				status, &cp->addr, sizeof(cp->addr));
5205 
5206 	hci_dev_unlock(hdev);
5207 	return err;
5208 }
5209 
/* Called when a start-discovery HCI sequence finishes.  Completes
 * whichever start-discovery variant is pending (regular, service or
 * limited) via its stored cmd_complete callback.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* At most one of these variants can be pending at a time. */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5232 
5233 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5234 				    uint8_t *mgmt_status)
5235 {
5236 	switch (type) {
5237 	case DISCOV_TYPE_LE:
5238 		*mgmt_status = mgmt_le_support(hdev);
5239 		if (*mgmt_status)
5240 			return false;
5241 		break;
5242 	case DISCOV_TYPE_INTERLEAVED:
5243 		*mgmt_status = mgmt_le_support(hdev);
5244 		if (*mgmt_status)
5245 			return false;
5246 		fallthrough;
5247 	case DISCOV_TYPE_BREDR:
5248 		*mgmt_status = mgmt_bredr_support(hdev);
5249 		if (*mgmt_status)
5250 			return false;
5251 		break;
5252 	default:
5253 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5254 		return false;
5255 	}
5256 
5257 	return true;
5258 }
5259 
/* hci_cmd_sync completion: report the discovery start result to the
 * requester and advance the discovery state machine (FINDING on
 * success, back to STOPPED on failure).
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo back one byte of the stored parameters — the discovery
	 * type for the start-discovery callers in this file.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5273 
/* hci_cmd_sync work function: kick off the actual discovery procedure. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5278 
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects the variant).
 *
 * Validates preconditions, records the discovery parameters and queues
 * the asynchronous start; the reply is sent from
 * start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Discovery must be idle and periodic inquiry not running. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_new(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5349 
5350 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5351 			   void *data, u16 len)
5352 {
5353 	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5354 					data, len);
5355 }
5356 
5357 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5358 				   void *data, u16 len)
5359 {
5360 	return start_discovery_internal(sk, hdev,
5361 					MGMT_OP_START_LIMITED_DISCOVERY,
5362 					data, len);
5363 }
5364 
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery() but
 * with result filtering by RSSI threshold and an optional list of
 * 128-bit service UUIDs appended to the command parameters.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound chosen so that expected_len below cannot overflow u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session at a time; periodic inquiry excludes it */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The variable-length UUID list must account for the rest of len */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Copy the UUID list; freed via hci_discovery_filter_clear() */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* start_discovery_complete() owns cmd from here on success */
	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5476 
/* Called from the HCI core when a Stop Discovery operation has finished;
 * completes any pending MGMT_OP_STOP_DISCOVERY command with the
 * translated mgmt status.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5493 
/* Completion callback for a queued stop-discovery request: report back
 * to the requesting socket and, on success, mark discovery stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply carries only the first parameter byte (the discovery type) */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	/* Unlike start, discovery state only changes when stopping worked */
	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
5507 
/* hci_cmd_sync work function: stop any ongoing discovery */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
5512 
/* MGMT_OP_STOP_DISCOVERY handler: validate that a discovery session of
 * the requested type is active and queue the stop onto the hci_sync
 * machinery; the final status is reported from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the currently running session */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* stop_discovery_complete() owns cmd from here on success */
	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5557 
/* MGMT_OP_CONFIRM_NAME handler: user space tells us whether it already
 * knows the remote name of a device found during discovery, so the
 * inquiry cache can skip or schedule name resolution accordingly.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	/* The address must refer to an entry still awaiting name info */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		/* Name already known: no resolution needed for this entry */
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		/* Queue the entry for remote name resolution */
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
5599 
5600 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5601 			u16 len)
5602 {
5603 	struct mgmt_cp_block_device *cp = data;
5604 	u8 status;
5605 	int err;
5606 
5607 	bt_dev_dbg(hdev, "sock %p", sk);
5608 
5609 	if (!bdaddr_type_is_valid(cp->addr.type))
5610 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5611 					 MGMT_STATUS_INVALID_PARAMS,
5612 					 &cp->addr, sizeof(cp->addr));
5613 
5614 	hci_dev_lock(hdev);
5615 
5616 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5617 				  cp->addr.type);
5618 	if (err < 0) {
5619 		status = MGMT_STATUS_FAILED;
5620 		goto done;
5621 	}
5622 
5623 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5624 		   sk);
5625 	status = MGMT_STATUS_SUCCESS;
5626 
5627 done:
5628 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5629 				&cp->addr, sizeof(cp->addr));
5630 
5631 	hci_dev_unlock(hdev);
5632 
5633 	return err;
5634 }
5635 
5636 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5637 			  u16 len)
5638 {
5639 	struct mgmt_cp_unblock_device *cp = data;
5640 	u8 status;
5641 	int err;
5642 
5643 	bt_dev_dbg(hdev, "sock %p", sk);
5644 
5645 	if (!bdaddr_type_is_valid(cp->addr.type))
5646 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5647 					 MGMT_STATUS_INVALID_PARAMS,
5648 					 &cp->addr, sizeof(cp->addr));
5649 
5650 	hci_dev_lock(hdev);
5651 
5652 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5653 				  cp->addr.type);
5654 	if (err < 0) {
5655 		status = MGMT_STATUS_INVALID_PARAMS;
5656 		goto done;
5657 	}
5658 
5659 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5660 		   sk);
5661 	status = MGMT_STATUS_SUCCESS;
5662 
5663 done:
5664 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5665 				&cp->addr, sizeof(cp->addr));
5666 
5667 	hci_dev_unlock(hdev);
5668 
5669 	return err;
5670 }
5671 
/* hci_cmd_sync work function: regenerate the EIR data, which embeds the
 * Device ID record configured by set_device_id().
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
5676 
5677 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5678 			 u16 len)
5679 {
5680 	struct mgmt_cp_set_device_id *cp = data;
5681 	int err;
5682 	__u16 source;
5683 
5684 	bt_dev_dbg(hdev, "sock %p", sk);
5685 
5686 	source = __le16_to_cpu(cp->source);
5687 
5688 	if (source > 0x0002)
5689 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5690 				       MGMT_STATUS_INVALID_PARAMS);
5691 
5692 	hci_dev_lock(hdev);
5693 
5694 	hdev->devid_source = source;
5695 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5696 	hdev->devid_product = __le16_to_cpu(cp->product);
5697 	hdev->devid_version = __le16_to_cpu(cp->version);
5698 
5699 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5700 				NULL, 0);
5701 
5702 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
5703 
5704 	hci_dev_unlock(hdev);
5705 
5706 	return err;
5707 }
5708 
/* Log the outcome of re-enabling an advertising instance */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
5716 
/* Completion callback for MGMT_OP_SET_ADVERTISING: sync the mgmt-level
 * HCI_ADVERTISING flag with the controller state, answer all pending
 * Set Advertising commands and, when the global setting was just turned
 * off, resume any previously configured instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's actual LE advertising state */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured one */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
5764 
/* hci_cmd_sync work function for MGMT_OP_SET_ADVERTISING: program the
 * controller according to the pending command's mode value (0x00 off,
 * 0x01 on, 0x02 on and connectable).
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
5798 
/* MGMT_OP_SET_ADVERTISING handler: toggle the global LE advertising
 * setting. Where no HCI traffic is required (powered off, no actual
 * state change, active connections or active scanning) only the flags
 * are flipped and a settings response is sent; otherwise the change is
 * queued onto the hci_sync machinery.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* 0x00 off, 0x01 on, 0x02 on and connectable */
	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings when a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject when another advertising or LE toggle is still in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5882 
5883 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5884 			      void *data, u16 len)
5885 {
5886 	struct mgmt_cp_set_static_address *cp = data;
5887 	int err;
5888 
5889 	bt_dev_dbg(hdev, "sock %p", sk);
5890 
5891 	if (!lmp_le_capable(hdev))
5892 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5893 				       MGMT_STATUS_NOT_SUPPORTED);
5894 
5895 	if (hdev_is_powered(hdev))
5896 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5897 				       MGMT_STATUS_REJECTED);
5898 
5899 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5900 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5901 			return mgmt_cmd_status(sk, hdev->id,
5902 					       MGMT_OP_SET_STATIC_ADDRESS,
5903 					       MGMT_STATUS_INVALID_PARAMS);
5904 
5905 		/* Two most significant bits shall be set */
5906 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5907 			return mgmt_cmd_status(sk, hdev->id,
5908 					       MGMT_OP_SET_STATIC_ADDRESS,
5909 					       MGMT_STATUS_INVALID_PARAMS);
5910 	}
5911 
5912 	hci_dev_lock(hdev);
5913 
5914 	bacpy(&hdev->static_addr, &cp->bdaddr);
5915 
5916 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5917 	if (err < 0)
5918 		goto unlock;
5919 
5920 	err = new_settings(hdev, sk);
5921 
5922 unlock:
5923 	hci_dev_unlock(hdev);
5924 	return err;
5925 }
5926 
5927 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5928 			   void *data, u16 len)
5929 {
5930 	struct mgmt_cp_set_scan_params *cp = data;
5931 	__u16 interval, window;
5932 	int err;
5933 
5934 	bt_dev_dbg(hdev, "sock %p", sk);
5935 
5936 	if (!lmp_le_capable(hdev))
5937 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5938 				       MGMT_STATUS_NOT_SUPPORTED);
5939 
5940 	interval = __le16_to_cpu(cp->interval);
5941 
5942 	if (interval < 0x0004 || interval > 0x4000)
5943 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5944 				       MGMT_STATUS_INVALID_PARAMS);
5945 
5946 	window = __le16_to_cpu(cp->window);
5947 
5948 	if (window < 0x0004 || window > 0x4000)
5949 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5950 				       MGMT_STATUS_INVALID_PARAMS);
5951 
5952 	if (window > interval)
5953 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5954 				       MGMT_STATUS_INVALID_PARAMS);
5955 
5956 	hci_dev_lock(hdev);
5957 
5958 	hdev->le_scan_interval = interval;
5959 	hdev->le_scan_window = window;
5960 
5961 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5962 				NULL, 0);
5963 
5964 	/* If background scan is running, restart it so new parameters are
5965 	 * loaded.
5966 	 */
5967 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5968 	    hdev->discovery.state == DISCOVERY_STOPPED)
5969 		hci_update_passive_scan(hdev);
5970 
5971 	hci_dev_unlock(hdev);
5972 
5973 	return err;
5974 }
5975 
/* Completion callback for MGMT_OP_SET_FAST_CONNECTABLE: on success sync
 * the HCI_FAST_CONNECTABLE flag with the requested value and publish the
 * new settings; on failure report the translated status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
5999 
/* hci_cmd_sync work function: program the fast connectable setting
 * requested by the pending command's mode parameter.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
6007 
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR and at least
 * Bluetooth 1.2. When powered off only the flag is toggled; otherwise
 * the page-scan change is queued onto the hci_sync machinery and the
 * final status comes from fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do if the requested value is already in effect */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: just toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6063 
/* Completion callback for MGMT_OP_SET_BREDR: publish the new settings on
 * success, or undo the optimistic flag change and report the error.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed: set_bredr() set HCI_BREDR_ENABLED before queuing
		 * the work, so clear it again here.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6086 
/* hci_cmd_sync work function for enabling BR/EDR: turn off fast
 * connectable, refresh the page/inquiry scan state and update the
 * advertising data. Each step only runs if the previous one succeeded.
 */
static int set_bredr_sync(struct hci_dev *hdev, void *data)
{
	int status;

	status = hci_write_fast_connectable_sync(hdev, false);

	if (!status)
		status = hci_update_scan_sync(hdev);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	if (!status)
		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	return status;
}
6104 
/* MGMT_OP_SET_BREDR handler: toggle BR/EDR support on a dual-mode
 * controller. Disabling is only possible while powered off; re-enabling
 * is rejected for LE-only configurations using a static address or
 * secure connections (see the long comment below).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* LE must stay enabled; turning off BR/EDR is the LE-only config */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do when the requested value is already in effect */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* Powered off: toggle the flags without any HCI traffic */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			/* BR/EDR-only settings become meaningless */
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 * set_bredr_complete() reverts this on failure.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6205 
/* Completion callback for MGMT_OP_SET_SECURE_CONN: on success sync the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags with the requested mode (0x00 off,
 * 0x01 enabled, 0x02 SC-only) and publish the new settings.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6243 
/* hci_cmd_sync work function for MGMT_OP_SET_SECURE_CONN: write the
 * Secure Connections host support setting to the controller.
 */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6255 
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections support
 * (0x00 off, 0x01 enabled, 0x02 SC-only mode). When no HCI traffic is
 * required (powered off, not SC capable or BR/EDR disabled) only the
 * flags are toggled; otherwise the write is queued onto hci_sync and
 * finished in set_secure_conn_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On a BR/EDR SC-capable controller SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flag-only path: no controller interaction needed or possible */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* No change in either SC support or SC-only mode: just respond */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6336 
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 discard debug keys, 0x01 keep
 * them, 0x02 additionally generate debug keys (SSP debug mode). The
 * controller is only touched when the "use" state changed while powered
 * and SSP is enabled.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both mean keeping debug keys around */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 means actively using debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6383 
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy (RPA usage) and
 * store the local IRK. 0x01 enables privacy, 0x02 enables limited
 * privacy. Only allowed while powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the current RPA stale so a fresh one is generated */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the stored IRK when privacy is disabled */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6440 
6441 static bool irk_is_valid(struct mgmt_irk_info *irk)
6442 {
6443 	switch (irk->addr.type) {
6444 	case BDADDR_LE_PUBLIC:
6445 		return true;
6446 
6447 	case BDADDR_LE_RANDOM:
6448 		/* Two most significant bits shall be set */
6449 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6450 			return false;
6451 		return true;
6452 	}
6453 
6454 	return false;
6455 }
6456 
/* Load IRKs command handler.
 *
 * Atomically replaces the kernel's whole set of Identity Resolving
 * Keys with the list supplied by user space. If any entry has an
 * invalid identity address the entire command is rejected; entries
 * whose key value is on the blocked-keys list are skipped with a
 * warning. Finally sets HCI_RPA_RESOLVING since a caller using this
 * command is expected to handle IRKs.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on entries that can fit in a u16-sized payload */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries up front so the existing IRK list is only
	 * cleared once the whole request is known to be well-formed.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6527 
6528 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6529 {
6530 	if (key->initiator != 0x00 && key->initiator != 0x01)
6531 		return false;
6532 
6533 	switch (key->addr.type) {
6534 	case BDADDR_LE_PUBLIC:
6535 		return true;
6536 
6537 	case BDADDR_LE_RANDOM:
6538 		/* Two most significant bits shall be set */
6539 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6540 			return false;
6541 		return true;
6542 	}
6543 
6544 	return false;
6545 }
6546 
/* Load Long Term Keys command handler.
 *
 * Atomically replaces the kernel's SMP LTK store with the supplied
 * list. Malformed entries reject the whole command; blocked keys and
 * debug keys are skipped (debug keys via the deliberate fallthrough
 * into the default: continue below).
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on entries that can fit in a u16-sized payload */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate everything before clearing the existing key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type to SMP key type and auth level */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys are intentionally not stored: fall
			 * through into the default and skip this entry.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6642 
/* Completion callback for Get Connection Information.
 *
 * Builds the reply from the values cached on the hci_conn (refreshed
 * by get_conn_info_sync on success) and releases the connection
 * reference taken when the command was queued.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* On failure report sentinel "invalid" values */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	/* conn may have been released already by get_conn_info_sync if
	 * the connection went away before the command ran.
	 */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	mgmt_pending_free(cmd);
}
6676 
/* hci_cmd_sync work for Get Connection Information.
 *
 * Re-validates that the connection still exists and is connected,
 * then refreshes the cached RSSI and TX power values by issuing the
 * corresponding HCI commands synchronously.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
		/* Drop the reference taken at queue time so the
		 * completion callback does not release it again.
		 */
		if (cmd->user_data) {
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
		/* NOTE(review): a positive MGMT status is returned here
		 * while the paths below return negative errno values —
		 * confirm mgmt_status() in the completion handler maps
		 * this as intended.
		 */
		return MGMT_STATUS_NOT_CONNECTED;
	}

	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
6719 
/* Get Connection Information command handler.
 *
 * Returns RSSI and TX power for a connected device. Values cached on
 * the hci_conn are reused while fresh enough; otherwise a sync work
 * item is queued to re-query the controller and the reply is sent
 * from get_conn_info_complete.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd)
			err = -ENOMEM;
		else
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		/* Hold a reference for the duration of the queued work;
		 * released by get_conn_info_complete.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6813 
/* Completion callback for Get Clock Information.
 *
 * On success fills in the local clock and, when a connection was
 * involved, the piconet clock and accuracy; releases the connection
 * reference taken when the command was queued.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On failure reply with zeroed clock values */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
6846 
/* hci_cmd_sync work for Get Clock Information.
 *
 * Always reads the local clock (handle 0 in the zeroed command). If a
 * connection was attached to the pending command and still exists,
 * additionally reads the piconet clock for that connection; otherwise
 * drops the stale connection reference.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	/* Zeroed hci_cp means: handle 0x0000, which 0x00 (local clock) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	err = hci_read_clock_sync(hdev, &hci_cp);

	if (conn) {
		/* Make sure connection still exists */
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);

		if (conn && conn == cmd->user_data &&
		    conn->state == BT_CONNECTED) {
			hci_cp.handle = cpu_to_le16(conn->handle);
			hci_cp.which = 0x01; /* Piconet clock */
			err = hci_read_clock_sync(hdev, &hci_cp);
		} else if (cmd->user_data) {
			/* Connection went away: release the reference so
			 * the completion callback does not double-drop.
			 */
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
	}

	return err;
}
6877 
/* Get Clock Information command handler.
 *
 * BR/EDR only. With BDADDR_ANY, only the local clock is queried; with
 * a specific address the device must be connected and the piconet
 * clock is read as well. The reply is sent asynchronously from
 * get_clock_info_complete.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);

	} else if (conn) {
		/* Hold a reference for the queued work; released by the
		 * sync work or the completion callback.
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
6945 
6946 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6947 {
6948 	struct hci_conn *conn;
6949 
6950 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6951 	if (!conn)
6952 		return false;
6953 
6954 	if (conn->dst_type != type)
6955 		return false;
6956 
6957 	if (conn->state != BT_CONNECTED)
6958 		return false;
6959 
6960 	return true;
6961 }
6962 
/* This function requires the caller holds hdev->lock */
/* Create (or reuse) connection parameters for the given address and
 * set its auto-connect policy, moving the entry onto the matching
 * pend_le_conns/pend_le_reports action list. Returns 0 on success,
 * -EIO if the parameter entry could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from the current action list before re-filing */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7007 
7008 static void device_added(struct sock *sk, struct hci_dev *hdev,
7009 			 bdaddr_t *bdaddr, u8 type, u8 action)
7010 {
7011 	struct mgmt_ev_device_added ev;
7012 
7013 	bacpy(&ev.addr.bdaddr, bdaddr);
7014 	ev.addr.type = type;
7015 	ev.action = action;
7016 
7017 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7018 }
7019 
/* hci_cmd_sync work for Add Device: refresh the passive scan state so
 * the newly added entry takes effect.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7024 
/* Add Device command handler.
 *
 * For BR/EDR addresses only action 0x01 is supported and the device is
 * put on the accept list. For LE addresses the action selects the
 * auto-connect policy: 0x00 = background scan/report, 0x01 = direct
 * connect, 0x02 = always auto-connect. Emits Device Added and Device
 * Flags Changed events on success.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action to the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			bitmap_to_arr32(&current_flags, params->flags,
					__HCI_CONN_NUM_FLAGS);
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
			__HCI_CONN_NUM_FLAGS);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7128 
7129 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7130 			   bdaddr_t *bdaddr, u8 type)
7131 {
7132 	struct mgmt_ev_device_removed ev;
7133 
7134 	bacpy(&ev.addr.bdaddr, bdaddr);
7135 	ev.addr.type = type;
7136 
7137 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7138 }
7139 
/* hci_cmd_sync work for Remove Device: refresh the passive scan state
 * so the removed entry no longer influences scanning.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7144 
/* Remove Device command handler.
 *
 * With a specific address: removes the device from the BR/EDR accept
 * list or deletes its LE connection parameters. With BDADDR_ANY (and
 * address type 0): clears the whole accept list and all removable LE
 * connection parameters. Emits Device Removed events for each entry.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed
		 * through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep params with a pending explicit connect,
			 * just downgrade their policy.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7272 
/* Load Connection Parameters command handler.
 *
 * Clears all disabled connection parameter entries and then loads the
 * supplied LE connection parameters. Individual entries with invalid
 * address types or out-of-range parameters are logged and skipped
 * rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound on entries that can fit in a u16-sized payload */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared entry count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7357 
7358 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7359 			       void *data, u16 len)
7360 {
7361 	struct mgmt_cp_set_external_config *cp = data;
7362 	bool changed;
7363 	int err;
7364 
7365 	bt_dev_dbg(hdev, "sock %p", sk);
7366 
7367 	if (hdev_is_powered(hdev))
7368 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7369 				       MGMT_STATUS_REJECTED);
7370 
7371 	if (cp->config != 0x00 && cp->config != 0x01)
7372 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7373 				         MGMT_STATUS_INVALID_PARAMS);
7374 
7375 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7376 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7377 				       MGMT_STATUS_NOT_SUPPORTED);
7378 
7379 	hci_dev_lock(hdev);
7380 
7381 	if (cp->config)
7382 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7383 	else
7384 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7385 
7386 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7387 	if (err < 0)
7388 		goto unlock;
7389 
7390 	if (!changed)
7391 		goto unlock;
7392 
7393 	err = new_options(hdev, sk);
7394 
7395 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7396 		mgmt_index_removed(hdev);
7397 
7398 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7399 			hci_dev_set_flag(hdev, HCI_CONFIG);
7400 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7401 
7402 			queue_work(hdev->req_workqueue, &hdev->power_on);
7403 		} else {
7404 			set_bit(HCI_RAW, &hdev->flags);
7405 			mgmt_index_added(hdev);
7406 		}
7407 	}
7408 
7409 unlock:
7410 	hci_dev_unlock(hdev);
7411 	return err;
7412 }
7413 
/* Set Public Address command handler.
 *
 * Stores the address in hdev->public_addr for the driver's
 * set_bdaddr callback to program later. Only valid while powered off,
 * with a non-zero address, on controllers providing set_bdaddr. If
 * the device becomes configured as a result, the index is moved to
 * the configured list and powered on for initialization.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7465 
/* Completion handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA: translate the
 * HCI Read Local OOB (Extended) Data response held in cmd->skb into a
 * mgmt reply carrying the hash/randomizer values as EIR fields, send it
 * to the requester and broadcast it to other OOB-event listeners.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Derive an overall status from the skb when the request itself
	 * did not already fail: missing skb, error pointer, or the HCI
	 * status byte carried in the response payload.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status has already been through
		 * mgmt_status() above, so this second mapping looks
		 * redundant — confirm before relying on it.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy SSP: only P-192 hash and randomizer exist. */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* Class of Device field (5) + two 18-byte
			 * hash/randomizer EIR fields.
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: always P-256 values; include the
		 * P-192 pair as well unless in SC-only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On any failure path eir_len is 0 and the reply carries no EIR
	 * data (the hash/randomizer pointers are never dereferenced).
	 */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the fresh OOB data to all other listening sockets
	 * (the requester is excluded via cmd->sk).
	 */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7585 
7586 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7587 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7588 {
7589 	struct mgmt_pending_cmd *cmd;
7590 	int err;
7591 
7592 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7593 			       cp, sizeof(*cp));
7594 	if (!cmd)
7595 		return -ENOMEM;
7596 
7597 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7598 				 read_local_oob_ext_data_complete);
7599 
7600 	if (err < 0) {
7601 		mgmt_pending_remove(cmd);
7602 		return err;
7603 	}
7604 
7605 	return 0;
7606 }
7607 
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler: return out-of-band pairing
 * data for either BR/EDR (via an asynchronous HCI request) or LE (built
 * synchronously from local state), packed as EIR fields in the reply.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: validate the requested address type and compute the
	 * worst-case EIR length so the reply can be allocated up front.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill in the actual EIR data under the lock. */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP data must come from the controller; defer
			 * to an asynchronous HCI request and return.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the static random address when it is the one in
		 * use; trailing byte 0x01 marks a random address type,
		 * 0x00 a public one.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 peripheral-preferred when advertising,
		 * otherwise 0x01 central-preferred.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		/* Only include LE SC confirm/random values when Secure
		 * Connections is enabled (generated above).
		 */
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the new OOB data to other listeners, excluding the
	 * requester which already got the command reply.
	 */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7768 
7769 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7770 {
7771 	u32 flags = 0;
7772 
7773 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7774 	flags |= MGMT_ADV_FLAG_DISCOV;
7775 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7776 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7777 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7778 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7779 	flags |= MGMT_ADV_PARAM_DURATION;
7780 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7781 	flags |= MGMT_ADV_PARAM_INTERVALS;
7782 	flags |= MGMT_ADV_PARAM_TX_POWER;
7783 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7784 
7785 	/* In extended adv TX_POWER returned from Set Adv Param
7786 	 * will be always valid.
7787 	 */
7788 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7789 	    ext_adv_capable(hdev))
7790 		flags |= MGMT_ADV_FLAG_TX_POWER;
7791 
7792 	if (ext_adv_capable(hdev)) {
7793 		flags |= MGMT_ADV_FLAG_SEC_1M;
7794 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7795 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7796 
7797 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7798 			flags |= MGMT_ADV_FLAG_SEC_2M;
7799 
7800 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7801 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7802 	}
7803 
7804 	return flags;
7805 }
7806 
7807 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7808 			     void *data, u16 data_len)
7809 {
7810 	struct mgmt_rp_read_adv_features *rp;
7811 	size_t rp_len;
7812 	int err;
7813 	struct adv_info *adv_instance;
7814 	u32 supported_flags;
7815 	u8 *instance;
7816 
7817 	bt_dev_dbg(hdev, "sock %p", sk);
7818 
7819 	if (!lmp_le_capable(hdev))
7820 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7821 				       MGMT_STATUS_REJECTED);
7822 
7823 	hci_dev_lock(hdev);
7824 
7825 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7826 	rp = kmalloc(rp_len, GFP_ATOMIC);
7827 	if (!rp) {
7828 		hci_dev_unlock(hdev);
7829 		return -ENOMEM;
7830 	}
7831 
7832 	supported_flags = get_supported_adv_flags(hdev);
7833 
7834 	rp->supported_flags = cpu_to_le32(supported_flags);
7835 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7836 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7837 	rp->max_instances = hdev->le_num_of_adv_sets;
7838 	rp->num_instances = hdev->adv_instance_cnt;
7839 
7840 	instance = rp->instance;
7841 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7842 		*instance = adv_instance->instance;
7843 		instance++;
7844 	}
7845 
7846 	hci_dev_unlock(hdev);
7847 
7848 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7849 				MGMT_STATUS_SUCCESS, rp, rp_len);
7850 
7851 	kfree(rp);
7852 
7853 	return err;
7854 }
7855 
7856 static u8 calculate_name_len(struct hci_dev *hdev)
7857 {
7858 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7859 
7860 	return eir_append_local_name(hdev, buf, 0);
7861 }
7862 
7863 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7864 			   bool is_adv_data)
7865 {
7866 	u8 max_len = HCI_MAX_AD_LENGTH;
7867 
7868 	if (is_adv_data) {
7869 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7870 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7871 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7872 			max_len -= 3;
7873 
7874 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7875 			max_len -= 3;
7876 	} else {
7877 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7878 			max_len -= calculate_name_len(hdev);
7879 
7880 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7881 			max_len -= 4;
7882 	}
7883 
7884 	return max_len;
7885 }
7886 
7887 static bool flags_managed(u32 adv_flags)
7888 {
7889 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7890 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7891 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7892 }
7893 
7894 static bool tx_power_managed(u32 adv_flags)
7895 {
7896 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7897 }
7898 
7899 static bool name_managed(u32 adv_flags)
7900 {
7901 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7902 }
7903 
7904 static bool appearance_managed(u32 adv_flags)
7905 {
7906 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7907 }
7908 
7909 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7910 			      u8 len, bool is_adv_data)
7911 {
7912 	int i, cur_len;
7913 	u8 max_len;
7914 
7915 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7916 
7917 	if (len > max_len)
7918 		return false;
7919 
7920 	/* Make sure that the data is correctly formatted. */
7921 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7922 		cur_len = data[i];
7923 
7924 		if (!cur_len)
7925 			continue;
7926 
7927 		if (data[i + 1] == EIR_FLAGS &&
7928 		    (!is_adv_data || flags_managed(adv_flags)))
7929 			return false;
7930 
7931 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7932 			return false;
7933 
7934 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7935 			return false;
7936 
7937 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7938 			return false;
7939 
7940 		if (data[i + 1] == EIR_APPEARANCE &&
7941 		    appearance_managed(adv_flags))
7942 			return false;
7943 
7944 		/* If the current field length would exceed the total data
7945 		 * length, then it's invalid.
7946 		 */
7947 		if (i + cur_len >= len)
7948 			return false;
7949 	}
7950 
7951 	return true;
7952 }
7953 
7954 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7955 {
7956 	u32 supported_flags, phy_flags;
7957 
7958 	/* The current implementation only supports a subset of the specified
7959 	 * flags. Also need to check mutual exclusiveness of sec flags.
7960 	 */
7961 	supported_flags = get_supported_adv_flags(hdev);
7962 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7963 	if (adv_flags & ~supported_flags ||
7964 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7965 		return false;
7966 
7967 	return true;
7968 }
7969 
7970 static bool adv_busy(struct hci_dev *hdev)
7971 {
7972 	return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7973 		pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7974 		pending_find(MGMT_OP_SET_LE, hdev) ||
7975 		pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7976 		pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
7977 }
7978 
7979 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
7980 			     int err)
7981 {
7982 	struct adv_info *adv, *n;
7983 
7984 	bt_dev_dbg(hdev, "err %d", err);
7985 
7986 	hci_dev_lock(hdev);
7987 
7988 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
7989 		u8 instance;
7990 
7991 		if (!adv->pending)
7992 			continue;
7993 
7994 		if (!err) {
7995 			adv->pending = false;
7996 			continue;
7997 		}
7998 
7999 		instance = adv->instance;
8000 
8001 		if (hdev->cur_adv_instance == instance)
8002 			cancel_adv_timeout(hdev);
8003 
8004 		hci_remove_adv_instance(hdev, instance);
8005 		mgmt_advertising_removed(sk, hdev, instance);
8006 	}
8007 
8008 	hci_dev_unlock(hdev);
8009 }
8010 
8011 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8012 {
8013 	struct mgmt_pending_cmd *cmd = data;
8014 	struct mgmt_cp_add_advertising *cp = cmd->param;
8015 	struct mgmt_rp_add_advertising rp;
8016 
8017 	memset(&rp, 0, sizeof(rp));
8018 
8019 	rp.instance = cp->instance;
8020 
8021 	if (err)
8022 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8023 				mgmt_status(err));
8024 	else
8025 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8026 				  mgmt_status(err), &rp, sizeof(rp));
8027 
8028 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8029 
8030 	mgmt_pending_free(cmd);
8031 }
8032 
/* hci_cmd_sync work for MGMT_OP_ADD_ADVERTISING: (re)schedule the added
 * instance, forcing its data to be programmed.
 */
static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
8040 
/* MGMT_OP_ADD_ADVERTISING handler: validate and register a complete
 * advertising instance (params + data + scan response) and schedule it
 * if the controller is in a state where it can advertise right away.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance identifiers are 1-based, bounded by the adv sets the
	 * controller exposes.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must match the declared lengths. */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout requires a running clock, i.e. a powered adapter. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Scan response data follows adv data in the same buffer. */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8173 
/* Completion callback for MGMT_OP_ADD_EXT_ADV_PARAMS: report the selected
 * TX power and remaining data capacity, or tear the instance down again
 * if programming the parameters failed.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance may already have been removed concurrently. */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* NOTE(review): cmd comes straight from data and is never NULL
	 * here; the guard looks purely defensive — confirm.
	 */
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8224 
/* hci_cmd_sync work for MGMT_OP_ADD_EXT_ADV_PARAMS: program the extended
 * advertising parameters for the new instance.
 */
static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}
8232 
8233 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8234 			      void *data, u16 data_len)
8235 {
8236 	struct mgmt_cp_add_ext_adv_params *cp = data;
8237 	struct mgmt_rp_add_ext_adv_params rp;
8238 	struct mgmt_pending_cmd *cmd = NULL;
8239 	u32 flags, min_interval, max_interval;
8240 	u16 timeout, duration;
8241 	u8 status;
8242 	s8 tx_power;
8243 	int err;
8244 
8245 	BT_DBG("%s", hdev->name);
8246 
8247 	status = mgmt_le_support(hdev);
8248 	if (status)
8249 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8250 				       status);
8251 
8252 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8253 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8254 				       MGMT_STATUS_INVALID_PARAMS);
8255 
8256 	/* The purpose of breaking add_advertising into two separate MGMT calls
8257 	 * for params and data is to allow more parameters to be added to this
8258 	 * structure in the future. For this reason, we verify that we have the
8259 	 * bare minimum structure we know of when the interface was defined. Any
8260 	 * extra parameters we don't know about will be ignored in this request.
8261 	 */
8262 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8263 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8264 				       MGMT_STATUS_INVALID_PARAMS);
8265 
8266 	flags = __le32_to_cpu(cp->flags);
8267 
8268 	if (!requested_adv_flags_are_valid(hdev, flags))
8269 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8270 				       MGMT_STATUS_INVALID_PARAMS);
8271 
8272 	hci_dev_lock(hdev);
8273 
8274 	/* In new interface, we require that we are powered to register */
8275 	if (!hdev_is_powered(hdev)) {
8276 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8277 				      MGMT_STATUS_REJECTED);
8278 		goto unlock;
8279 	}
8280 
8281 	if (adv_busy(hdev)) {
8282 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8283 				      MGMT_STATUS_BUSY);
8284 		goto unlock;
8285 	}
8286 
8287 	/* Parse defined parameters from request, use defaults otherwise */
8288 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8289 		  __le16_to_cpu(cp->timeout) : 0;
8290 
8291 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8292 		   __le16_to_cpu(cp->duration) :
8293 		   hdev->def_multi_adv_rotation_duration;
8294 
8295 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8296 		       __le32_to_cpu(cp->min_interval) :
8297 		       hdev->le_adv_min_interval;
8298 
8299 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8300 		       __le32_to_cpu(cp->max_interval) :
8301 		       hdev->le_adv_max_interval;
8302 
8303 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8304 		   cp->tx_power :
8305 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8306 
8307 	/* Create advertising instance with no advertising or response data */
8308 	err = hci_add_adv_instance(hdev, cp->instance, flags,
8309 				   0, NULL, 0, NULL, timeout, duration,
8310 				   tx_power, min_interval, max_interval);
8311 
8312 	if (err < 0) {
8313 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8314 				      MGMT_STATUS_FAILED);
8315 		goto unlock;
8316 	}
8317 
8318 	/* Submit request for advertising params if ext adv available */
8319 	if (ext_adv_capable(hdev)) {
8320 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8321 				       data, data_len);
8322 		if (!cmd) {
8323 			err = -ENOMEM;
8324 			hci_remove_adv_instance(hdev, cp->instance);
8325 			goto unlock;
8326 		}
8327 
8328 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8329 					 add_ext_adv_params_complete);
8330 		if (err < 0)
8331 			mgmt_pending_free(cmd);
8332 	} else {
8333 		rp.instance = cp->instance;
8334 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8335 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8336 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8337 		err = mgmt_cmd_complete(sk, hdev->id,
8338 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8339 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8340 	}
8341 
8342 unlock:
8343 	hci_dev_unlock(hdev);
8344 
8345 	return err;
8346 }
8347 
8348 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8349 {
8350 	struct mgmt_pending_cmd *cmd = data;
8351 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8352 	struct mgmt_rp_add_advertising rp;
8353 
8354 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8355 
8356 	memset(&rp, 0, sizeof(rp));
8357 
8358 	rp.instance = cp->instance;
8359 
8360 	if (err)
8361 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8362 				mgmt_status(err));
8363 	else
8364 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8365 				  mgmt_status(err), &rp, sizeof(rp));
8366 
8367 	mgmt_pending_free(cmd);
8368 }
8369 
8370 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8371 {
8372 	struct mgmt_pending_cmd *cmd = data;
8373 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8374 	int err;
8375 
8376 	if (ext_adv_capable(hdev)) {
8377 		err = hci_update_adv_data_sync(hdev, cp->instance);
8378 		if (err)
8379 			return err;
8380 
8381 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8382 		if (err)
8383 			return err;
8384 
8385 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8386 	}
8387 
8388 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8389 }
8390 
/* MGMT_OP_ADD_EXT_ADV_DATA handler: attach advertising and scan-response
 * data to an instance previously created via Add Ext Adv Params, then
 * schedule it.  On any failure the half-created instance is removed.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must exist (created by Add Ext Adv Params). */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		/* Still announce the now-complete instance. */
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8509 
8510 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8511 					int err)
8512 {
8513 	struct mgmt_pending_cmd *cmd = data;
8514 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8515 	struct mgmt_rp_remove_advertising rp;
8516 
8517 	bt_dev_dbg(hdev, "err %d", err);
8518 
8519 	memset(&rp, 0, sizeof(rp));
8520 	rp.instance = cp->instance;
8521 
8522 	if (err)
8523 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8524 				mgmt_status(err));
8525 	else
8526 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8527 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8528 
8529 	mgmt_pending_free(cmd);
8530 }
8531 
8532 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8533 {
8534 	struct mgmt_pending_cmd *cmd = data;
8535 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8536 	int err;
8537 
8538 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8539 	if (err)
8540 		return err;
8541 
8542 	if (list_empty(&hdev->adv_instances))
8543 		err = hci_disable_advertising_sync(hdev);
8544 
8545 	return err;
8546 }
8547 
/* Handle MGMT_OP_REMOVE_ADVERTISING: remove one advertising instance
 * (cp->instance != 0) or all instances (cp->instance == 0). The actual
 * HCI work is queued via hci_cmd_sync and answered from
 * remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must actually exist; zero means "all". */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Reject while another advertising or LE state change is in flight. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instance is registered at all. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* On success remove_advertising_complete() frees cmd; if queueing
	 * fails it must be freed here.
	 */
	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8597 
8598 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8599 			     void *data, u16 data_len)
8600 {
8601 	struct mgmt_cp_get_adv_size_info *cp = data;
8602 	struct mgmt_rp_get_adv_size_info rp;
8603 	u32 flags, supported_flags;
8604 	int err;
8605 
8606 	bt_dev_dbg(hdev, "sock %p", sk);
8607 
8608 	if (!lmp_le_capable(hdev))
8609 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8610 				       MGMT_STATUS_REJECTED);
8611 
8612 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8613 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8614 				       MGMT_STATUS_INVALID_PARAMS);
8615 
8616 	flags = __le32_to_cpu(cp->flags);
8617 
8618 	/* The current implementation only supports a subset of the specified
8619 	 * flags.
8620 	 */
8621 	supported_flags = get_supported_adv_flags(hdev);
8622 	if (flags & ~supported_flags)
8623 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8624 				       MGMT_STATUS_INVALID_PARAMS);
8625 
8626 	rp.instance = cp->instance;
8627 	rp.flags = cp->flags;
8628 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8629 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8630 
8631 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8632 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8633 
8634 	return err;
8635 }
8636 
/* Dispatch table for mgmt commands, indexed by opcode. Each entry names
 * the handler, the expected (minimum) parameter size, and optional flags
 * such as HCI_MGMT_VAR_LEN (variable-length parameters), HCI_MGMT_NO_HDEV
 * (no controller index required) and HCI_MGMT_UNTRUSTED (allowed on
 * untrusted sockets).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8764 
8765 void mgmt_index_added(struct hci_dev *hdev)
8766 {
8767 	struct mgmt_ev_ext_index ev;
8768 
8769 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8770 		return;
8771 
8772 	switch (hdev->dev_type) {
8773 	case HCI_PRIMARY:
8774 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8775 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8776 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8777 			ev.type = 0x01;
8778 		} else {
8779 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8780 					 HCI_MGMT_INDEX_EVENTS);
8781 			ev.type = 0x00;
8782 		}
8783 		break;
8784 	case HCI_AMP:
8785 		ev.type = 0x02;
8786 		break;
8787 	default:
8788 		return;
8789 	}
8790 
8791 	ev.bus = hdev->bus;
8792 
8793 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8794 			 HCI_MGMT_EXT_INDEX_EVENTS);
8795 }
8796 
8797 void mgmt_index_removed(struct hci_dev *hdev)
8798 {
8799 	struct mgmt_ev_ext_index ev;
8800 	u8 status = MGMT_STATUS_INVALID_INDEX;
8801 
8802 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8803 		return;
8804 
8805 	switch (hdev->dev_type) {
8806 	case HCI_PRIMARY:
8807 		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8808 
8809 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8810 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8811 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8812 			ev.type = 0x01;
8813 		} else {
8814 			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8815 					 HCI_MGMT_INDEX_EVENTS);
8816 			ev.type = 0x00;
8817 		}
8818 		break;
8819 	case HCI_AMP:
8820 		ev.type = 0x02;
8821 		break;
8822 	default:
8823 		return;
8824 	}
8825 
8826 	ev.bus = hdev->bus;
8827 
8828 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8829 			 HCI_MGMT_EXT_INDEX_EVENTS);
8830 }
8831 
8832 void mgmt_power_on(struct hci_dev *hdev, int err)
8833 {
8834 	struct cmd_lookup match = { NULL, hdev };
8835 
8836 	bt_dev_dbg(hdev, "err %d", err);
8837 
8838 	hci_dev_lock(hdev);
8839 
8840 	if (!err) {
8841 		restart_le_actions(hdev);
8842 		hci_update_passive_scan(hdev);
8843 	}
8844 
8845 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8846 
8847 	new_settings(hdev, match.sk);
8848 
8849 	if (match.sk)
8850 		sock_put(match.sk);
8851 
8852 	hci_dev_unlock(hdev);
8853 }
8854 
8855 void __mgmt_power_off(struct hci_dev *hdev)
8856 {
8857 	struct cmd_lookup match = { NULL, hdev };
8858 	u8 status, zero_cod[] = { 0, 0, 0 };
8859 
8860 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8861 
8862 	/* If the power off is because of hdev unregistration let
8863 	 * use the appropriate INVALID_INDEX status. Otherwise use
8864 	 * NOT_POWERED. We cover both scenarios here since later in
8865 	 * mgmt_index_removed() any hci_conn callbacks will have already
8866 	 * been triggered, potentially causing misleading DISCONNECTED
8867 	 * status responses.
8868 	 */
8869 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8870 		status = MGMT_STATUS_INVALID_INDEX;
8871 	else
8872 		status = MGMT_STATUS_NOT_POWERED;
8873 
8874 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8875 
8876 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8877 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8878 				   zero_cod, sizeof(zero_cod),
8879 				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8880 		ext_info_changed(hdev, NULL);
8881 	}
8882 
8883 	new_settings(hdev, match.sk);
8884 
8885 	if (match.sk)
8886 		sock_put(match.sk);
8887 }
8888 
8889 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8890 {
8891 	struct mgmt_pending_cmd *cmd;
8892 	u8 status;
8893 
8894 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8895 	if (!cmd)
8896 		return;
8897 
8898 	if (err == -ERFKILL)
8899 		status = MGMT_STATUS_RFKILLED;
8900 	else
8901 		status = MGMT_STATUS_FAILED;
8902 
8903 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8904 
8905 	mgmt_pending_remove(cmd);
8906 }
8907 
8908 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8909 		       bool persistent)
8910 {
8911 	struct mgmt_ev_new_link_key ev;
8912 
8913 	memset(&ev, 0, sizeof(ev));
8914 
8915 	ev.store_hint = persistent;
8916 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8917 	ev.key.addr.type = BDADDR_BREDR;
8918 	ev.key.type = key->type;
8919 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8920 	ev.key.pin_len = key->pin_len;
8921 
8922 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8923 }
8924 
8925 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8926 {
8927 	switch (ltk->type) {
8928 	case SMP_LTK:
8929 	case SMP_LTK_RESPONDER:
8930 		if (ltk->authenticated)
8931 			return MGMT_LTK_AUTHENTICATED;
8932 		return MGMT_LTK_UNAUTHENTICATED;
8933 	case SMP_LTK_P256:
8934 		if (ltk->authenticated)
8935 			return MGMT_LTK_P256_AUTH;
8936 		return MGMT_LTK_P256_UNAUTH;
8937 	case SMP_LTK_P256_DEBUG:
8938 		return MGMT_LTK_P256_DEBUG;
8939 	}
8940 
8941 	return MGMT_LTK_UNAUTHENTICATED;
8942 }
8943 
8944 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8945 {
8946 	struct mgmt_ev_new_long_term_key ev;
8947 
8948 	memset(&ev, 0, sizeof(ev));
8949 
8950 	/* Devices using resolvable or non-resolvable random addresses
8951 	 * without providing an identity resolving key don't require
8952 	 * to store long term keys. Their addresses will change the
8953 	 * next time around.
8954 	 *
8955 	 * Only when a remote device provides an identity address
8956 	 * make sure the long term key is stored. If the remote
8957 	 * identity is known, the long term keys are internally
8958 	 * mapped to the identity address. So allow static random
8959 	 * and public addresses here.
8960 	 */
8961 	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8962 	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
8963 		ev.store_hint = 0x00;
8964 	else
8965 		ev.store_hint = persistent;
8966 
8967 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8968 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8969 	ev.key.type = mgmt_ltk_type(key);
8970 	ev.key.enc_size = key->enc_size;
8971 	ev.key.ediv = key->ediv;
8972 	ev.key.rand = key->rand;
8973 
8974 	if (key->type == SMP_LTK)
8975 		ev.key.initiator = 1;
8976 
8977 	/* Make sure we copy only the significant bytes based on the
8978 	 * encryption key size, and set the rest of the value to zeroes.
8979 	 */
8980 	memcpy(ev.key.val, key->val, key->enc_size);
8981 	memset(ev.key.val + key->enc_size, 0,
8982 	       sizeof(ev.key.val) - key->enc_size);
8983 
8984 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
8985 }
8986 
8987 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8988 {
8989 	struct mgmt_ev_new_irk ev;
8990 
8991 	memset(&ev, 0, sizeof(ev));
8992 
8993 	ev.store_hint = persistent;
8994 
8995 	bacpy(&ev.rpa, &irk->rpa);
8996 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8997 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8998 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8999 
9000 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9001 }
9002 
9003 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9004 		   bool persistent)
9005 {
9006 	struct mgmt_ev_new_csrk ev;
9007 
9008 	memset(&ev, 0, sizeof(ev));
9009 
9010 	/* Devices using resolvable or non-resolvable random addresses
9011 	 * without providing an identity resolving key don't require
9012 	 * to store signature resolving keys. Their addresses will change
9013 	 * the next time around.
9014 	 *
9015 	 * Only when a remote device provides an identity address
9016 	 * make sure the signature resolving key is stored. So allow
9017 	 * static random and public addresses here.
9018 	 */
9019 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9020 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9021 		ev.store_hint = 0x00;
9022 	else
9023 		ev.store_hint = persistent;
9024 
9025 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9026 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9027 	ev.key.type = csrk->type;
9028 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9029 
9030 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9031 }
9032 
9033 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9034 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9035 			 u16 max_interval, u16 latency, u16 timeout)
9036 {
9037 	struct mgmt_ev_new_conn_param ev;
9038 
9039 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9040 		return;
9041 
9042 	memset(&ev, 0, sizeof(ev));
9043 	bacpy(&ev.addr.bdaddr, bdaddr);
9044 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9045 	ev.store_hint = store_hint;
9046 	ev.min_interval = cpu_to_le16(min_interval);
9047 	ev.max_interval = cpu_to_le16(max_interval);
9048 	ev.latency = cpu_to_le16(latency);
9049 	ev.timeout = cpu_to_le16(timeout);
9050 
9051 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9052 }
9053 
9054 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9055 			   u8 *name, u8 name_len)
9056 {
9057 	struct sk_buff *skb;
9058 	struct mgmt_ev_device_connected *ev;
9059 	u16 eir_len = 0;
9060 	u32 flags = 0;
9061 
9062 	if (conn->le_adv_data_len > 0)
9063 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9064 				     conn->le_adv_data_len);
9065 	else
9066 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9067 				     2 + name_len + 5);
9068 
9069 	ev = skb_put(skb, sizeof(*ev));
9070 	bacpy(&ev->addr.bdaddr, &conn->dst);
9071 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9072 
9073 	if (conn->out)
9074 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9075 
9076 	ev->flags = __cpu_to_le32(flags);
9077 
9078 	/* We must ensure that the EIR Data fields are ordered and
9079 	 * unique. Keep it simple for now and avoid the problem by not
9080 	 * adding any BR/EDR data to the LE adv.
9081 	 */
9082 	if (conn->le_adv_data_len > 0) {
9083 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9084 		eir_len = conn->le_adv_data_len;
9085 	} else {
9086 		if (name_len > 0) {
9087 			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
9088 						  name, name_len);
9089 			skb_put(skb, eir_len);
9090 		}
9091 
9092 		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0) {
9093 			eir_len = eir_append_data(ev->eir, eir_len,
9094 						  EIR_CLASS_OF_DEV,
9095 						  conn->dev_class, 3);
9096 			skb_put(skb, 5);
9097 		}
9098 	}
9099 
9100 	ev->eir_len = cpu_to_le16(eir_len);
9101 
9102 	mgmt_event_skb(skb, NULL);
9103 }
9104 
9105 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9106 {
9107 	struct sock **sk = data;
9108 
9109 	cmd->cmd_complete(cmd, 0);
9110 
9111 	*sk = cmd->sk;
9112 	sock_hold(*sk);
9113 
9114 	mgmt_pending_remove(cmd);
9115 }
9116 
/* mgmt_pending_foreach() callback: emit the Device Unpaired event for a
 * pending UNPAIR_DEVICE command's address, then complete and remove the
 * command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9127 
9128 bool mgmt_powering_down(struct hci_dev *hdev)
9129 {
9130 	struct mgmt_pending_cmd *cmd;
9131 	struct mgmt_mode *cp;
9132 
9133 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9134 	if (!cmd)
9135 		return false;
9136 
9137 	cp = cmd->param;
9138 	if (!cp->val)
9139 		return true;
9140 
9141 	return false;
9142 }
9143 
/* Emit a Device Disconnected event and resolve related pending commands.
 * Also expedites a pending power-off once the last connection drops.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections previously announced via Device Connected are
	 * reported as disconnected.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() hands back a referenced socket so the command
	 * initiator is skipped when broadcasting the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9183 
9184 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9185 			    u8 link_type, u8 addr_type, u8 status)
9186 {
9187 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9188 	struct mgmt_cp_disconnect *cp;
9189 	struct mgmt_pending_cmd *cmd;
9190 
9191 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9192 			     hdev);
9193 
9194 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9195 	if (!cmd)
9196 		return;
9197 
9198 	cp = cmd->param;
9199 
9200 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9201 		return;
9202 
9203 	if (cp->addr.type != bdaddr_type)
9204 		return;
9205 
9206 	cmd->cmd_complete(cmd, mgmt_status(status));
9207 	mgmt_pending_remove(cmd);
9208 }
9209 
9210 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9211 			 u8 addr_type, u8 status)
9212 {
9213 	struct mgmt_ev_connect_failed ev;
9214 
9215 	/* The connection is still in hci_conn_hash so test for 1
9216 	 * instead of 0 to know if this is the last one.
9217 	 */
9218 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9219 		cancel_delayed_work(&hdev->power_off);
9220 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
9221 	}
9222 
9223 	bacpy(&ev.addr.bdaddr, bdaddr);
9224 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9225 	ev.status = mgmt_status(status);
9226 
9227 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9228 }
9229 
9230 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9231 {
9232 	struct mgmt_ev_pin_code_request ev;
9233 
9234 	bacpy(&ev.addr.bdaddr, bdaddr);
9235 	ev.addr.type = BDADDR_BREDR;
9236 	ev.secure = secure;
9237 
9238 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9239 }
9240 
9241 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9242 				  u8 status)
9243 {
9244 	struct mgmt_pending_cmd *cmd;
9245 
9246 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9247 	if (!cmd)
9248 		return;
9249 
9250 	cmd->cmd_complete(cmd, mgmt_status(status));
9251 	mgmt_pending_remove(cmd);
9252 }
9253 
9254 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9255 				      u8 status)
9256 {
9257 	struct mgmt_pending_cmd *cmd;
9258 
9259 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9260 	if (!cmd)
9261 		return;
9262 
9263 	cmd->cmd_complete(cmd, mgmt_status(status));
9264 	mgmt_pending_remove(cmd);
9265 }
9266 
9267 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9268 			      u8 link_type, u8 addr_type, u32 value,
9269 			      u8 confirm_hint)
9270 {
9271 	struct mgmt_ev_user_confirm_request ev;
9272 
9273 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9274 
9275 	bacpy(&ev.addr.bdaddr, bdaddr);
9276 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9277 	ev.confirm_hint = confirm_hint;
9278 	ev.value = cpu_to_le32(value);
9279 
9280 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9281 			  NULL);
9282 }
9283 
9284 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9285 			      u8 link_type, u8 addr_type)
9286 {
9287 	struct mgmt_ev_user_passkey_request ev;
9288 
9289 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9290 
9291 	bacpy(&ev.addr.bdaddr, bdaddr);
9292 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9293 
9294 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9295 			  NULL);
9296 }
9297 
9298 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9299 				      u8 link_type, u8 addr_type, u8 status,
9300 				      u8 opcode)
9301 {
9302 	struct mgmt_pending_cmd *cmd;
9303 
9304 	cmd = pending_find(opcode, hdev);
9305 	if (!cmd)
9306 		return -ENOENT;
9307 
9308 	cmd->cmd_complete(cmd, mgmt_status(status));
9309 	mgmt_pending_remove(cmd);
9310 
9311 	return 0;
9312 }
9313 
/* Resolve a pending USER_CONFIRM_REPLY command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9320 
/* Resolve a pending USER_CONFIRM_NEG_REPLY command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9328 
/* Resolve a pending USER_PASSKEY_REPLY command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9335 
/* Resolve a pending USER_PASSKEY_NEG_REPLY command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9343 
9344 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9345 			     u8 link_type, u8 addr_type, u32 passkey,
9346 			     u8 entered)
9347 {
9348 	struct mgmt_ev_passkey_notify ev;
9349 
9350 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9351 
9352 	bacpy(&ev.addr.bdaddr, bdaddr);
9353 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9354 	ev.passkey = __cpu_to_le32(passkey);
9355 	ev.entered = entered;
9356 
9357 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9358 }
9359 
9360 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9361 {
9362 	struct mgmt_ev_auth_failed ev;
9363 	struct mgmt_pending_cmd *cmd;
9364 	u8 status = mgmt_status(hci_status);
9365 
9366 	bacpy(&ev.addr.bdaddr, &conn->dst);
9367 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9368 	ev.status = status;
9369 
9370 	cmd = find_pairing(conn);
9371 
9372 	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9373 		    cmd ? cmd->sk : NULL);
9374 
9375 	if (cmd) {
9376 		cmd->cmd_complete(cmd, status);
9377 		mgmt_pending_remove(cmd);
9378 	}
9379 }
9380 
9381 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9382 {
9383 	struct cmd_lookup match = { NULL, hdev };
9384 	bool changed;
9385 
9386 	if (status) {
9387 		u8 mgmt_err = mgmt_status(status);
9388 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9389 				     cmd_status_rsp, &mgmt_err);
9390 		return;
9391 	}
9392 
9393 	if (test_bit(HCI_AUTH, &hdev->flags))
9394 		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9395 	else
9396 		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9397 
9398 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9399 			     &match);
9400 
9401 	if (changed)
9402 		new_settings(hdev, match.sk);
9403 
9404 	if (match.sk)
9405 		sock_put(match.sk);
9406 }
9407 
9408 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9409 {
9410 	struct cmd_lookup *match = data;
9411 
9412 	if (match->sk == NULL) {
9413 		match->sk = cmd->sk;
9414 		sock_hold(match->sk);
9415 	}
9416 }
9417 
9418 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9419 				    u8 status)
9420 {
9421 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9422 
9423 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9424 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9425 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9426 
9427 	if (!status) {
9428 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9429 				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9430 		ext_info_changed(hdev, NULL);
9431 	}
9432 
9433 	if (match.sk)
9434 		sock_put(match.sk);
9435 }
9436 
/* Completion of a local-name change: broadcast the new name (skipping
 * the socket that requested it) unless the change is part of powering
 * the controller on.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending command means the change came from the
		 * controller side; cache the name locally.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9464 
9465 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9466 {
9467 	int i;
9468 
9469 	for (i = 0; i < uuid_count; i++) {
9470 		if (!memcmp(uuid, uuids[i], 16))
9471 			return true;
9472 	}
9473 
9474 	return false;
9475 }
9476 
/* Walk the EIR/advertising data and return true if any UUID it carries
 * matches an entry in the uuids filter list. 16-bit and 32-bit UUIDs
 * are expanded into full 128-bit UUIDs against the Bluetooth base UUID
 * before comparison.
 *
 * EIR layout per field: eir[0] = length (type byte + data, excluding
 * the length byte itself), eir[1] = type, data starting at eir[2].
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the EIR data. */
		if (field_len == 0)
			break;

		/* Stop on a truncated field rather than read past the
		 * end of the buffer.
		 */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 2-byte little-endian UUIDs; each maps onto
			 * bytes 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 4-byte little-endian UUIDs; each maps onto
			 * bytes 12-15 of the base UUID.
			 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs, compared as-is. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + field_len). */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9531 
9532 static void restart_le_scan(struct hci_dev *hdev)
9533 {
9534 	/* If controller is not scanning we are done. */
9535 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9536 		return;
9537 
9538 	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9539 		       hdev->discovery.scan_start +
9540 		       hdev->discovery.scan_duration))
9541 		return;
9542 
9543 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9544 			   DISCOV_LE_RESTART_DELAY);
9545 }
9546 
9547 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9548 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9549 {
9550 	/* If a RSSI threshold has been specified, and
9551 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9552 	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9553 	 * is set, let it through for further processing, as we might need to
9554 	 * restart the scan.
9555 	 *
9556 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9557 	 * the results are also dropped.
9558 	 */
9559 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9560 	    (rssi == HCI_RSSI_INVALID ||
9561 	    (rssi < hdev->discovery.rssi &&
9562 	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9563 		return  false;
9564 
9565 	if (hdev->discovery.uuid_count != 0) {
9566 		/* If a list of UUIDs is provided in filter, results with no
9567 		 * matching UUID should be dropped.
9568 		 */
9569 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9570 				   hdev->discovery.uuids) &&
9571 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
9572 				   hdev->discovery.uuid_count,
9573 				   hdev->discovery.uuids))
9574 			return false;
9575 	}
9576 
9577 	/* If duplicate filtering does not report RSSI changes, then restart
9578 	 * scanning to ensure updated result with updated RSSI values.
9579 	 */
9580 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9581 		restart_le_scan(hdev);
9582 
9583 		/* Validate RSSI value against the RSSI threshold once more. */
9584 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9585 		    rssi < hdev->discovery.rssi)
9586 			return false;
9587 	}
9588 
9589 	return true;
9590 }
9591 
/* Report a discovered device to mgmt sockets as a Device Found event,
 * applying the active discovery filters (service filter, limited
 * discoverable) before building the event. EIR, an optionally
 * synthesized Class of Device field and scan response data are
 * appended to the event payload in that order.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE: check the Flags AD field instead. */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a Class of Device EIR field (2 header bytes + 3
	 * data bytes) when one is known but not already present in the
	 * EIR data; this consumes the 5 extra bytes allocated above.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
9677 
9678 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9679 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9680 {
9681 	struct sk_buff *skb;
9682 	struct mgmt_ev_device_found *ev;
9683 	u16 eir_len;
9684 	u32 flags;
9685 
9686 	if (name_len)
9687 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 2 + name_len);
9688 	else
9689 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, 0);
9690 
9691 	ev = skb_put(skb, sizeof(*ev));
9692 	bacpy(&ev->addr.bdaddr, bdaddr);
9693 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9694 	ev->rssi = rssi;
9695 
9696 	if (name) {
9697 		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9698 					  name_len);
9699 		flags = 0;
9700 		skb_put(skb, eir_len);
9701 	} else {
9702 		eir_len = 0;
9703 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9704 	}
9705 
9706 	ev->eir_len = cpu_to_le16(eir_len);
9707 	ev->flags = cpu_to_le32(flags);
9708 
9709 	mgmt_event_skb(skb, NULL);
9710 }
9711 
9712 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9713 {
9714 	struct mgmt_ev_discovering ev;
9715 
9716 	bt_dev_dbg(hdev, "discovering %u", discovering);
9717 
9718 	memset(&ev, 0, sizeof(ev));
9719 	ev.type = hdev->discovery.type;
9720 	ev.discovering = discovering;
9721 
9722 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9723 }
9724 
9725 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9726 {
9727 	struct mgmt_ev_controller_suspend ev;
9728 
9729 	ev.suspend_state = state;
9730 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9731 }
9732 
9733 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9734 		   u8 addr_type)
9735 {
9736 	struct mgmt_ev_controller_resume ev;
9737 
9738 	ev.wake_reason = reason;
9739 	if (bdaddr) {
9740 		bacpy(&ev.addr.bdaddr, bdaddr);
9741 		ev.addr.type = addr_type;
9742 	} else {
9743 		memset(&ev.addr, 0, sizeof(ev.addr));
9744 	}
9745 
9746 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9747 }
9748 
/* HCI control channel descriptor wiring the mgmt command handler table
 * into the HCI socket layer.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9755 
9756 int mgmt_init(void)
9757 {
9758 	return hci_mgmt_chan_register(&chan);
9759 }
9760 
/* Unregister the management interface from the HCI control channel. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9765