xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 4de4e53b)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
/* Interface version/revision reported by mgmt_fill_version_info() in
 * the Read Management Version Information response.
 */
#define MGMT_VERSION	1
#define MGMT_REVISION	22
46 
/* Full set of mgmt opcodes advertised to trusted sockets by
 * read_commands(). Untrusted sockets get mgmt_untrusted_commands
 * instead.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
137 
/* Full set of mgmt events advertised to trusted sockets by
 * read_commands(). Untrusted sockets get mgmt_untrusted_events
 * instead.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184 
/* Read-only subset of opcodes permitted for sockets without the
 * HCI_SOCK_TRUSTED flag (see read_commands()).
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197 
/* Subset of events delivered to sockets without the HCI_SOCK_TRUSTED
 * flag (see read_commands()).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212 
/* Delay (2 s, in jiffies) before the service cache is flushed by
 * service_cache_off().
 */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* 16 bytes of zeroes, the all-zero key value */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
/* HCI to MGMT error code conversion table, indexed directly by the
 * HCI status code (0x00 = success); see mgmt_status(). Codes beyond
 * the end of the table map to MGMT_STATUS_FAILED.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285 
286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Send an index-related event on the control channel, filtered by the
 * given socket flag and skipping no socket.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329 
/* Send an event on the control channel only to sockets that have the
 * given flag set, optionally skipping one socket (e.g. the sender).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336 
/* Send an event on the control channel to all trusted sockets,
 * optionally skipping one socket (e.g. the sender).
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343 
/* Send a pre-built event skb on the control channel to all trusted
 * sockets, optionally skipping one socket.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349 
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 		return ADDR_LE_DEV_PUBLIC;
354 	else
355 		return ADDR_LE_DEV_RANDOM;
356 }
357 
/* Fill a mgmt_rp_read_version structure with the interface version
 * and revision (revision in little-endian wire format).
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365 
/* Handler for MGMT_OP_READ_VERSION: reply with the interface version
 * and revision. hdev is unused beyond logging (index-less command).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378 
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380 			 u16 data_len)
381 {
382 	struct mgmt_rp_read_commands *rp;
383 	u16 num_commands, num_events;
384 	size_t rp_size;
385 	int i, err;
386 
387 	bt_dev_dbg(hdev, "sock %p", sk);
388 
389 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 		num_commands = ARRAY_SIZE(mgmt_commands);
391 		num_events = ARRAY_SIZE(mgmt_events);
392 	} else {
393 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
395 	}
396 
397 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398 
399 	rp = kmalloc(rp_size, GFP_KERNEL);
400 	if (!rp)
401 		return -ENOMEM;
402 
403 	rp->num_commands = cpu_to_le16(num_commands);
404 	rp->num_events = cpu_to_le16(num_events);
405 
406 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 		__le16 *opcode = rp->opcodes;
408 
409 		for (i = 0; i < num_commands; i++, opcode++)
410 			put_unaligned_le16(mgmt_commands[i], opcode);
411 
412 		for (i = 0; i < num_events; i++, opcode++)
413 			put_unaligned_le16(mgmt_events[i], opcode);
414 	} else {
415 		__le16 *opcode = rp->opcodes;
416 
417 		for (i = 0; i < num_commands; i++, opcode++)
418 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419 
420 		for (i = 0; i < num_events; i++, opcode++)
421 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422 	}
423 
424 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425 				rp, rp_size);
426 	kfree(rp);
427 
428 	return err;
429 }
430 
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the indexes of all
 * configured controllers.
 *
 * Two passes are made over hci_dev_list, both under the same read
 * lock so the list cannot change in between: the first sizes the
 * reply, the second fills it. The second pass applies extra filters
 * (setup/config/user-channel, raw-only quirk), so it may add fewer
 * entries than the first pass counted — hence rp_len is recomputed
 * from the final count before sending.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocation happens while holding the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that are actually visible */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the entries actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
488 
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: reply with the indexes
 * of all unconfigured controllers. Mirror image of read_index_list():
 * same two-pass, read-locked scheme, but selecting devices WITH the
 * HCI_UNCONFIGURED flag.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocation happens while holding the read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes that are actually visible */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the entries actually added */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: reply with one entry per
 * visible controller, carrying its index, bus type and whether it is
 * configured (type 0x00) or unconfigured (type 0x01).
 *
 * Calling this also switches the socket over to extended index events
 * only (the legacy index/unconf-index event flags are cleared).
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: total device count as an allocation upper bound */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	/* GFP_ATOMIC: allocation happens while holding the read lock */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries that are actually visible */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
612 
613 static bool is_configured(struct hci_dev *hdev)
614 {
615 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
616 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 		return false;
618 
619 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
620 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
621 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
622 		return false;
623 
624 	return true;
625 }
626 
627 static __le32 get_missing_options(struct hci_dev *hdev)
628 {
629 	u32 options = 0;
630 
631 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
633 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
634 
635 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
638 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
639 
640 	return cpu_to_le32(options);
641 }
642 
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing
 * options to sockets subscribed to option events, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
650 
/* Complete a configuration command by replying with the current
 * missing-options bitmask for this controller.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
658 
/* Handler for MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer
 * ID plus the supported and currently-missing configuration options.
 * External config is supported when the quirk is set; a public
 * address can be configured when the driver provides set_bdaddr.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
686 
/* Build the bitmask of PHYs this controller supports, derived from
 * its LMP features (BR/EDR side) and LE feature bits (LE side).
 * EDR multi-slot bits are nested under the 2M/3M capability checks
 * because they only apply when that modulation is available.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate, 1 slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for LE */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
738 
/* Build the bitmask of PHYs currently selected. For BR/EDR this is
 * derived from hdev->pkt_type: DM/DH bits enable the multi-slot basic
 * rate PHYs, while the 2DHx/3DHx bits work in reverse — a SET bit
 * means that EDR packet type is DISABLED, hence the negated tests.
 * For LE the default TX/RX PHY preferences are consulted directly.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate, 1 slot is always selected on BR/EDR */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR pkt_type bits are "disabled" flags */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
801 
802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807 
808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 	u32 settings = 0;
811 
812 	settings |= MGMT_SETTING_POWERED;
813 	settings |= MGMT_SETTING_BONDABLE;
814 	settings |= MGMT_SETTING_DEBUG_KEYS;
815 	settings |= MGMT_SETTING_CONNECTABLE;
816 	settings |= MGMT_SETTING_DISCOVERABLE;
817 
818 	if (lmp_bredr_capable(hdev)) {
819 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 		settings |= MGMT_SETTING_BREDR;
822 		settings |= MGMT_SETTING_LINK_SECURITY;
823 
824 		if (lmp_ssp_capable(hdev)) {
825 			settings |= MGMT_SETTING_SSP;
826 		}
827 
828 		if (lmp_sc_capable(hdev))
829 			settings |= MGMT_SETTING_SECURE_CONN;
830 
831 		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
832 			     &hdev->quirks))
833 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
834 	}
835 
836 	if (lmp_le_capable(hdev)) {
837 		settings |= MGMT_SETTING_LE;
838 		settings |= MGMT_SETTING_SECURE_CONN;
839 		settings |= MGMT_SETTING_PRIVACY;
840 		settings |= MGMT_SETTING_STATIC_ADDRESS;
841 		settings |= MGMT_SETTING_ADVERTISING;
842 	}
843 
844 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
845 	    hdev->set_bdaddr)
846 		settings |= MGMT_SETTING_CONFIGURATION;
847 
848 	if (cis_central_capable(hdev))
849 		settings |= MGMT_SETTING_CIS_CENTRAL;
850 
851 	if (cis_peripheral_capable(hdev))
852 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
853 
854 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
855 
856 	return settings;
857 }
858 
/* Build the bitmask of settings currently active on this controller,
 * mapping each relevant hdev flag to its MGMT_SETTING_* bit.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
938 
/* Look up a pending mgmt command for @opcode on the control channel */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
943 
944 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
945 {
946 	struct mgmt_pending_cmd *cmd;
947 
948 	/* If there's a pending mgmt command the flags will not yet have
949 	 * their final values, so check for this first.
950 	 */
951 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
952 	if (cmd) {
953 		struct mgmt_mode *cp = cmd->param;
954 		if (cp->val == 0x01)
955 			return LE_AD_GENERAL;
956 		else if (cp->val == 0x02)
957 			return LE_AD_LIMITED;
958 	} else {
959 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
960 			return LE_AD_LIMITED;
961 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
962 			return LE_AD_GENERAL;
963 	}
964 
965 	return 0;
966 }
967 
968 bool mgmt_get_connectable(struct hci_dev *hdev)
969 {
970 	struct mgmt_pending_cmd *cmd;
971 
972 	/* If there's a pending mgmt command the flag will not yet have
973 	 * it's final value, so check for this first.
974 	 */
975 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
976 	if (cmd) {
977 		struct mgmt_mode *cp = cmd->param;
978 
979 		return cp->val;
980 	}
981 
982 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
983 }
984 
/* hci_cmd_sync callback: push the cached EIR data and class of
 * device out to the controller. Always reports success.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
992 
/* Delayed-work handler for hdev->service_cache: when the cache timer
 * fires, flush the cached EIR/class data via the cmd_sync queue. If
 * HCI_SERVICE_CACHE was already cleared there is nothing to do.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1003 
1004 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1005 {
1006 	/* The generation of a new RPA and programming it into the
1007 	 * controller happens in the hci_req_enable_advertising()
1008 	 * function.
1009 	 */
1010 	if (ext_adv_capable(hdev))
1011 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1012 	else
1013 		return hci_enable_advertising_sync(hdev);
1014 }
1015 
/* Delayed-work handler for hdev->rpa_expired: mark the RPA as expired
 * and, if advertising is active, queue a refresh so a new RPA gets
 * generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh unless advertising is currently on */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1030 
1031 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1032 
/* Delayed-work handler run when the discoverable timeout expires:
 * clears the discoverable state, pushes the change to the controller
 * and notifies mgmt listeners.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	/* Apply the (now non-discoverable) state to the controller */
	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	/* Broadcast the settings change to all mgmt sockets */
	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1057 
1058 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1059 
/* Finish a mesh TX entry: optionally notify userspace that the packet
 * completed, then release the entry.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	/* NOTE(review): handle is copied up front since mgmt_mesh_remove()
	 * below presumably frees mesh_tx — confirm against its definition.
	 */
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1071 
/* hci_cmd_sync work: end the current mesh send window by disabling
 * advertising, and complete the head mesh TX entry if one is queued.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	/* NULL socket: take the next pending entry regardless of owner —
	 * TODO confirm against mgmt_mesh_next() implementation.
	 */
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1085 
1086 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1087 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion hook for mesh_send_done_sync(): kick off transmission of
 * the next queued mesh packet, if there is one.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	/* If queueing failed, complete (and drop) the entry now so it does
	 * not linger; otherwise mark the controller as actively sending.
	 */
	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1103 
1104 static void mesh_send_done(struct work_struct *work)
1105 {
1106 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1107 					    mesh_send_done.work);
1108 
1109 	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1110 		return;
1111 
1112 	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1113 }
1114 
/* One-time per-controller mgmt initialization, performed lazily when
 * the first mgmt command arrives for this hdev.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* HCI_MGMT set means this controller was already initialized */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1136 
/* MGMT_OP_READ_INFO handler: reply with the controller's address,
 * HCI version, manufacturer, settings, class of device and names.
 * The reply is assembled under hdev->lock for a consistent snapshot.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	/* Class of device is a fixed 3-byte field */
	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1166 
/* Encode the variable portion of the extended-info reply/event as EIR
 * structures into @eir: class of device (BR/EDR only), appearance
 * (LE only), complete local name and short local name.
 *
 * Returns the number of bytes written to @eir.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1190 
/* MGMT_OP_READ_EXT_INFO handler: like READ_INFO, but the variable data
 * (class, appearance, names) is carried as EIR structures appended to
 * the fixed reply header.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Fixed reply header plus EIR data share one stack buffer */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1230 
/* Emit an Extended Controller Information Changed event (carrying the
 * freshly encoded EIR data) to sockets that opted into extended info
 * events, skipping @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1246 
1247 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1248 {
1249 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1250 
1251 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1252 				 sizeof(settings));
1253 }
1254 
1255 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1256 {
1257 	struct mgmt_ev_advertising_added ev;
1258 
1259 	ev.instance = instance;
1260 
1261 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1262 }
1263 
1264 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1265 			      u8 instance)
1266 {
1267 	struct mgmt_ev_advertising_removed ev;
1268 
1269 	ev.instance = instance;
1270 
1271 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1272 }
1273 
1274 static void cancel_adv_timeout(struct hci_dev *hdev)
1275 {
1276 	if (hdev->adv_instance_timeout) {
1277 		hdev->adv_instance_timeout = 0;
1278 		cancel_delayed_work(&hdev->adv_instance_expire);
1279 	}
1280 }
1281 
/* Re-populate the pending LE action lists from the stored connection
 * parameters so auto-connections and device-found reports resume
 * (e.g. after power on).
 *
 * This function requires the caller holds hdev->lock.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			/* Other auto-connect values need no pending entry */
			break;
		}
	}
}
1306 
1307 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1308 {
1309 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1310 
1311 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1312 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1313 }
1314 
/* Completion handler for SET_POWERED: respond to the pending command
 * and, on a successful power on, restore LE actions and emit New
 * Settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power on: resume LE auto-connect actions and
			 * background scanning under hdev->lock.
			 */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1350 
/* hci_cmd_sync work for SET_POWERED: apply the requested power state */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1360 
/* MGMT_OP_SET_POWERED handler: validate the request and queue the
 * synchronous power state change, completing via
 * mgmt_set_powered_complete().
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only 0x00 (off) and 0x01 (on) are valid values */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already active: just echo current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1411 
/* Broadcast a New Settings event to all mgmt sockets (no socket
 * skipped).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1416 
/* Shared context for mgmt_pending_foreach() response helpers */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket (held), used to
				 * skip it when broadcasting events; caller
				 * must sock_put() when done
				 */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1422 
/* mgmt_pending_foreach() helper: reply to a pending command with the
 * current settings and free it, remembering the first socket seen so
 * the caller can skip it when broadcasting New Settings.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Detach from the pending list before freeing the command */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		/* Reference dropped by the caller via sock_put() */
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1438 
/* mgmt_pending_foreach() helper: fail a pending command with the status
 * code pointed to by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1446 
1447 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1448 {
1449 	if (cmd->cmd_complete) {
1450 		u8 *status = data;
1451 
1452 		cmd->cmd_complete(cmd, *status);
1453 		mgmt_pending_remove(cmd);
1454 
1455 		return;
1456 	}
1457 
1458 	cmd_status_rsp(cmd, data);
1459 }
1460 
/* cmd_complete handler that echoes the command's full parameter block
 * back in the reply.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1466 
/* cmd_complete handler that echoes only the leading mgmt_addr_info
 * portion of the command's parameters back in the reply.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1472 
1473 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1474 {
1475 	if (!lmp_bredr_capable(hdev))
1476 		return MGMT_STATUS_NOT_SUPPORTED;
1477 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1478 		return MGMT_STATUS_REJECTED;
1479 	else
1480 		return MGMT_STATUS_SUCCESS;
1481 }
1482 
1483 static u8 mgmt_le_support(struct hci_dev *hdev)
1484 {
1485 	if (!lmp_le_capable(hdev))
1486 		return MGMT_STATUS_NOT_SUPPORTED;
1487 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1488 		return MGMT_STATUS_REJECTED;
1489 	else
1490 		return MGMT_STATUS_SUCCESS;
1491 }
1492 
/* Completion handler for SET_DISCOVERABLE: respond to the pending
 * command, arm the discoverable timeout on success and emit New
 * Settings.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the optimistically-set limited flag */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the timeout that clears discoverable mode again */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1526 
/* hci_cmd_sync work: apply the current discoverable setting to the
 * controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1533 
/* MGMT_OP_SET_DISCOVERABLE handler: validate the mode (0x00 off,
 * 0x01 general, 0x02 limited) and timeout, update the discoverable
 * flags and queue the HCI update, completing via
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Discoverable requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Flags are set optimistically; the complete handler rolls the
	 * limited flag back on failure.
	 */
	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1666 
/* Completion handler for SET_CONNECTABLE: respond to the pending
 * command and emit New Settings on success.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	/* NOTE(review): cmd should always be non-NULL here — it is the
	 * data pointer queued in set_connectable() — so this check looks
	 * purely defensive; confirm before relying on it.
	 */
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
1695 
/* Handle SET_CONNECTABLE while the controller is powered off: only the
 * flags are updated (no HCI traffic), the response is sent and New
 * Settings is emitted if something changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Discoverable depends on connectable, so clear it too */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1724 
/* hci_cmd_sync work: apply the current connectable setting to the
 * controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1731 
/* MGMT_OP_SET_CONNECTABLE handler: validate the request, update the
 * connectable/discoverable flags and queue the HCI update, completing
 * via mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Connectable requires at least one enabled transport */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Discoverable and connectable changes must not race each other */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Turning connectable off also ends discoverable mode,
		 * so stop any running discoverable timeout first.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1791 
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag. This is a
 * host-side setting, so no HCI command is required.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear reports whether the flag actually flipped */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1829 
/* MGMT_OP_SET_LINK_SECURITY handler: enable/disable BR/EDR link-level
 * security (authentication) by sending HCI Write Auth Enable; the
 * response is sent from the HCI event path.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Link security is a BR/EDR-only feature */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, applied to the controller later */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Requested state already active: just echo current settings */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1898 
/* Completion handler for SET_SSP: settle the HCI_SSP_ENABLED flag,
 * answer all pending SET_SSP commands and emit New Settings if the
 * setting actually changed.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the optimistically-set flag and notify if it
		 * had been visible as enabled.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	/* Settle the final flag state; test-and-set/clear reports whether
	 * it flipped (see set_ssp_sync(), which cleared the flag again on
	 * success so the transition is detectable here).
	 */
	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* EIR content depends on the SSP setting */
	hci_update_eir_sync(hdev);
}
1940 
/* hci_cmd_sync work for SET_SSP: write the requested SSP mode to the
 * controller.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	/* Set the flag up front so code running while the command is in
	 * flight already sees SSP as enabled.
	 */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* On success the flag is cleared again so that set_ssp_complete()
	 * can observe the transition via its own test_and_set —
	 * NOTE(review): confirm this interplay is intentional; at first
	 * glance it reads like a failure rollback.
	 */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1958 
/* MGMT_OP_SET_SSP handler: enable/disable Secure Simple Pairing,
 * queueing the controller write and completing via set_ssp_complete().
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SSP is a BR/EDR feature and needs controller support */
	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, applied to the controller later */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already active: just echo current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2033 
2034 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2035 {
2036 	bt_dev_dbg(hdev, "sock %p", sk);
2037 
2038 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2039 				       MGMT_STATUS_NOT_SUPPORTED);
2040 }
2041 
/* Completion handler for SET_LE: answer all pending SET_LE commands
 * with either a status (on failure) or the new settings, and emit New
 * Settings on success.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
2062 
/* hci_cmd_sync work for SET_LE: tear down or set up LE support and
 * write the LE Host Supported setting to the controller.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: remove advertising instances and stop any
		 * active advertising first.
		 */
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2106 
/* Completion handler for SET_MESH_RECEIVER: fail all pending commands
 * on error, otherwise complete this one with an empty reply.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	/* Keep the socket; cmd is removed before the reply is sent */
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2122 
2123 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2124 {
2125 	struct mgmt_pending_cmd *cmd = data;
2126 	struct mgmt_cp_set_mesh *cp = cmd->param;
2127 	size_t len = cmd->param_len;
2128 
2129 	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2130 
2131 	if (cp->enable)
2132 		hci_dev_set_flag(hdev, HCI_MESH);
2133 	else
2134 		hci_dev_clear_flag(hdev, HCI_MESH);
2135 
2136 	len -= sizeof(*cp);
2137 
2138 	/* If filters don't fit, forward all adv pkts */
2139 	if (len <= sizeof(hdev->mesh_ad_types))
2140 		memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2141 
2142 	hci_update_passive_scan_sync(hdev);
2143 	return 0;
2144 }
2145 
2146 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2147 {
2148 	struct mgmt_cp_set_mesh *cp = data;
2149 	struct mgmt_pending_cmd *cmd;
2150 	int err = 0;
2151 
2152 	bt_dev_dbg(hdev, "sock %p", sk);
2153 
2154 	if (!lmp_le_capable(hdev) ||
2155 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2156 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2157 				       MGMT_STATUS_NOT_SUPPORTED);
2158 
2159 	if (cp->enable != 0x00 && cp->enable != 0x01)
2160 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2161 				       MGMT_STATUS_INVALID_PARAMS);
2162 
2163 	hci_dev_lock(hdev);
2164 
2165 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2166 	if (!cmd)
2167 		err = -ENOMEM;
2168 	else
2169 		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2170 					 set_mesh_complete);
2171 
2172 	if (err < 0) {
2173 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2174 				      MGMT_STATUS_FAILED);
2175 
2176 		if (cmd)
2177 			mgmt_pending_remove(cmd);
2178 	}
2179 
2180 	hci_dev_unlock(hdev);
2181 	return err;
2182 }
2183 
2184 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2185 {
2186 	struct mgmt_mesh_tx *mesh_tx = data;
2187 	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2188 	unsigned long mesh_send_interval;
2189 	u8 mgmt_err = mgmt_status(err);
2190 
2191 	/* Report any errors here, but don't report completion */
2192 
2193 	if (mgmt_err) {
2194 		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2195 		/* Send Complete Error Code for handle */
2196 		mesh_send_complete(hdev, mesh_tx, false);
2197 		return;
2198 	}
2199 
2200 	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2201 	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2202 			   mesh_send_interval);
2203 }
2204 
/* Sync worker for MGMT_OP_MESH_SEND: wrap the mesh payload into a
 * short-lived advertising instance and (re)schedule advertising so the
 * packet actually goes on air.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	/* Use an instance number one past the controller's advertising
	 * sets so a mesh TX never collides with a regular instance.
	 */
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* NOTE(review): this returns a positive MGMT_STATUS_* value while
	 * all other paths return 0/-errno; the completion handler runs it
	 * through mgmt_status() — confirm the intended mapping.
	 */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	/* Advertise long enough for 'cnt' events at the max interval */
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	/* instance != 0 means an instance still needs to be scheduled */
	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2258 
2259 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2260 {
2261 	struct mgmt_rp_mesh_read_features *rp = data;
2262 
2263 	if (rp->used_handles >= rp->max_handles)
2264 		return;
2265 
2266 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2267 }
2268 
2269 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2270 			 void *data, u16 len)
2271 {
2272 	struct mgmt_rp_mesh_read_features rp;
2273 
2274 	if (!lmp_le_capable(hdev) ||
2275 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2276 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2277 				       MGMT_STATUS_NOT_SUPPORTED);
2278 
2279 	memset(&rp, 0, sizeof(rp));
2280 	rp.index = cpu_to_le16(hdev->id);
2281 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2282 		rp.max_handles = MESH_HANDLES_MAX;
2283 
2284 	hci_dev_lock(hdev);
2285 
2286 	if (rp.max_handles)
2287 		mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2288 
2289 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2290 			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2291 
2292 	hci_dev_unlock(hdev);
2293 	return 0;
2294 }
2295 
2296 static int send_cancel(struct hci_dev *hdev, void *data)
2297 {
2298 	struct mgmt_pending_cmd *cmd = data;
2299 	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2300 	struct mgmt_mesh_tx *mesh_tx;
2301 
2302 	if (!cancel->handle) {
2303 		do {
2304 			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2305 
2306 			if (mesh_tx)
2307 				mesh_send_complete(hdev, mesh_tx, false);
2308 		} while (mesh_tx);
2309 	} else {
2310 		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2311 
2312 		if (mesh_tx && mesh_tx->sk == cmd->sk)
2313 			mesh_send_complete(hdev, mesh_tx, false);
2314 	}
2315 
2316 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2317 			  0, NULL, 0);
2318 	mgmt_pending_free(cmd);
2319 
2320 	return 0;
2321 }
2322 
2323 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2324 			    void *data, u16 len)
2325 {
2326 	struct mgmt_pending_cmd *cmd;
2327 	int err;
2328 
2329 	if (!lmp_le_capable(hdev) ||
2330 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2331 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2332 				       MGMT_STATUS_NOT_SUPPORTED);
2333 
2334 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2335 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2336 				       MGMT_STATUS_REJECTED);
2337 
2338 	hci_dev_lock(hdev);
2339 	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2340 	if (!cmd)
2341 		err = -ENOMEM;
2342 	else
2343 		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2344 
2345 	if (err < 0) {
2346 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2347 				      MGMT_STATUS_FAILED);
2348 
2349 		if (cmd)
2350 			mgmt_pending_free(cmd);
2351 	}
2352 
2353 	hci_dev_unlock(hdev);
2354 	return err;
2355 }
2356 
2357 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2358 {
2359 	struct mgmt_mesh_tx *mesh_tx;
2360 	struct mgmt_cp_mesh_send *send = data;
2361 	struct mgmt_rp_mesh_read_features rp;
2362 	bool sending;
2363 	int err = 0;
2364 
2365 	if (!lmp_le_capable(hdev) ||
2366 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2367 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2368 				       MGMT_STATUS_NOT_SUPPORTED);
2369 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2370 	    len <= MGMT_MESH_SEND_SIZE ||
2371 	    len > (MGMT_MESH_SEND_SIZE + 31))
2372 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2373 				       MGMT_STATUS_REJECTED);
2374 
2375 	hci_dev_lock(hdev);
2376 
2377 	memset(&rp, 0, sizeof(rp));
2378 	rp.max_handles = MESH_HANDLES_MAX;
2379 
2380 	mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2381 
2382 	if (rp.max_handles <= rp.used_handles) {
2383 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2384 				      MGMT_STATUS_BUSY);
2385 		goto done;
2386 	}
2387 
2388 	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2389 	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2390 
2391 	if (!mesh_tx)
2392 		err = -ENOMEM;
2393 	else if (!sending)
2394 		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2395 					 mesh_send_start_complete);
2396 
2397 	if (err < 0) {
2398 		bt_dev_err(hdev, "Send Mesh Failed %d", err);
2399 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2400 				      MGMT_STATUS_FAILED);
2401 
2402 		if (mesh_tx) {
2403 			if (sending)
2404 				mgmt_mesh_remove(mesh_tx);
2405 		}
2406 	} else {
2407 		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2408 
2409 		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2410 				  &mesh_tx->handle, 1);
2411 	}
2412 
2413 done:
2414 	hci_dev_unlock(hdev);
2415 	return err;
2416 }
2417 
/* Handle MGMT_OP_SET_LE: enable or disable Low Energy support.
 *
 * Replies immediately when the adapter is unpowered or already in the
 * requested state; otherwise queues set_le_sync to reprogram the
 * controller and completes later from set_le_complete.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only 0x00 (off) and 0x01 (on) are valid */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off or no state change: flip the flags and reply
	 * without touching the controller.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also clears the advertising setting */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one LE/advertising state change may be in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2506 
2507 /* This is a helper function to test for pending mgmt commands that can
2508  * cause CoD or EIR HCI commands. We can only allow one such pending
2509  * mgmt command at a time since otherwise we cannot easily track what
2510  * the current values are, will be, and based on that calculate if a new
2511  * HCI command needs to be sent and if yes with what value.
2512  */
2513 static bool pending_eir_or_class(struct hci_dev *hdev)
2514 {
2515 	struct mgmt_pending_cmd *cmd;
2516 
2517 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2518 		switch (cmd->opcode) {
2519 		case MGMT_OP_ADD_UUID:
2520 		case MGMT_OP_REMOVE_UUID:
2521 		case MGMT_OP_SET_DEV_CLASS:
2522 		case MGMT_OP_SET_POWERED:
2523 			return true;
2524 		}
2525 	}
2526 
2527 	return false;
2528 }
2529 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit shortened UUIDs.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2534 
2535 static u8 get_uuid_size(const u8 *uuid)
2536 {
2537 	u32 val;
2538 
2539 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2540 		return 128;
2541 
2542 	val = get_unaligned_le32(&uuid[12]);
2543 	if (val > 0xffff)
2544 		return 32;
2545 
2546 	return 16;
2547 }
2548 
2549 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2550 {
2551 	struct mgmt_pending_cmd *cmd = data;
2552 
2553 	bt_dev_dbg(hdev, "err %d", err);
2554 
2555 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2556 			  mgmt_status(err), hdev->dev_class, 3);
2557 
2558 	mgmt_pending_free(cmd);
2559 }
2560 
/* Sync worker for MGMT_OP_ADD_UUID: refresh the Class of Device first,
 * then the EIR data.  @data (the pending cmd) is unused here.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2571 
2572 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2573 {
2574 	struct mgmt_cp_add_uuid *cp = data;
2575 	struct mgmt_pending_cmd *cmd;
2576 	struct bt_uuid *uuid;
2577 	int err;
2578 
2579 	bt_dev_dbg(hdev, "sock %p", sk);
2580 
2581 	hci_dev_lock(hdev);
2582 
2583 	if (pending_eir_or_class(hdev)) {
2584 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2585 				      MGMT_STATUS_BUSY);
2586 		goto failed;
2587 	}
2588 
2589 	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2590 	if (!uuid) {
2591 		err = -ENOMEM;
2592 		goto failed;
2593 	}
2594 
2595 	memcpy(uuid->uuid, cp->uuid, 16);
2596 	uuid->svc_hint = cp->svc_hint;
2597 	uuid->size = get_uuid_size(cp->uuid);
2598 
2599 	list_add_tail(&uuid->list, &hdev->uuids);
2600 
2601 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2602 	if (!cmd) {
2603 		err = -ENOMEM;
2604 		goto failed;
2605 	}
2606 
2607 	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
2608 	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
2609 	 */
2610 	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
2611 				  mgmt_class_complete);
2612 	if (err < 0) {
2613 		mgmt_pending_free(cmd);
2614 		goto failed;
2615 	}
2616 
2617 failed:
2618 	hci_dev_unlock(hdev);
2619 	return err;
2620 }
2621 
2622 static bool enable_service_cache(struct hci_dev *hdev)
2623 {
2624 	if (!hdev_is_powered(hdev))
2625 		return false;
2626 
2627 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2628 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2629 				   CACHE_TIMEOUT);
2630 		return true;
2631 	}
2632 
2633 	return false;
2634 }
2635 
/* Sync worker for MGMT_OP_REMOVE_UUID: refresh the Class of Device
 * first, then the EIR data.  @data (the pending cmd) is unused here.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2646 
/* Handle MGMT_OP_REMOVE_UUID: remove one service UUID — or, when the
 * all-zero wildcard UUID is given, every UUID — and then refresh the
 * Class of Device and EIR data via remove_uuid_sync.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a "remove everything" wildcard */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* With the service cache armed the controller update is
		 * deferred, so the command can complete right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Delete every list entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2717 
2718 static int set_class_sync(struct hci_dev *hdev, void *data)
2719 {
2720 	int err = 0;
2721 
2722 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2723 		cancel_delayed_work_sync(&hdev->service_cache);
2724 		err = hci_update_eir_sync(hdev);
2725 	}
2726 
2727 	if (err)
2728 		return err;
2729 
2730 	return hci_update_class_sync(hdev);
2731 }
2732 
/* Handle MGMT_OP_SET_DEV_CLASS: update the major/minor device class.
 * The new class is written to the controller (via set_class_sync) only
 * when the adapter is powered; otherwise just the cached values change.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Class of Device is a BR/EDR-only concept */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reject values with the reserved bits set (low two bits of
	 * minor, high three bits of major).
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Unpowered: cached values updated, nothing to send */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2787 
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace the adapter's stored BR/EDR
 * link keys with the list supplied by userspace.  Blocked, malformed or
 * debug keys are skipped with a warning rather than failing the whole
 * command.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps the total parameter size within u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	/* This is a full replacement, not a merge */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys that are on the administratively blocked list */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Link keys only exist for BR/EDR addresses */
		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		/* Reject unknown link key types (valid range 0x00-0x08) */
		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2880 
2881 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2882 			   u8 addr_type, struct sock *skip_sk)
2883 {
2884 	struct mgmt_ev_device_unpaired ev;
2885 
2886 	bacpy(&ev.addr.bdaddr, bdaddr);
2887 	ev.addr.type = addr_type;
2888 
2889 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2890 			  skip_sk);
2891 }
2892 
2893 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2894 {
2895 	struct mgmt_pending_cmd *cmd = data;
2896 	struct mgmt_cp_unpair_device *cp = cmd->param;
2897 
2898 	if (!err)
2899 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2900 
2901 	cmd->cmd_complete(cmd, err);
2902 	mgmt_pending_free(cmd);
2903 }
2904 
2905 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2906 {
2907 	struct mgmt_pending_cmd *cmd = data;
2908 	struct mgmt_cp_unpair_device *cp = cmd->param;
2909 	struct hci_conn *conn;
2910 
2911 	if (cp->addr.type == BDADDR_BREDR)
2912 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2913 					       &cp->addr.bdaddr);
2914 	else
2915 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2916 					       le_addr_type(cp->addr.type));
2917 
2918 	if (!conn)
2919 		return 0;
2920 
2921 	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2922 }
2923 
/* Handle MGMT_OP_UNPAIR_DEVICE: delete the keys for a device and, when
 * requested, terminate an existing connection to it.  The reply is sent
 * immediately unless a disconnection is started, in which case it
 * completes later from unpair_device_complete.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply echoes the target address on every path */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean: 0x00 or 0x01 only */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Not connected: drop the connection parameters right away */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3052 
/* Handle MGMT_OP_DISCONNECT: terminate the ACL or LE link to the given
 * address.  The command stays pending; cmd->cmd_complete (set to
 * generic_cmd_complete here) finishes it once the disconnect concludes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply echoes the target address on every path */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in flight at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections have no link to terminate */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3118 
3119 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3120 {
3121 	switch (link_type) {
3122 	case ISO_LINK:
3123 	case LE_LINK:
3124 		switch (addr_type) {
3125 		case ADDR_LE_DEV_PUBLIC:
3126 			return BDADDR_LE_PUBLIC;
3127 
3128 		default:
3129 			/* Fallback to LE Random address type */
3130 			return BDADDR_LE_RANDOM;
3131 		}
3132 
3133 	default:
3134 		/* Fallback to BR/EDR type */
3135 		return BDADDR_BREDR;
3136 	}
3137 }
3138 
/* Handle MGMT_OP_GET_CONNECTIONS: report the addresses of every
 * mgmt-visible connection.  SCO/eSCO links are filtered out of the
 * reply.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count the connections to size the reply buffer */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in the entries.  A SCO/eSCO entry is written
	 * into slot i but skipped without incrementing i, so the next
	 * kept connection overwrites it (and the final length below
	 * trims any leftover).
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3192 
3193 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3194 				   struct mgmt_cp_pin_code_neg_reply *cp)
3195 {
3196 	struct mgmt_pending_cmd *cmd;
3197 	int err;
3198 
3199 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3200 			       sizeof(*cp));
3201 	if (!cmd)
3202 		return -ENOMEM;
3203 
3204 	cmd->cmd_complete = addr_cmd_complete;
3205 
3206 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3207 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3208 	if (err < 0)
3209 		mgmt_pending_remove(cmd);
3210 
3211 	return err;
3212 }
3213 
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to
 * the controller.  A BT_SECURITY_HIGH pairing requires a full 16-byte
 * PIN; anything shorter is turned into a negative reply instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN codes only apply to an existing BR/EDR (ACL) connection */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		/* Reject the pairing, then tell userspace why */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3275 
3276 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3277 			     u16 len)
3278 {
3279 	struct mgmt_cp_set_io_capability *cp = data;
3280 
3281 	bt_dev_dbg(hdev, "sock %p", sk);
3282 
3283 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3284 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3285 				       MGMT_STATUS_INVALID_PARAMS);
3286 
3287 	hci_dev_lock(hdev);
3288 
3289 	hdev->io_capability = cp->io_capability;
3290 
3291 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3292 
3293 	hci_dev_unlock(hdev);
3294 
3295 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3296 				 NULL, 0);
3297 }
3298 
3299 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3300 {
3301 	struct hci_dev *hdev = conn->hdev;
3302 	struct mgmt_pending_cmd *cmd;
3303 
3304 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3305 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3306 			continue;
3307 
3308 		if (cmd->user_data != conn)
3309 			continue;
3310 
3311 		return cmd;
3312 	}
3313 
3314 	return NULL;
3315 }
3316 
/* Complete a pending Pair Device command with @status and release the
 * connection references taken in pair_device(): hci_conn_drop() pairs
 * with the reference from the connect call, hci_conn_put() with the
 * hci_conn_get() stored in cmd->user_data.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
3345 
3346 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3347 {
3348 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3349 	struct mgmt_pending_cmd *cmd;
3350 
3351 	cmd = find_pairing(conn);
3352 	if (cmd) {
3353 		cmd->cmd_complete(cmd, status);
3354 		mgmt_pending_remove(cmd);
3355 	}
3356 }
3357 
3358 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3359 {
3360 	struct mgmt_pending_cmd *cmd;
3361 
3362 	BT_DBG("status %u", status);
3363 
3364 	cmd = find_pairing(conn);
3365 	if (!cmd) {
3366 		BT_DBG("Unable to find a pending command");
3367 		return;
3368 	}
3369 
3370 	cmd->cmd_complete(cmd, mgmt_status(status));
3371 	mgmt_pending_remove(cmd);
3372 }
3373 
3374 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3375 {
3376 	struct mgmt_pending_cmd *cmd;
3377 
3378 	BT_DBG("status %u", status);
3379 
3380 	if (!status)
3381 		return;
3382 
3383 	cmd = find_pairing(conn);
3384 	if (!cmd) {
3385 		BT_DBG("Unable to find a pending command");
3386 		return;
3387 	}
3388 
3389 	cmd->cmd_complete(cmd, mgmt_status(status));
3390 	mgmt_pending_remove(cmd);
3391 }
3392 
/* MGMT_OP_PAIR_DEVICE handler: create (or reuse) a connection to the
 * given address and initiate pairing. The command stays pending until
 * pairing_complete() runs via the confirm callbacks installed below.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The response always echoes the address the request was for */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Map the connect error to a mgmt status for userspace */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* A confirm callback already being installed means the connection
	 * is busy with another operation, so report Busy.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* This reference is released in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* If the link is already up and secure, complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3527 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an outstanding Pair Device
 * command for the given address and tear down associated pairing state.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be a Pair Device command to cancel */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the one the pending pairing is for */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Finish the pending Pair Device command as Cancelled */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3584 
/* Common backend for the user confirmation/passkey (negative) reply
 * commands. LE responses are handed to SMP directly; for BR/EDR the
 * HCI command @hci_op is sent and a mgmt command stays pending until
 * the controller responds.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	/* Look up the connection the response refers to */
	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are processed by SMP, not via HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3655 
3656 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3657 			      void *data, u16 len)
3658 {
3659 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3660 
3661 	bt_dev_dbg(hdev, "sock %p", sk);
3662 
3663 	return user_pairing_resp(sk, hdev, &cp->addr,
3664 				MGMT_OP_PIN_CODE_NEG_REPLY,
3665 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3666 }
3667 
3668 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3669 			      u16 len)
3670 {
3671 	struct mgmt_cp_user_confirm_reply *cp = data;
3672 
3673 	bt_dev_dbg(hdev, "sock %p", sk);
3674 
3675 	if (len != sizeof(*cp))
3676 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3677 				       MGMT_STATUS_INVALID_PARAMS);
3678 
3679 	return user_pairing_resp(sk, hdev, &cp->addr,
3680 				 MGMT_OP_USER_CONFIRM_REPLY,
3681 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3682 }
3683 
3684 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3685 				  void *data, u16 len)
3686 {
3687 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3688 
3689 	bt_dev_dbg(hdev, "sock %p", sk);
3690 
3691 	return user_pairing_resp(sk, hdev, &cp->addr,
3692 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3693 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3694 }
3695 
3696 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3697 			      u16 len)
3698 {
3699 	struct mgmt_cp_user_passkey_reply *cp = data;
3700 
3701 	bt_dev_dbg(hdev, "sock %p", sk);
3702 
3703 	return user_pairing_resp(sk, hdev, &cp->addr,
3704 				 MGMT_OP_USER_PASSKEY_REPLY,
3705 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3706 }
3707 
3708 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3709 				  void *data, u16 len)
3710 {
3711 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3712 
3713 	bt_dev_dbg(hdev, "sock %p", sk);
3714 
3715 	return user_pairing_resp(sk, hdev, &cp->addr,
3716 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3717 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3718 }
3719 
3720 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3721 {
3722 	struct adv_info *adv_instance;
3723 
3724 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3725 	if (!adv_instance)
3726 		return 0;
3727 
3728 	/* stop if current instance doesn't need to be changed */
3729 	if (!(adv_instance->flags & flags))
3730 		return 0;
3731 
3732 	cancel_adv_timeout(hdev);
3733 
3734 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3735 	if (!adv_instance)
3736 		return 0;
3737 
3738 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3739 
3740 	return 0;
3741 }
3742 
/* hci_cmd_sync callback: expire advertising instances that include the
 * local name so they get refreshed with the new value.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3747 
/* hci_cmd_sync completion callback for set_name_sync(): report the
 * result of MGMT_OP_SET_LOCAL_NAME back to the requesting socket.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the command was already taken off the pending list */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* With LE advertising active, expire instances carrying the
		 * local name so the new name is picked up.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3772 
/* Runs on the hci_cmd_sync queue to push the new local name to the
 * controller: BR/EDR name and EIR data, plus the LE scan response data
 * when advertising is active.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3788 
/* MGMT_OP_SET_LOCAL_NAME handler: update the complete and short local
 * names, pushing them to the controller when it is powered.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* Store the new short name unconditionally */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* While powered off no HCI traffic is needed: store the name and
	 * notify the other mgmt sockets directly.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Store the name so the queued set_name_sync() sees the new value */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3851 
/* hci_cmd_sync callback: expire advertising instances that include the
 * appearance value so they get refreshed with the new one.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3856 
3857 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3858 			  u16 len)
3859 {
3860 	struct mgmt_cp_set_appearance *cp = data;
3861 	u16 appearance;
3862 	int err;
3863 
3864 	bt_dev_dbg(hdev, "sock %p", sk);
3865 
3866 	if (!lmp_le_capable(hdev))
3867 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3868 				       MGMT_STATUS_NOT_SUPPORTED);
3869 
3870 	appearance = le16_to_cpu(cp->appearance);
3871 
3872 	hci_dev_lock(hdev);
3873 
3874 	if (hdev->appearance != appearance) {
3875 		hdev->appearance = appearance;
3876 
3877 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3878 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3879 					   NULL);
3880 
3881 		ext_info_changed(hdev, sk);
3882 	}
3883 
3884 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3885 				0);
3886 
3887 	hci_dev_unlock(hdev);
3888 
3889 	return err;
3890 }
3891 
3892 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3893 				 void *data, u16 len)
3894 {
3895 	struct mgmt_rp_get_phy_configuration rp;
3896 
3897 	bt_dev_dbg(hdev, "sock %p", sk);
3898 
3899 	hci_dev_lock(hdev);
3900 
3901 	memset(&rp, 0, sizeof(rp));
3902 
3903 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3904 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3905 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3906 
3907 	hci_dev_unlock(hdev);
3908 
3909 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3910 				 &rp, sizeof(rp));
3911 }
3912 
3913 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3914 {
3915 	struct mgmt_ev_phy_configuration_changed ev;
3916 
3917 	memset(&ev, 0, sizeof(ev));
3918 
3919 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3920 
3921 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3922 			  sizeof(ev), skip);
3923 }
3924 
/* hci_cmd_sync completion callback for set_default_phy_sync(): derive
 * the final status from the HCI response skb and report it back on the
 * pending MGMT_OP_SET_PHY_CONFIGURATION command.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if the command was already taken off the pending list */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* With no transport error, the HCI response stored by
	 * set_default_phy_sync() determines the status: missing skb,
	 * error-pointer skb, or the status byte in the response payload.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Let the other mgmt sockets know about the new selection */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3961 
3962 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3963 {
3964 	struct mgmt_pending_cmd *cmd = data;
3965 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3966 	struct hci_cp_le_set_default_phy cp_phy;
3967 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3968 
3969 	memset(&cp_phy, 0, sizeof(cp_phy));
3970 
3971 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3972 		cp_phy.all_phys |= 0x01;
3973 
3974 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3975 		cp_phy.all_phys |= 0x02;
3976 
3977 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3978 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3979 
3980 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3981 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3982 
3983 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3984 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3985 
3986 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3987 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3988 
3989 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3990 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3991 
3992 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3993 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3994 
3995 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3996 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3997 
3998 	return 0;
3999 }
4000 
/* MGMT_OP_SET_PHY_CONFIGURATION handler: apply the requested BR/EDR
 * packet types directly and queue an HCI command for any LE PHY change.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting an unsupported PHY is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* PHYs that are not configurable must always stay selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do if the selection matches the current state */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR PHY selection into the ACL packet type
	 * mask. Note the EDR (2M/3M) bits are handled with inverted
	 * polarity compared to the BR 1M bits.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed, no HCI command is needed:
	 * notify if anything changed and complete right here.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4129 
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the list of keys that must
 * not be distributed/accepted with the list supplied by userspace.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Largest key_count that still fits the u16 payload length */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the advertised key count */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The new list fully replaces any previously blocked keys */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		/* On allocation failure the keys added so far are kept and
		 * the command is answered with NO_RESOURCES.
		 */
		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4178 
4179 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4180 			       void *data, u16 len)
4181 {
4182 	struct mgmt_mode *cp = data;
4183 	int err;
4184 	bool changed = false;
4185 
4186 	bt_dev_dbg(hdev, "sock %p", sk);
4187 
4188 	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4189 		return mgmt_cmd_status(sk, hdev->id,
4190 				       MGMT_OP_SET_WIDEBAND_SPEECH,
4191 				       MGMT_STATUS_NOT_SUPPORTED);
4192 
4193 	if (cp->val != 0x00 && cp->val != 0x01)
4194 		return mgmt_cmd_status(sk, hdev->id,
4195 				       MGMT_OP_SET_WIDEBAND_SPEECH,
4196 				       MGMT_STATUS_INVALID_PARAMS);
4197 
4198 	hci_dev_lock(hdev);
4199 
4200 	if (hdev_is_powered(hdev) &&
4201 	    !!cp->val != hci_dev_test_flag(hdev,
4202 					   HCI_WIDEBAND_SPEECH_ENABLED)) {
4203 		err = mgmt_cmd_status(sk, hdev->id,
4204 				      MGMT_OP_SET_WIDEBAND_SPEECH,
4205 				      MGMT_STATUS_REJECTED);
4206 		goto unlock;
4207 	}
4208 
4209 	if (cp->val)
4210 		changed = !hci_dev_test_and_set_flag(hdev,
4211 						   HCI_WIDEBAND_SPEECH_ENABLED);
4212 	else
4213 		changed = hci_dev_test_and_clear_flag(hdev,
4214 						   HCI_WIDEBAND_SPEECH_ENABLED);
4215 
4216 	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4217 	if (err < 0)
4218 		goto unlock;
4219 
4220 	if (changed)
4221 		err = new_settings(hdev, sk);
4222 
4223 unlock:
4224 	hci_dev_unlock(hdev);
4225 	return err;
4226 }
4227 
/* MGMT_OP_READ_CONTROLLER_CAP handler: report security capability
 * flags and key-size/TX-power information as EIR-encoded entries.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* NOTE(review): buf must hold sizeof(*rp) plus every EIR entry
	 * appended below; 20 bytes appears sufficient — confirm if new
	 * entries are added.
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4294 
/* Experimental feature UUIDs. Each array stores the UUID byte-reversed
 * (little-endian) relative to the textual form noted above it, which is
 * the representation used on the MGMT wire and matched against
 * mgmt_cp_set_exp_feature->uuid in set_exp_feature().
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4338 
4339 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4340 				  void *data, u16 data_len)
4341 {
4342 	struct mgmt_rp_read_exp_features_info *rp;
4343 	size_t len;
4344 	u16 idx = 0;
4345 	u32 flags;
4346 	int status;
4347 
4348 	bt_dev_dbg(hdev, "sock %p", sk);
4349 
4350 	/* Enough space for 7 features */
4351 	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4352 	rp = kzalloc(len, GFP_KERNEL);
4353 	if (!rp)
4354 		return -ENOMEM;
4355 
4356 #ifdef CONFIG_BT_FEATURE_DEBUG
4357 	if (!hdev) {
4358 		flags = bt_dbg_get() ? BIT(0) : 0;
4359 
4360 		memcpy(rp->features[idx].uuid, debug_uuid, 16);
4361 		rp->features[idx].flags = cpu_to_le32(flags);
4362 		idx++;
4363 	}
4364 #endif
4365 
4366 	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4367 		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4368 			flags = BIT(0);
4369 		else
4370 			flags = 0;
4371 
4372 		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4373 		rp->features[idx].flags = cpu_to_le32(flags);
4374 		idx++;
4375 	}
4376 
4377 	if (hdev && ll_privacy_capable(hdev)) {
4378 		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4379 			flags = BIT(0) | BIT(1);
4380 		else
4381 			flags = BIT(1);
4382 
4383 		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4384 		rp->features[idx].flags = cpu_to_le32(flags);
4385 		idx++;
4386 	}
4387 
4388 	if (hdev && (aosp_has_quality_report(hdev) ||
4389 		     hdev->set_quality_report)) {
4390 		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4391 			flags = BIT(0);
4392 		else
4393 			flags = 0;
4394 
4395 		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4396 		rp->features[idx].flags = cpu_to_le32(flags);
4397 		idx++;
4398 	}
4399 
4400 	if (hdev && hdev->get_data_path_id) {
4401 		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4402 			flags = BIT(0);
4403 		else
4404 			flags = 0;
4405 
4406 		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4407 		rp->features[idx].flags = cpu_to_le32(flags);
4408 		idx++;
4409 	}
4410 
4411 	if (IS_ENABLED(CONFIG_BT_LE)) {
4412 		flags = iso_enabled() ? BIT(0) : 0;
4413 		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4414 		rp->features[idx].flags = cpu_to_le32(flags);
4415 		idx++;
4416 	}
4417 
4418 	if (hdev && lmp_le_capable(hdev)) {
4419 		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4420 			flags = BIT(0);
4421 		else
4422 			flags = 0;
4423 
4424 		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4425 		rp->features[idx].flags = cpu_to_le32(flags);
4426 		idx++;
4427 	}
4428 
4429 	rp->feature_count = cpu_to_le16(idx);
4430 
4431 	/* After reading the experimental features information, enable
4432 	 * the events to update client on any future change.
4433 	 */
4434 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4435 
4436 	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4437 				   MGMT_OP_READ_EXP_FEATURES_INFO,
4438 				   0, rp, sizeof(*rp) + (20 * idx));
4439 
4440 	kfree(rp);
4441 	return status;
4442 }
4443 
4444 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4445 					  struct sock *skip)
4446 {
4447 	struct mgmt_ev_exp_feature_changed ev;
4448 
4449 	memset(&ev, 0, sizeof(ev));
4450 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
4451 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4452 
4453 	// Do we need to be atomic with the conn_flags?
4454 	if (enabled && privacy_mode_capable(hdev))
4455 		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4456 	else
4457 		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4458 
4459 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4460 				  &ev, sizeof(ev),
4461 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4462 
4463 }
4464 
4465 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4466 			       bool enabled, struct sock *skip)
4467 {
4468 	struct mgmt_ev_exp_feature_changed ev;
4469 
4470 	memset(&ev, 0, sizeof(ev));
4471 	memcpy(ev.uuid, uuid, 16);
4472 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4473 
4474 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4475 				  &ev, sizeof(ev),
4476 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4477 }
4478 
/* Build one entry of the exp_features[] table, pairing a feature UUID
 * with the handler that sets it.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4484 
4485 /* The zero key uuid is special. Multiple exp features are set through it. */
4486 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4487 			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4488 {
4489 	struct mgmt_rp_set_exp_feature rp;
4490 
4491 	memset(rp.uuid, 0, 16);
4492 	rp.flags = cpu_to_le32(0);
4493 
4494 #ifdef CONFIG_BT_FEATURE_DEBUG
4495 	if (!hdev) {
4496 		bool changed = bt_dbg_get();
4497 
4498 		bt_dbg_set(false);
4499 
4500 		if (changed)
4501 			exp_feature_changed(NULL, ZERO_KEY, false, sk);
4502 	}
4503 #endif
4504 
4505 	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4506 		bool changed;
4507 
4508 		changed = hci_dev_test_and_clear_flag(hdev,
4509 						      HCI_ENABLE_LL_PRIVACY);
4510 		if (changed)
4511 			exp_feature_changed(hdev, rpa_resolution_uuid, false,
4512 					    sk);
4513 	}
4514 
4515 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4516 
4517 	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4518 				 MGMT_OP_SET_EXP_FEATURE, 0,
4519 				 &rp, sizeof(rp));
4520 }
4521 
4522 #ifdef CONFIG_BT_FEATURE_DEBUG
4523 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4524 			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4525 {
4526 	struct mgmt_rp_set_exp_feature rp;
4527 
4528 	bool val, changed;
4529 	int err;
4530 
4531 	/* Command requires to use the non-controller index */
4532 	if (hdev)
4533 		return mgmt_cmd_status(sk, hdev->id,
4534 				       MGMT_OP_SET_EXP_FEATURE,
4535 				       MGMT_STATUS_INVALID_INDEX);
4536 
4537 	/* Parameters are limited to a single octet */
4538 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4539 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4540 				       MGMT_OP_SET_EXP_FEATURE,
4541 				       MGMT_STATUS_INVALID_PARAMS);
4542 
4543 	/* Only boolean on/off is supported */
4544 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4545 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4546 				       MGMT_OP_SET_EXP_FEATURE,
4547 				       MGMT_STATUS_INVALID_PARAMS);
4548 
4549 	val = !!cp->param[0];
4550 	changed = val ? !bt_dbg_get() : bt_dbg_get();
4551 	bt_dbg_set(val);
4552 
4553 	memcpy(rp.uuid, debug_uuid, 16);
4554 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4555 
4556 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4557 
4558 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4559 				MGMT_OP_SET_EXP_FEATURE, 0,
4560 				&rp, sizeof(rp));
4561 
4562 	if (changed)
4563 		exp_feature_changed(hdev, debug_uuid, val, sk);
4564 
4565 	return err;
4566 }
4567 #endif
4568 
4569 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4570 			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4571 {
4572 	struct mgmt_rp_set_exp_feature rp;
4573 	bool val, changed;
4574 	int err;
4575 
4576 	/* Command requires to use the controller index */
4577 	if (!hdev)
4578 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4579 				       MGMT_OP_SET_EXP_FEATURE,
4580 				       MGMT_STATUS_INVALID_INDEX);
4581 
4582 	/* Parameters are limited to a single octet */
4583 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4584 		return mgmt_cmd_status(sk, hdev->id,
4585 				       MGMT_OP_SET_EXP_FEATURE,
4586 				       MGMT_STATUS_INVALID_PARAMS);
4587 
4588 	/* Only boolean on/off is supported */
4589 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4590 		return mgmt_cmd_status(sk, hdev->id,
4591 				       MGMT_OP_SET_EXP_FEATURE,
4592 				       MGMT_STATUS_INVALID_PARAMS);
4593 
4594 	val = !!cp->param[0];
4595 
4596 	if (val) {
4597 		changed = !hci_dev_test_and_set_flag(hdev,
4598 						     HCI_MESH_EXPERIMENTAL);
4599 	} else {
4600 		hci_dev_clear_flag(hdev, HCI_MESH);
4601 		changed = hci_dev_test_and_clear_flag(hdev,
4602 						      HCI_MESH_EXPERIMENTAL);
4603 	}
4604 
4605 	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4606 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4607 
4608 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4609 
4610 	err = mgmt_cmd_complete(sk, hdev->id,
4611 				MGMT_OP_SET_EXP_FEATURE, 0,
4612 				&rp, sizeof(rp));
4613 
4614 	if (changed)
4615 		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4616 
4617 	return err;
4618 }
4619 
4620 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4621 				   struct mgmt_cp_set_exp_feature *cp,
4622 				   u16 data_len)
4623 {
4624 	struct mgmt_rp_set_exp_feature rp;
4625 	bool val, changed;
4626 	int err;
4627 	u32 flags;
4628 
4629 	/* Command requires to use the controller index */
4630 	if (!hdev)
4631 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4632 				       MGMT_OP_SET_EXP_FEATURE,
4633 				       MGMT_STATUS_INVALID_INDEX);
4634 
4635 	/* Changes can only be made when controller is powered down */
4636 	if (hdev_is_powered(hdev))
4637 		return mgmt_cmd_status(sk, hdev->id,
4638 				       MGMT_OP_SET_EXP_FEATURE,
4639 				       MGMT_STATUS_REJECTED);
4640 
4641 	/* Parameters are limited to a single octet */
4642 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4643 		return mgmt_cmd_status(sk, hdev->id,
4644 				       MGMT_OP_SET_EXP_FEATURE,
4645 				       MGMT_STATUS_INVALID_PARAMS);
4646 
4647 	/* Only boolean on/off is supported */
4648 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4649 		return mgmt_cmd_status(sk, hdev->id,
4650 				       MGMT_OP_SET_EXP_FEATURE,
4651 				       MGMT_STATUS_INVALID_PARAMS);
4652 
4653 	val = !!cp->param[0];
4654 
4655 	if (val) {
4656 		changed = !hci_dev_test_and_set_flag(hdev,
4657 						     HCI_ENABLE_LL_PRIVACY);
4658 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4659 
4660 		/* Enable LL privacy + supported settings changed */
4661 		flags = BIT(0) | BIT(1);
4662 	} else {
4663 		changed = hci_dev_test_and_clear_flag(hdev,
4664 						      HCI_ENABLE_LL_PRIVACY);
4665 
4666 		/* Disable LL privacy + supported settings changed */
4667 		flags = BIT(1);
4668 	}
4669 
4670 	memcpy(rp.uuid, rpa_resolution_uuid, 16);
4671 	rp.flags = cpu_to_le32(flags);
4672 
4673 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4674 
4675 	err = mgmt_cmd_complete(sk, hdev->id,
4676 				MGMT_OP_SET_EXP_FEATURE, 0,
4677 				&rp, sizeof(rp));
4678 
4679 	if (changed)
4680 		exp_ll_privacy_feature_changed(val, hdev, sk);
4681 
4682 	return err;
4683 }
4684 
/* Enable or disable the controller quality-report experimental feature.
 *
 * Requires either the AOSP vendor extension or a driver-provided
 * set_quality_report callback. Replies with the feature UUID and
 * resulting flags, and emits an Experimental Feature Changed event when
 * the state actually toggled.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Serialize with other synchronous HCI request processing */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* The driver callback takes precedence over the AOSP
		 * extension when both are available.
		 */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only update the flag once the backend accepted the change */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4758 
4759 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4760 				  struct mgmt_cp_set_exp_feature *cp,
4761 				  u16 data_len)
4762 {
4763 	bool val, changed;
4764 	int err;
4765 	struct mgmt_rp_set_exp_feature rp;
4766 
4767 	/* Command requires to use a valid controller index */
4768 	if (!hdev)
4769 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4770 				       MGMT_OP_SET_EXP_FEATURE,
4771 				       MGMT_STATUS_INVALID_INDEX);
4772 
4773 	/* Parameters are limited to a single octet */
4774 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4775 		return mgmt_cmd_status(sk, hdev->id,
4776 				       MGMT_OP_SET_EXP_FEATURE,
4777 				       MGMT_STATUS_INVALID_PARAMS);
4778 
4779 	/* Only boolean on/off is supported */
4780 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4781 		return mgmt_cmd_status(sk, hdev->id,
4782 				       MGMT_OP_SET_EXP_FEATURE,
4783 				       MGMT_STATUS_INVALID_PARAMS);
4784 
4785 	val = !!cp->param[0];
4786 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4787 
4788 	if (!hdev->get_data_path_id) {
4789 		return mgmt_cmd_status(sk, hdev->id,
4790 				       MGMT_OP_SET_EXP_FEATURE,
4791 				       MGMT_STATUS_NOT_SUPPORTED);
4792 	}
4793 
4794 	if (changed) {
4795 		if (val)
4796 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4797 		else
4798 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4799 	}
4800 
4801 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4802 		    val, changed);
4803 
4804 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4805 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4806 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4807 	err = mgmt_cmd_complete(sk, hdev->id,
4808 				MGMT_OP_SET_EXP_FEATURE, 0,
4809 				&rp, sizeof(rp));
4810 
4811 	if (changed)
4812 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4813 
4814 	return err;
4815 }
4816 
4817 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4818 					  struct mgmt_cp_set_exp_feature *cp,
4819 					  u16 data_len)
4820 {
4821 	bool val, changed;
4822 	int err;
4823 	struct mgmt_rp_set_exp_feature rp;
4824 
4825 	/* Command requires to use a valid controller index */
4826 	if (!hdev)
4827 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4828 				       MGMT_OP_SET_EXP_FEATURE,
4829 				       MGMT_STATUS_INVALID_INDEX);
4830 
4831 	/* Parameters are limited to a single octet */
4832 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4833 		return mgmt_cmd_status(sk, hdev->id,
4834 				       MGMT_OP_SET_EXP_FEATURE,
4835 				       MGMT_STATUS_INVALID_PARAMS);
4836 
4837 	/* Only boolean on/off is supported */
4838 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4839 		return mgmt_cmd_status(sk, hdev->id,
4840 				       MGMT_OP_SET_EXP_FEATURE,
4841 				       MGMT_STATUS_INVALID_PARAMS);
4842 
4843 	val = !!cp->param[0];
4844 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4845 
4846 	if (!hci_dev_le_state_simultaneous(hdev)) {
4847 		return mgmt_cmd_status(sk, hdev->id,
4848 				       MGMT_OP_SET_EXP_FEATURE,
4849 				       MGMT_STATUS_NOT_SUPPORTED);
4850 	}
4851 
4852 	if (changed) {
4853 		if (val)
4854 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4855 		else
4856 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4857 	}
4858 
4859 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4860 		    val, changed);
4861 
4862 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4863 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4864 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4865 	err = mgmt_cmd_complete(sk, hdev->id,
4866 				MGMT_OP_SET_EXP_FEATURE, 0,
4867 				&rp, sizeof(rp));
4868 
4869 	if (changed)
4870 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4871 
4872 	return err;
4873 }
4874 
4875 #ifdef CONFIG_BT_LE
4876 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4877 			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4878 {
4879 	struct mgmt_rp_set_exp_feature rp;
4880 	bool val, changed = false;
4881 	int err;
4882 
4883 	/* Command requires to use the non-controller index */
4884 	if (hdev)
4885 		return mgmt_cmd_status(sk, hdev->id,
4886 				       MGMT_OP_SET_EXP_FEATURE,
4887 				       MGMT_STATUS_INVALID_INDEX);
4888 
4889 	/* Parameters are limited to a single octet */
4890 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4891 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4892 				       MGMT_OP_SET_EXP_FEATURE,
4893 				       MGMT_STATUS_INVALID_PARAMS);
4894 
4895 	/* Only boolean on/off is supported */
4896 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4897 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4898 				       MGMT_OP_SET_EXP_FEATURE,
4899 				       MGMT_STATUS_INVALID_PARAMS);
4900 
4901 	val = cp->param[0] ? true : false;
4902 	if (val)
4903 		err = iso_init();
4904 	else
4905 		err = iso_exit();
4906 
4907 	if (!err)
4908 		changed = true;
4909 
4910 	memcpy(rp.uuid, iso_socket_uuid, 16);
4911 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4912 
4913 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4914 
4915 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4916 				MGMT_OP_SET_EXP_FEATURE, 0,
4917 				&rp, sizeof(rp));
4918 
4919 	if (changed)
4920 		exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4921 
4922 	return err;
4923 }
4924 #endif
4925 
/* Dispatch table mapping experimental feature UUIDs to their handlers;
 * searched linearly by set_exp_feature() and terminated by a NULL entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4947 
4948 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4949 			   void *data, u16 data_len)
4950 {
4951 	struct mgmt_cp_set_exp_feature *cp = data;
4952 	size_t i = 0;
4953 
4954 	bt_dev_dbg(hdev, "sock %p", sk);
4955 
4956 	for (i = 0; exp_features[i].uuid; i++) {
4957 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4958 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4959 	}
4960 
4961 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4962 			       MGMT_OP_SET_EXP_FEATURE,
4963 			       MGMT_STATUS_NOT_SUPPORTED);
4964 }
4965 
/* Return the device-flag mask actually supported for @params, starting
 * from the controller-wide hdev->conn_flags and masking out flags that
 * cannot apply to this particular device.
 */
static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* Devices using RPAs can only be programmed in the acceptlist if
	 * LL Privacy has been enabled; otherwise they cannot mark
	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
	 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}
4981 
4982 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4983 			    u16 data_len)
4984 {
4985 	struct mgmt_cp_get_device_flags *cp = data;
4986 	struct mgmt_rp_get_device_flags rp;
4987 	struct bdaddr_list_with_flags *br_params;
4988 	struct hci_conn_params *params;
4989 	u32 supported_flags;
4990 	u32 current_flags = 0;
4991 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4992 
4993 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4994 		   &cp->addr.bdaddr, cp->addr.type);
4995 
4996 	hci_dev_lock(hdev);
4997 
4998 	supported_flags = hdev->conn_flags;
4999 
5000 	memset(&rp, 0, sizeof(rp));
5001 
5002 	if (cp->addr.type == BDADDR_BREDR) {
5003 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5004 							      &cp->addr.bdaddr,
5005 							      cp->addr.type);
5006 		if (!br_params)
5007 			goto done;
5008 
5009 		current_flags = br_params->flags;
5010 	} else {
5011 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5012 						le_addr_type(cp->addr.type));
5013 		if (!params)
5014 			goto done;
5015 
5016 		supported_flags = get_params_flags(hdev, params);
5017 		current_flags = params->flags;
5018 	}
5019 
5020 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5021 	rp.addr.type = cp->addr.type;
5022 	rp.supported_flags = cpu_to_le32(supported_flags);
5023 	rp.current_flags = cpu_to_le32(current_flags);
5024 
5025 	status = MGMT_STATUS_SUCCESS;
5026 
5027 done:
5028 	hci_dev_unlock(hdev);
5029 
5030 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5031 				&rp, sizeof(rp));
5032 }
5033 
5034 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5035 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5036 				 u32 supported_flags, u32 current_flags)
5037 {
5038 	struct mgmt_ev_device_flags_changed ev;
5039 
5040 	bacpy(&ev.addr.bdaddr, bdaddr);
5041 	ev.addr.type = bdaddr_type;
5042 	ev.supported_flags = cpu_to_le32(supported_flags);
5043 	ev.current_flags = cpu_to_le32(current_flags);
5044 
5045 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5046 }
5047 
/* Handle the Set Device Flags command.
 *
 * Validates the requested flags against the supported mask, then stores
 * them on the matching BR/EDR accept-list entry or LE connection
 * parameters. On success a Device Flags Changed event is broadcast to
 * the other MGMT sockets.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): hdev->conn_flags is read before hci_dev_lock() is
	 * taken and could change concurrently — consider taking the lock
	 * earlier; TODO confirm whether this race matters in practice.
	 */
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Re-validate against the per-device mask, which may be narrower
	 * than the controller-wide one (see get_params_flags()).
	 */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5124 
5125 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5126 				   u16 handle)
5127 {
5128 	struct mgmt_ev_adv_monitor_added ev;
5129 
5130 	ev.monitor_handle = cpu_to_le16(handle);
5131 
5132 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5133 }
5134 
5135 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5136 {
5137 	struct mgmt_ev_adv_monitor_removed ev;
5138 	struct mgmt_pending_cmd *cmd;
5139 	struct sock *sk_skip = NULL;
5140 	struct mgmt_cp_remove_adv_monitor *cp;
5141 
5142 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5143 	if (cmd) {
5144 		cp = cmd->param;
5145 
5146 		if (cp->monitor_handle)
5147 			sk_skip = cmd->sk;
5148 	}
5149 
5150 	ev.monitor_handle = cpu_to_le16(handle);
5151 
5152 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5153 }
5154 
5155 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5156 				 void *data, u16 len)
5157 {
5158 	struct adv_monitor *monitor = NULL;
5159 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5160 	int handle, err;
5161 	size_t rp_size = 0;
5162 	__u32 supported = 0;
5163 	__u32 enabled = 0;
5164 	__u16 num_handles = 0;
5165 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5166 
5167 	BT_DBG("request for %s", hdev->name);
5168 
5169 	hci_dev_lock(hdev);
5170 
5171 	if (msft_monitor_supported(hdev))
5172 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5173 
5174 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5175 		handles[num_handles++] = monitor->handle;
5176 
5177 	hci_dev_unlock(hdev);
5178 
5179 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5180 	rp = kmalloc(rp_size, GFP_KERNEL);
5181 	if (!rp)
5182 		return -ENOMEM;
5183 
5184 	/* All supported features are currently enabled */
5185 	enabled = supported;
5186 
5187 	rp->supported_features = cpu_to_le32(supported);
5188 	rp->enabled_features = cpu_to_le32(enabled);
5189 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5190 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5191 	rp->num_handles = cpu_to_le16(num_handles);
5192 	if (num_handles)
5193 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5194 
5195 	err = mgmt_cmd_complete(sk, hdev->id,
5196 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
5197 				MGMT_STATUS_SUCCESS, rp, rp_size);
5198 
5199 	kfree(rp);
5200 
5201 	return err;
5202 }
5203 
/* Completion callback for Add Adv Patterns Monitor (RSSI), invoked from
 * the hci_cmd_sync queue once hci_add_adv_monitor() has finished.
 * Replies to the pending command and, on success, accounts the monitor
 * and refreshes passive scanning.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		/* Notify other sockets and mark the monitor registered so
		 * passive scanning starts matching against it.
		 */
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5231 
5232 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5233 {
5234 	struct mgmt_pending_cmd *cmd = data;
5235 	struct adv_monitor *monitor = cmd->user_data;
5236 
5237 	return hci_add_adv_monitor(hdev, monitor);
5238 }
5239 
/* Common tail of the Add Adv Patterns Monitor (RSSI) handlers: queue the
 * registration of monitor @m via hci_cmd_sync.
 *
 * Takes ownership of @m: on any failure path it is released with
 * hci_free_adv_monitor() before an error status is returned; on success
 * ownership passes to the queued command's completion handler.
 * @status carries any parse error from the caller and, when non-zero,
 * is reported back immediately.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Reject while any conflicting operation is still pending */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	/* Error path: the monitor was not handed off, so free it here */
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5287 
5288 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5289 				   struct mgmt_adv_rssi_thresholds *rssi)
5290 {
5291 	if (rssi) {
5292 		m->rssi.low_threshold = rssi->low_threshold;
5293 		m->rssi.low_threshold_timeout =
5294 		    __le16_to_cpu(rssi->low_threshold_timeout);
5295 		m->rssi.high_threshold = rssi->high_threshold;
5296 		m->rssi.high_threshold_timeout =
5297 		    __le16_to_cpu(rssi->high_threshold_timeout);
5298 		m->rssi.sampling_period = rssi->sampling_period;
5299 	} else {
5300 		/* Default values. These numbers are the least constricting
5301 		 * parameters for MSFT API to work, so it behaves as if there
5302 		 * are no rssi parameter to consider. May need to be changed
5303 		 * if other API are to be supported.
5304 		 */
5305 		m->rssi.low_threshold = -127;
5306 		m->rssi.low_threshold_timeout = 60;
5307 		m->rssi.high_threshold = -127;
5308 		m->rssi.high_threshold_timeout = 0;
5309 		m->rssi.sampling_period = 0;
5310 	}
5311 }
5312 
5313 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5314 				    struct mgmt_adv_pattern *patterns)
5315 {
5316 	u8 offset = 0, length = 0;
5317 	struct adv_pattern *p = NULL;
5318 	int i;
5319 
5320 	for (i = 0; i < pattern_count; i++) {
5321 		offset = patterns[i].offset;
5322 		length = patterns[i].length;
5323 		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5324 		    length > HCI_MAX_EXT_AD_LENGTH ||
5325 		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5326 			return MGMT_STATUS_INVALID_PARAMS;
5327 
5328 		p = kmalloc(sizeof(*p), GFP_KERNEL);
5329 		if (!p)
5330 			return MGMT_STATUS_NO_RESOURCES;
5331 
5332 		p->ad_type = patterns[i].ad_type;
5333 		p->offset = patterns[i].offset;
5334 		p->length = patterns[i].length;
5335 		memcpy(p->value, patterns[i].value, p->length);
5336 
5337 		INIT_LIST_HEAD(&p->list);
5338 		list_add(&p->list, &m->patterns);
5339 	}
5340 
5341 	return MGMT_STATUS_SUCCESS;
5342 }
5343 
5344 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5345 				    void *data, u16 len)
5346 {
5347 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5348 	struct adv_monitor *m = NULL;
5349 	u8 status = MGMT_STATUS_SUCCESS;
5350 	size_t expected_size = sizeof(*cp);
5351 
5352 	BT_DBG("request for %s", hdev->name);
5353 
5354 	if (len <= sizeof(*cp)) {
5355 		status = MGMT_STATUS_INVALID_PARAMS;
5356 		goto done;
5357 	}
5358 
5359 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5360 	if (len != expected_size) {
5361 		status = MGMT_STATUS_INVALID_PARAMS;
5362 		goto done;
5363 	}
5364 
5365 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5366 	if (!m) {
5367 		status = MGMT_STATUS_NO_RESOURCES;
5368 		goto done;
5369 	}
5370 
5371 	INIT_LIST_HEAD(&m->patterns);
5372 
5373 	parse_adv_monitor_rssi(m, NULL);
5374 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5375 
5376 done:
5377 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5378 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5379 }
5380 
5381 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5382 					 void *data, u16 len)
5383 {
5384 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5385 	struct adv_monitor *m = NULL;
5386 	u8 status = MGMT_STATUS_SUCCESS;
5387 	size_t expected_size = sizeof(*cp);
5388 
5389 	BT_DBG("request for %s", hdev->name);
5390 
5391 	if (len <= sizeof(*cp)) {
5392 		status = MGMT_STATUS_INVALID_PARAMS;
5393 		goto done;
5394 	}
5395 
5396 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5397 	if (len != expected_size) {
5398 		status = MGMT_STATUS_INVALID_PARAMS;
5399 		goto done;
5400 	}
5401 
5402 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5403 	if (!m) {
5404 		status = MGMT_STATUS_NO_RESOURCES;
5405 		goto done;
5406 	}
5407 
5408 	INIT_LIST_HEAD(&m->patterns);
5409 
5410 	parse_adv_monitor_rssi(m, &cp->rssi);
5411 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5412 
5413 done:
5414 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5415 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5416 }
5417 
/* Completion callback for MGMT_OP_REMOVE_ADV_MONITOR: reply to the
 * requesting socket and drop the pending command.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Echo back the handle the caller asked to remove. */
	rp.monitor_handle = cp->monitor_handle;

	/* Monitors changed: passive scanning may need reconfiguring. */
	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5440 
5441 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5442 {
5443 	struct mgmt_pending_cmd *cmd = data;
5444 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5445 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5446 
5447 	if (!handle)
5448 		return hci_remove_all_adv_monitor(hdev);
5449 
5450 	return hci_remove_single_adv_monitor(hdev, handle);
5451 }
5452 
5453 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5454 			      void *data, u16 len)
5455 {
5456 	struct mgmt_pending_cmd *cmd;
5457 	int err, status;
5458 
5459 	hci_dev_lock(hdev);
5460 
5461 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
5462 	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5463 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5464 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5465 		status = MGMT_STATUS_BUSY;
5466 		goto unlock;
5467 	}
5468 
5469 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5470 	if (!cmd) {
5471 		status = MGMT_STATUS_NO_RESOURCES;
5472 		goto unlock;
5473 	}
5474 
5475 	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
5476 				  mgmt_remove_adv_monitor_complete);
5477 
5478 	if (err) {
5479 		mgmt_pending_remove(cmd);
5480 
5481 		if (err == -ENOMEM)
5482 			status = MGMT_STATUS_NO_RESOURCES;
5483 		else
5484 			status = MGMT_STATUS_FAILED;
5485 
5486 		goto unlock;
5487 	}
5488 
5489 	hci_dev_unlock(hdev);
5490 
5491 	return 0;
5492 
5493 unlock:
5494 	hci_dev_unlock(hdev);
5495 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5496 			       status);
5497 }
5498 
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA: translate the
 * controller's reply skb into a mgmt response and free the command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even when the request itself succeeded, the reply skb may be
	 * missing, be an ERR_PTR, or carry an HCI error in its first byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 hash/randomizer only. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the mgmt reply. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* Only a real skb may be freed; NULL/ERR_PTR must be skipped. */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5565 
5566 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5567 {
5568 	struct mgmt_pending_cmd *cmd = data;
5569 
5570 	if (bredr_sc_enabled(hdev))
5571 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5572 	else
5573 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5574 
5575 	if (IS_ERR(cmd->skb))
5576 		return PTR_ERR(cmd->skb);
5577 	else
5578 		return 0;
5579 }
5580 
5581 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5582 			       void *data, u16 data_len)
5583 {
5584 	struct mgmt_pending_cmd *cmd;
5585 	int err;
5586 
5587 	bt_dev_dbg(hdev, "sock %p", sk);
5588 
5589 	hci_dev_lock(hdev);
5590 
5591 	if (!hdev_is_powered(hdev)) {
5592 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5593 				      MGMT_STATUS_NOT_POWERED);
5594 		goto unlock;
5595 	}
5596 
5597 	if (!lmp_ssp_capable(hdev)) {
5598 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5599 				      MGMT_STATUS_NOT_SUPPORTED);
5600 		goto unlock;
5601 	}
5602 
5603 	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5604 	if (!cmd)
5605 		err = -ENOMEM;
5606 	else
5607 		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5608 					 read_local_oob_data_complete);
5609 
5610 	if (err < 0) {
5611 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5612 				      MGMT_STATUS_FAILED);
5613 
5614 		if (cmd)
5615 			mgmt_pending_free(cmd);
5616 	}
5617 
5618 unlock:
5619 	hci_dev_unlock(hdev);
5620 	return err;
5621 }
5622 
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA.  Two command sizes are accepted:
 * the legacy form with P-192 values only, and the extended form with
 * both P-192 and P-256 values; anything else is rejected.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 hash/randomizer, BR/EDR only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 and P-256 values.  All-zero keys
		 * mean "no data" for the respective curve.
		 */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5730 
5731 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5732 				  void *data, u16 len)
5733 {
5734 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5735 	u8 status;
5736 	int err;
5737 
5738 	bt_dev_dbg(hdev, "sock %p", sk);
5739 
5740 	if (cp->addr.type != BDADDR_BREDR)
5741 		return mgmt_cmd_complete(sk, hdev->id,
5742 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5743 					 MGMT_STATUS_INVALID_PARAMS,
5744 					 &cp->addr, sizeof(cp->addr));
5745 
5746 	hci_dev_lock(hdev);
5747 
5748 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5749 		hci_remote_oob_data_clear(hdev);
5750 		status = MGMT_STATUS_SUCCESS;
5751 		goto done;
5752 	}
5753 
5754 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5755 	if (err < 0)
5756 		status = MGMT_STATUS_INVALID_PARAMS;
5757 	else
5758 		status = MGMT_STATUS_SUCCESS;
5759 
5760 done:
5761 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5762 				status, &cp->addr, sizeof(cp->addr));
5763 
5764 	hci_dev_unlock(hdev);
5765 	return err;
5766 }
5767 
/* Called from the HCI core when discovery start has completed: finish
 * whichever Start Discovery variant is pending, if any.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* Only one of the three variants can be pending at a time. */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
5790 
5791 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5792 				    uint8_t *mgmt_status)
5793 {
5794 	switch (type) {
5795 	case DISCOV_TYPE_LE:
5796 		*mgmt_status = mgmt_le_support(hdev);
5797 		if (*mgmt_status)
5798 			return false;
5799 		break;
5800 	case DISCOV_TYPE_INTERLEAVED:
5801 		*mgmt_status = mgmt_le_support(hdev);
5802 		if (*mgmt_status)
5803 			return false;
5804 		fallthrough;
5805 	case DISCOV_TYPE_BREDR:
5806 		*mgmt_status = mgmt_bredr_support(hdev);
5807 		if (*mgmt_status)
5808 			return false;
5809 		break;
5810 	default:
5811 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5812 		return false;
5813 	}
5814 
5815 	return true;
5816 }
5817 
5818 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5819 {
5820 	struct mgmt_pending_cmd *cmd = data;
5821 
5822 	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5823 	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5824 	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5825 		return;
5826 
5827 	bt_dev_dbg(hdev, "err %d", err);
5828 
5829 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5830 			  cmd->param, 1);
5831 	mgmt_pending_remove(cmd);
5832 
5833 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5834 				DISCOVERY_FINDING);
5835 }
5836 
/* hci_cmd_sync callback: kick off the actual discovery procedure. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5841 
/* Common handler for Start Discovery and Start Limited Discovery; @op
 * selects the variant so replies carry the right opcode.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and never while the
	 * controller performs periodic inquiry on its own.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	/* Success falls through too: err is 0 from hci_cmd_sync_queue(). */
failed:
	hci_dev_unlock(hdev);
	return err;
}
5912 
/* Handle MGMT_OP_START_DISCOVERY. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5919 
/* Handle MGMT_OP_START_LIMITED_DISCOVERY. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5927 
/* Handle MGMT_OP_START_SERVICE_DISCOVERY: discovery with an optional
 * RSSI threshold and UUID filter appended to the command.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound keeping sizeof(*cp) + uuid_count * 16 within u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time, and never while the
	 * controller performs periodic inquiry on its own.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must match the advertised UUID count. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	/* Success falls through too: err is 0 from hci_cmd_sync_queue(). */
failed:
	hci_dev_unlock(hdev);
	return err;
}
6039 
/* Called from the HCI core when discovery stop has completed: finish
 * the pending Stop Discovery command, if any.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}
6056 
/* hci_cmd_sync completion for MGMT_OP_STOP_DISCOVERY. */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if @cmd was already taken off the pending list. */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	/* On failure the discovery state is left as-is. */
	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6073 
/* hci_cmd_sync callback: perform the actual discovery stop. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6078 
6079 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6080 			  u16 len)
6081 {
6082 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
6083 	struct mgmt_pending_cmd *cmd;
6084 	int err;
6085 
6086 	bt_dev_dbg(hdev, "sock %p", sk);
6087 
6088 	hci_dev_lock(hdev);
6089 
6090 	if (!hci_discovery_active(hdev)) {
6091 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6092 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
6093 					sizeof(mgmt_cp->type));
6094 		goto unlock;
6095 	}
6096 
6097 	if (hdev->discovery.type != mgmt_cp->type) {
6098 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6099 					MGMT_STATUS_INVALID_PARAMS,
6100 					&mgmt_cp->type, sizeof(mgmt_cp->type));
6101 		goto unlock;
6102 	}
6103 
6104 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6105 	if (!cmd) {
6106 		err = -ENOMEM;
6107 		goto unlock;
6108 	}
6109 
6110 	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6111 				 stop_discovery_complete);
6112 	if (err < 0) {
6113 		mgmt_pending_remove(cmd);
6114 		goto unlock;
6115 	}
6116 
6117 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6118 
6119 unlock:
6120 	hci_dev_unlock(hdev);
6121 	return err;
6122 }
6123 
6124 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6125 			u16 len)
6126 {
6127 	struct mgmt_cp_confirm_name *cp = data;
6128 	struct inquiry_entry *e;
6129 	int err;
6130 
6131 	bt_dev_dbg(hdev, "sock %p", sk);
6132 
6133 	hci_dev_lock(hdev);
6134 
6135 	if (!hci_discovery_active(hdev)) {
6136 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6137 					MGMT_STATUS_FAILED, &cp->addr,
6138 					sizeof(cp->addr));
6139 		goto failed;
6140 	}
6141 
6142 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6143 	if (!e) {
6144 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6145 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6146 					sizeof(cp->addr));
6147 		goto failed;
6148 	}
6149 
6150 	if (cp->name_known) {
6151 		e->name_state = NAME_KNOWN;
6152 		list_del(&e->list);
6153 	} else {
6154 		e->name_state = NAME_NEEDED;
6155 		hci_inquiry_cache_update_resolve(hdev, e);
6156 	}
6157 
6158 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6159 				&cp->addr, sizeof(cp->addr));
6160 
6161 failed:
6162 	hci_dev_unlock(hdev);
6163 	return err;
6164 }
6165 
6166 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6167 			u16 len)
6168 {
6169 	struct mgmt_cp_block_device *cp = data;
6170 	u8 status;
6171 	int err;
6172 
6173 	bt_dev_dbg(hdev, "sock %p", sk);
6174 
6175 	if (!bdaddr_type_is_valid(cp->addr.type))
6176 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6177 					 MGMT_STATUS_INVALID_PARAMS,
6178 					 &cp->addr, sizeof(cp->addr));
6179 
6180 	hci_dev_lock(hdev);
6181 
6182 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6183 				  cp->addr.type);
6184 	if (err < 0) {
6185 		status = MGMT_STATUS_FAILED;
6186 		goto done;
6187 	}
6188 
6189 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6190 		   sk);
6191 	status = MGMT_STATUS_SUCCESS;
6192 
6193 done:
6194 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6195 				&cp->addr, sizeof(cp->addr));
6196 
6197 	hci_dev_unlock(hdev);
6198 
6199 	return err;
6200 }
6201 
6202 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6203 			  u16 len)
6204 {
6205 	struct mgmt_cp_unblock_device *cp = data;
6206 	u8 status;
6207 	int err;
6208 
6209 	bt_dev_dbg(hdev, "sock %p", sk);
6210 
6211 	if (!bdaddr_type_is_valid(cp->addr.type))
6212 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6213 					 MGMT_STATUS_INVALID_PARAMS,
6214 					 &cp->addr, sizeof(cp->addr));
6215 
6216 	hci_dev_lock(hdev);
6217 
6218 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6219 				  cp->addr.type);
6220 	if (err < 0) {
6221 		status = MGMT_STATUS_INVALID_PARAMS;
6222 		goto done;
6223 	}
6224 
6225 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6226 		   sk);
6227 	status = MGMT_STATUS_SUCCESS;
6228 
6229 done:
6230 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6231 				&cp->addr, sizeof(cp->addr));
6232 
6233 	hci_dev_unlock(hdev);
6234 
6235 	return err;
6236 }
6237 
/* hci_cmd_sync callback: refresh the EIR data after a device ID change. */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6242 
6243 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6244 			 u16 len)
6245 {
6246 	struct mgmt_cp_set_device_id *cp = data;
6247 	int err;
6248 	__u16 source;
6249 
6250 	bt_dev_dbg(hdev, "sock %p", sk);
6251 
6252 	source = __le16_to_cpu(cp->source);
6253 
6254 	if (source > 0x0002)
6255 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6256 				       MGMT_STATUS_INVALID_PARAMS);
6257 
6258 	hci_dev_lock(hdev);
6259 
6260 	hdev->devid_source = source;
6261 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6262 	hdev->devid_product = __le16_to_cpu(cp->product);
6263 	hdev->devid_version = __le16_to_cpu(cp->version);
6264 
6265 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6266 				NULL, 0);
6267 
6268 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6269 
6270 	hci_dev_unlock(hdev);
6271 
6272 	return err;
6273 }
6274 
/* Log the outcome of re-enabling instance advertising. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err)
		bt_dev_dbg(hdev, "status %d", err);
	else
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6282 
/* Completion callback for MGMT_OP_SET_ADVERTISING: sync the mgmt flag
 * with the controller state, notify waiters and, if the setting was
 * just disabled, restore multi-instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the actual controller advertising state in the flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first one. */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6330 
/* hci_cmd_sync callback for MGMT_OP_SET_ADVERTISING: apply the requested
 * advertising mode to the controller.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Value 0x02 means "advertising enabled and connectable". */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6364 
/* MGMT_OP_SET_ADVERTISING handler: toggle LE advertising from user space.
 *
 * Accepted values are 0x00 (off), 0x01 (on) and 0x02 (connectable).
 * When no HCI traffic is required the flags are toggled directly and a
 * settings response is sent; otherwise the work is queued via
 * hci_cmd_sync_queue() with set_adv_sync()/set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising is temporarily suspended (e.g. during scan/pause
	 * handling); refuse rather than race with the pause logic.
	 */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if the flags really changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one advertising/LE toggle may be in flight at a time */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6449 
/* MGMT_OP_SET_STATIC_ADDRESS handler: store the LE static random address
 * for this controller. Only permitted while the controller is powered
 * off; the new value takes effect on the next power-on.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY clears the static address; any other value must be
	 * a valid static random address.
	 */
	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6493 
/* MGMT_OP_SET_SCAN_PARAMS handler: update the LE scan interval and
 * window. Both values must lie in the HCI-defined range 0x0004-0x4000
 * and the window must not exceed the interval.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6542 
/* Completion callback for MGMT_OP_SET_FAST_CONNECTABLE: on success the
 * HCI_FAST_CONNECTABLE flag is updated to match the requested mode and
 * user space is notified; on failure only an error status is returned.
 * The pending command is freed in all cases.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6566 
/* hci_cmd_sync callback: forward the requested fast-connectable mode
 * (cmd->param->val) to the controller.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
6574 
/* MGMT_OP_SET_FAST_CONNECTABLE handler. Requires BR/EDR enabled and a
 * controller of at least Bluetooth 1.2. When powered off only the flag
 * is toggled; when powered the write is queued via hci_cmd_sync_queue().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just reply with the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: toggle the flag only, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6630 
/* Completion callback for MGMT_OP_SET_BREDR. set_bredr() optimistically
 * sets HCI_BREDR_ENABLED before queueing the sync work, so on failure
 * the flag must be rolled back here before reporting the error.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6653 
/* hci_cmd_sync callback for MGMT_OP_SET_BREDR: re-program fast
 * connectable, page/inquiry scan and advertising data to reflect the
 * newly enabled BR/EDR mode. Each step only runs if the previous one
 * succeeded; the first failure is returned.
 */
static int set_bredr_sync(struct hci_dev *hdev, void *data)
{
	int status;

	status = hci_write_fast_connectable_sync(hdev, false);

	if (!status)
		status = hci_update_scan_sync(hdev);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	if (!status)
		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	return status;
}
6671 
/* MGMT_OP_SET_BREDR handler: enable or disable BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller. Disabling while powered on is
 * rejected, as is re-enabling when the configuration would become
 * invalid (static address in use or secure connections enabled).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: reply with current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* Powered off: flags can be toggled without HCI traffic. When
	 * disabling BR/EDR, the BR/EDR-only settings are cleared too.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6771 
/* Completion callback for MGMT_OP_SET_SECURE_CONN: on success commit the
 * requested mode to the SC flags (0x00 = off, 0x01 = on, 0x02 = SC-only)
 * and notify user space; on failure just report the error status.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6809 
/* hci_cmd_sync callback for MGMT_OP_SET_SECURE_CONN: write the SC
 * support setting to the controller.
 */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6821 
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections mode.
 * val 0x00 disables SC, 0x01 enables it and 0x02 enables SC-only mode.
 * When no controller write is needed (powered off, no BR/EDR SC
 * support, or BR/EDR disabled) only the flags are toggled.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On BR/EDR, SC builds on SSP: reject SC while SSP is disabled */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Requested mode already active: just reply with settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6902 
/* MGMT_OP_SET_DEBUG_KEYS handler: val 0x00 discards debug keys, 0x01
 * keeps them, 0x02 additionally enables SSP debug mode on the
 * controller (generating debug keys for new pairings).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Only tell the controller when powered, SSP is on and the debug
	 * mode actually changed.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6949 
/* MGMT_OP_SET_PRIVACY handler: configure LE privacy and store the local
 * IRK. privacy 0x00 disables privacy, 0x01 enables it, 0x02 enables
 * limited privacy. Only allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force a fresh RPA to be generated on next use */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7006 
7007 static bool irk_is_valid(struct mgmt_irk_info *irk)
7008 {
7009 	switch (irk->addr.type) {
7010 	case BDADDR_LE_PUBLIC:
7011 		return true;
7012 
7013 	case BDADDR_LE_RANDOM:
7014 		/* Two most significant bits shall be set */
7015 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7016 			return false;
7017 		return true;
7018 	}
7019 
7020 	return false;
7021 }
7022 
/* MGMT_OP_LOAD_IRKS handler: replace the stored set of Identity
 * Resolving Keys with the list supplied by user space. The whole list
 * is validated first; loading then clears existing IRKs, skips any
 * blocked keys and adds the rest.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Reject the whole request if any single entry is invalid */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7093 
7094 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7095 {
7096 	if (key->initiator != 0x00 && key->initiator != 0x01)
7097 		return false;
7098 
7099 	switch (key->addr.type) {
7100 	case BDADDR_LE_PUBLIC:
7101 		return true;
7102 
7103 	case BDADDR_LE_RANDOM:
7104 		/* Two most significant bits shall be set */
7105 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7106 			return false;
7107 		return true;
7108 	}
7109 
7110 	return false;
7111 }
7112 
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the stored set of SMP
 * Long Term Keys with the list supplied by user space. Blocked and
 * invalid entries are skipped (with a warning) rather than failing the
 * whole request.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the total payload within a u16 length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto SMP key type/authentication.
		 * P256 debug keys fall through into the default case and
		 * are deliberately not loaded, as are unknown types.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7205 
/* Completion callback for MGMT_OP_GET_CONN_INFO: reply with the RSSI
 * and TX power values cached on the connection, or with the invalid
 * sentinel values when the sync request failed.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	/* Connection stashed by get_conn_info_sync() */
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
7234 
/* hci_cmd_sync callback for MGMT_OP_GET_CONN_INFO: refresh RSSI and,
 * where still unknown, the (max) TX power for the connection. The
 * looked-up connection is stored in cmd->user_data for the completion
 * handler.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7272 
/* MGMT_OP_GET_CONN_INFO handler: return RSSI and TX power for an active
 * connection. Recently cached values are returned directly; stale or
 * missing values trigger a controller query via hci_cmd_sync_queue().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7363 
/* Completion callback for MGMT_OP_GET_CLOCK_INFO: reply with the local
 * clock and, when a specific connection was queried, its piconet clock
 * and accuracy. On error only the address fields are filled in.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	/* Connection stashed by get_clock_info_sync(); may be NULL when
	 * only the local clock was requested.
	 */
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7394 
/* hci_cmd_sync callback for MGMT_OP_GET_CLOCK_INFO: read the local
 * clock and then the piconet clock of the target connection. The
 * connection is stored in cmd->user_data for the completion handler.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* First read: local clock (which = 0x00 via memset). The return
	 * value is ignored here; the local clock is reported from
	 * hdev->clock in the completion handler.
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7416 
/* MGMT_OP_GET_CLOCK_INFO handler: read local and (optionally) piconet
 * clock information. BDADDR_ANY requests only the local clock; a
 * concrete BR/EDR address must refer to an active connection. The
 * actual reads are queued via hci_cmd_sync_queue().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7480 
7481 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7482 {
7483 	struct hci_conn *conn;
7484 
7485 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7486 	if (!conn)
7487 		return false;
7488 
7489 	if (conn->dst_type != type)
7490 		return false;
7491 
7492 	if (conn->state != BT_CONNECTED)
7493 		return false;
7494 
7495 	return true;
7496 }
7497 
7498 /* This function requires the caller holds hdev->lock */
7499 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7500 			       u8 addr_type, u8 auto_connect)
7501 {
7502 	struct hci_conn_params *params;
7503 
7504 	params = hci_conn_params_add(hdev, addr, addr_type);
7505 	if (!params)
7506 		return -EIO;
7507 
7508 	if (params->auto_connect == auto_connect)
7509 		return 0;
7510 
7511 	hci_pend_le_list_del_init(params);
7512 
7513 	switch (auto_connect) {
7514 	case HCI_AUTO_CONN_DISABLED:
7515 	case HCI_AUTO_CONN_LINK_LOSS:
7516 		/* If auto connect is being disabled when we're trying to
7517 		 * connect to device, keep connecting.
7518 		 */
7519 		if (params->explicit_connect)
7520 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7521 		break;
7522 	case HCI_AUTO_CONN_REPORT:
7523 		if (params->explicit_connect)
7524 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7525 		else
7526 			hci_pend_le_list_add(params, &hdev->pend_le_reports);
7527 		break;
7528 	case HCI_AUTO_CONN_DIRECT:
7529 	case HCI_AUTO_CONN_ALWAYS:
7530 		if (!is_connected(hdev, addr, addr_type))
7531 			hci_pend_le_list_add(params, &hdev->pend_le_conns);
7532 		break;
7533 	}
7534 
7535 	params->auto_connect = auto_connect;
7536 
7537 	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7538 		   addr, addr_type, auto_connect);
7539 
7540 	return 0;
7541 }
7542 
7543 static void device_added(struct sock *sk, struct hci_dev *hdev,
7544 			 bdaddr_t *bdaddr, u8 type, u8 action)
7545 {
7546 	struct mgmt_ev_device_added ev;
7547 
7548 	bacpy(&ev.addr.bdaddr, bdaddr);
7549 	ev.addr.type = type;
7550 	ev.action = action;
7551 
7552 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7553 }
7554 
/* hci_cmd_sync callback for Add Device: re-evaluate passive scanning so
 * the newly added entry is taken into account.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7559 
/* Implementation of MGMT_OP_ADD_DEVICE.
 *
 * For a BR/EDR address the device is put on the accept list (only
 * action 0x01, "allow incoming connection", is supported).  For an LE
 * address a connection parameter entry is created whose auto-connect
 * policy is derived from the requested action (see mapping below).
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Address must be of a known type and not the wildcard address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Only actions 0x00-0x02 are defined for this command */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Accept list changed; re-evaluate page scanning */
		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the requested action onto an auto-connect policy:
	 * 0x02 = always auto-connect, 0x01 = direct connect,
	 * 0x00 = report-only (passive scan reports).
	 */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up the (possibly pre-existing) device flags for
		 * the Device Flags Changed event below.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Let passive scanning pick up the new entry */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7661 
7662 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7663 			   bdaddr_t *bdaddr, u8 type)
7664 {
7665 	struct mgmt_ev_device_removed ev;
7666 
7667 	bacpy(&ev.addr.bdaddr, bdaddr);
7668 	ev.addr.type = type;
7669 
7670 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7671 }
7672 
/* hci_cmd_sync callback for Remove Device: re-evaluate passive scanning
 * now that the entry (or all entries) has been removed.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7677 
/* Implementation of MGMT_OP_REMOVE_DEVICE.
 *
 * With a specific address, removes the matching accept-list entry
 * (BR/EDR) or connection parameter entry (LE).  With BDADDR_ANY, flushes
 * the whole accept list and all removable LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Accept list shrunk; re-evaluate page scanning */
			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries with DISABLED/EXPLICIT policy were not created
		 * through Add Device and so cannot be removed here.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* For the wildcard address only type 0x00 is valid */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Flush the whole BR/EDR accept list */
		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		/* Drop all LE connection parameters except disabled ones;
		 * entries with a pending explicit connect are kept but
		 * downgraded so the attempt can still finish.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Re-evaluate passive scanning with the updated lists */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7801 
7802 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7803 			   u16 len)
7804 {
7805 	struct mgmt_cp_load_conn_param *cp = data;
7806 	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7807 				     sizeof(struct mgmt_conn_param));
7808 	u16 param_count, expected_len;
7809 	int i;
7810 
7811 	if (!lmp_le_capable(hdev))
7812 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7813 				       MGMT_STATUS_NOT_SUPPORTED);
7814 
7815 	param_count = __le16_to_cpu(cp->param_count);
7816 	if (param_count > max_param_count) {
7817 		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7818 			   param_count);
7819 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7820 				       MGMT_STATUS_INVALID_PARAMS);
7821 	}
7822 
7823 	expected_len = struct_size(cp, params, param_count);
7824 	if (expected_len != len) {
7825 		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7826 			   expected_len, len);
7827 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7828 				       MGMT_STATUS_INVALID_PARAMS);
7829 	}
7830 
7831 	bt_dev_dbg(hdev, "param_count %u", param_count);
7832 
7833 	hci_dev_lock(hdev);
7834 
7835 	hci_conn_params_clear_disabled(hdev);
7836 
7837 	for (i = 0; i < param_count; i++) {
7838 		struct mgmt_conn_param *param = &cp->params[i];
7839 		struct hci_conn_params *hci_param;
7840 		u16 min, max, latency, timeout;
7841 		u8 addr_type;
7842 
7843 		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
7844 			   param->addr.type);
7845 
7846 		if (param->addr.type == BDADDR_LE_PUBLIC) {
7847 			addr_type = ADDR_LE_DEV_PUBLIC;
7848 		} else if (param->addr.type == BDADDR_LE_RANDOM) {
7849 			addr_type = ADDR_LE_DEV_RANDOM;
7850 		} else {
7851 			bt_dev_err(hdev, "ignoring invalid connection parameters");
7852 			continue;
7853 		}
7854 
7855 		min = le16_to_cpu(param->min_interval);
7856 		max = le16_to_cpu(param->max_interval);
7857 		latency = le16_to_cpu(param->latency);
7858 		timeout = le16_to_cpu(param->timeout);
7859 
7860 		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7861 			   min, max, latency, timeout);
7862 
7863 		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7864 			bt_dev_err(hdev, "ignoring invalid connection parameters");
7865 			continue;
7866 		}
7867 
7868 		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7869 						addr_type);
7870 		if (!hci_param) {
7871 			bt_dev_err(hdev, "failed to add connection parameters");
7872 			continue;
7873 		}
7874 
7875 		hci_param->conn_min_interval = min;
7876 		hci_param->conn_max_interval = max;
7877 		hci_param->conn_latency = latency;
7878 		hci_param->supervision_timeout = timeout;
7879 	}
7880 
7881 	hci_dev_unlock(hdev);
7882 
7883 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
7884 				 NULL, 0);
7885 }
7886 
7887 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7888 			       void *data, u16 len)
7889 {
7890 	struct mgmt_cp_set_external_config *cp = data;
7891 	bool changed;
7892 	int err;
7893 
7894 	bt_dev_dbg(hdev, "sock %p", sk);
7895 
7896 	if (hdev_is_powered(hdev))
7897 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7898 				       MGMT_STATUS_REJECTED);
7899 
7900 	if (cp->config != 0x00 && cp->config != 0x01)
7901 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7902 				         MGMT_STATUS_INVALID_PARAMS);
7903 
7904 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7905 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7906 				       MGMT_STATUS_NOT_SUPPORTED);
7907 
7908 	hci_dev_lock(hdev);
7909 
7910 	if (cp->config)
7911 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7912 	else
7913 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7914 
7915 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7916 	if (err < 0)
7917 		goto unlock;
7918 
7919 	if (!changed)
7920 		goto unlock;
7921 
7922 	err = new_options(hdev, sk);
7923 
7924 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7925 		mgmt_index_removed(hdev);
7926 
7927 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7928 			hci_dev_set_flag(hdev, HCI_CONFIG);
7929 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7930 
7931 			queue_work(hdev->req_workqueue, &hdev->power_on);
7932 		} else {
7933 			set_bit(HCI_RAW, &hdev->flags);
7934 			mgmt_index_added(hdev);
7935 		}
7936 	}
7937 
7938 unlock:
7939 	hci_dev_unlock(hdev);
7940 	return err;
7941 }
7942 
7943 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7944 			      void *data, u16 len)
7945 {
7946 	struct mgmt_cp_set_public_address *cp = data;
7947 	bool changed;
7948 	int err;
7949 
7950 	bt_dev_dbg(hdev, "sock %p", sk);
7951 
7952 	if (hdev_is_powered(hdev))
7953 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7954 				       MGMT_STATUS_REJECTED);
7955 
7956 	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7957 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7958 				       MGMT_STATUS_INVALID_PARAMS);
7959 
7960 	if (!hdev->set_bdaddr)
7961 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7962 				       MGMT_STATUS_NOT_SUPPORTED);
7963 
7964 	hci_dev_lock(hdev);
7965 
7966 	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7967 	bacpy(&hdev->public_addr, &cp->bdaddr);
7968 
7969 	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7970 	if (err < 0)
7971 		goto unlock;
7972 
7973 	if (!changed)
7974 		goto unlock;
7975 
7976 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7977 		err = new_options(hdev, sk);
7978 
7979 	if (is_configured(hdev)) {
7980 		mgmt_index_removed(hdev);
7981 
7982 		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7983 
7984 		hci_dev_set_flag(hdev, HCI_CONFIG);
7985 		hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7986 
7987 		queue_work(hdev->req_workqueue, &hdev->power_on);
7988 	}
7989 
7990 unlock:
7991 	hci_dev_unlock(hdev);
7992 	return err;
7993 }
7994 
/* Completion handler for the HCI Read Local OOB (Extended) Data request
 * issued on behalf of MGMT_OP_READ_LOCAL_OOB_EXT_DATA.  Converts the
 * controller reply into a management reply (a list of EIR fields) and,
 * on success, also broadcasts it as a Local OOB Data Updated event.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if this command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* When the request itself succeeded, derive a status from the
	 * reply skb (first byte is the HCI status).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status already went through mgmt_status()
		 * above, so this second translation looks redundant —
		 * confirm before depending on exact error codes.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: P-192 hash/randomizer only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 = class-of-device field, 18 = each 16-byte
			 * value plus its 2-byte EIR header
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections reply: P-256, optionally P-192 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only mode: don't expose P-192 values */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	/* Build the EIR payload: class of device first, then whichever
	 * hash/randomizer pairs are available.
	 */
	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Also notify other sockets that opted into OOB data events */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8117 
8118 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8119 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8120 {
8121 	struct mgmt_pending_cmd *cmd;
8122 	int err;
8123 
8124 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8125 			       cp, sizeof(*cp));
8126 	if (!cmd)
8127 		return -ENOMEM;
8128 
8129 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8130 				 read_local_oob_ext_data_complete);
8131 
8132 	if (err < 0) {
8133 		mgmt_pending_remove(cmd);
8134 		return err;
8135 	}
8136 
8137 	return 0;
8138 }
8139 
/* Implementation of MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * For BR/EDR the OOB values must be queried from the controller
 * (asynchronously via read_local_ssp_oob_req); for LE they can be
 * generated locally and returned immediately.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Work out the worst-case EIR length for the requested transport
	 * so a large enough reply buffer can be allocated up front.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Needs a round-trip to the controller; the reply
			 * is then sent from read_local_oob_ext_data_complete.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Choose static random vs public address; the 7th byte
		 * carries the address type (0x01 = random, 0x00 = public).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* LE Role field: 0x02 while advertising, 0x01 otherwise */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the fresh OOB data to interested sockets as well */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8300 
8301 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8302 {
8303 	u32 flags = 0;
8304 
8305 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8306 	flags |= MGMT_ADV_FLAG_DISCOV;
8307 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8308 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8309 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8310 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8311 	flags |= MGMT_ADV_PARAM_DURATION;
8312 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8313 	flags |= MGMT_ADV_PARAM_INTERVALS;
8314 	flags |= MGMT_ADV_PARAM_TX_POWER;
8315 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8316 
8317 	/* In extended adv TX_POWER returned from Set Adv Param
8318 	 * will be always valid.
8319 	 */
8320 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8321 		flags |= MGMT_ADV_FLAG_TX_POWER;
8322 
8323 	if (ext_adv_capable(hdev)) {
8324 		flags |= MGMT_ADV_FLAG_SEC_1M;
8325 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8326 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8327 
8328 		if (le_2m_capable(hdev))
8329 			flags |= MGMT_ADV_FLAG_SEC_2M;
8330 
8331 		if (le_coded_capable(hdev))
8332 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8333 	}
8334 
8335 	return flags;
8336 }
8337 
/* Implementation of MGMT_OP_READ_ADV_FEATURES: report supported
 * advertising flags, data-size limits and the identifiers of the
 * currently configured advertising instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One instance-number byte per configured instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the code bounds the check with
		 * adv_instance_cnt while the comment above refers to
		 * le_num_of_adv_sets — confirm which limit is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reply accordingly */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8392 
/* Number of bytes the local-name EIR field would occupy (including its
 * length/type header) for the current, possibly shortened, name.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}
8399 
8400 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8401 			   bool is_adv_data)
8402 {
8403 	u8 max_len = max_adv_len(hdev);
8404 
8405 	if (is_adv_data) {
8406 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8407 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8408 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8409 			max_len -= 3;
8410 
8411 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8412 			max_len -= 3;
8413 	} else {
8414 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8415 			max_len -= calculate_name_len(hdev);
8416 
8417 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8418 			max_len -= 4;
8419 	}
8420 
8421 	return max_len;
8422 }
8423 
8424 static bool flags_managed(u32 adv_flags)
8425 {
8426 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8427 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8428 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8429 }
8430 
8431 static bool tx_power_managed(u32 adv_flags)
8432 {
8433 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8434 }
8435 
8436 static bool name_managed(u32 adv_flags)
8437 {
8438 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8439 }
8440 
8441 static bool appearance_managed(u32 adv_flags)
8442 {
8443 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8444 }
8445 
8446 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8447 			      u8 len, bool is_adv_data)
8448 {
8449 	int i, cur_len;
8450 	u8 max_len;
8451 
8452 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8453 
8454 	if (len > max_len)
8455 		return false;
8456 
8457 	/* Make sure that the data is correctly formatted. */
8458 	for (i = 0; i < len; i += (cur_len + 1)) {
8459 		cur_len = data[i];
8460 
8461 		if (!cur_len)
8462 			continue;
8463 
8464 		if (data[i + 1] == EIR_FLAGS &&
8465 		    (!is_adv_data || flags_managed(adv_flags)))
8466 			return false;
8467 
8468 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8469 			return false;
8470 
8471 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8472 			return false;
8473 
8474 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8475 			return false;
8476 
8477 		if (data[i + 1] == EIR_APPEARANCE &&
8478 		    appearance_managed(adv_flags))
8479 			return false;
8480 
8481 		/* If the current field length would exceed the total data
8482 		 * length, then it's invalid.
8483 		 */
8484 		if (i + cur_len >= len)
8485 			return false;
8486 	}
8487 
8488 	return true;
8489 }
8490 
8491 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8492 {
8493 	u32 supported_flags, phy_flags;
8494 
8495 	/* The current implementation only supports a subset of the specified
8496 	 * flags. Also need to check mutual exclusiveness of sec flags.
8497 	 */
8498 	supported_flags = get_supported_adv_flags(hdev);
8499 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8500 	if (adv_flags & ~supported_flags ||
8501 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8502 		return false;
8503 
8504 	return true;
8505 }
8506 
/* Advertising changes are considered busy while a Set LE command is
 * still pending.
 */
static bool adv_busy(struct hci_dev *hdev)
{
	return pending_find(MGMT_OP_SET_LE, hdev);
}
8511 
8512 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8513 			     int err)
8514 {
8515 	struct adv_info *adv, *n;
8516 
8517 	bt_dev_dbg(hdev, "err %d", err);
8518 
8519 	hci_dev_lock(hdev);
8520 
8521 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8522 		u8 instance;
8523 
8524 		if (!adv->pending)
8525 			continue;
8526 
8527 		if (!err) {
8528 			adv->pending = false;
8529 			continue;
8530 		}
8531 
8532 		instance = adv->instance;
8533 
8534 		if (hdev->cur_adv_instance == instance)
8535 			cancel_adv_timeout(hdev);
8536 
8537 		hci_remove_adv_instance(hdev, instance);
8538 		mgmt_advertising_removed(sk, hdev, instance);
8539 	}
8540 
8541 	hci_dev_unlock(hdev);
8542 }
8543 
8544 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8545 {
8546 	struct mgmt_pending_cmd *cmd = data;
8547 	struct mgmt_cp_add_advertising *cp = cmd->param;
8548 	struct mgmt_rp_add_advertising rp;
8549 
8550 	memset(&rp, 0, sizeof(rp));
8551 
8552 	rp.instance = cp->instance;
8553 
8554 	if (err)
8555 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8556 				mgmt_status(err));
8557 	else
8558 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8559 				  mgmt_status(err), &rp, sizeof(rp));
8560 
8561 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8562 
8563 	mgmt_pending_free(cmd);
8564 }
8565 
8566 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8567 {
8568 	struct mgmt_pending_cmd *cmd = data;
8569 	struct mgmt_cp_add_advertising *cp = cmd->param;
8570 
8571 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8572 }
8573 
/* Handler for MGMT_OP_ADD_ADVERTISING: validate the request, register (or
 * replace) the advertising instance and, when HCI traffic is needed, queue
 * the sync work that puts it on the air. When no HCI communication is
 * required the reply is sent immediately; otherwise it is sent from
 * add_advertising_complete().
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and bounded by the controller's
	 * number of advertising sets.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must match the declared adv/scan-rsp
	 * lengths exactly.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be honored while the adapter is powered. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Both payload halves must be well-formed TLV sequences compatible
	 * with the requested flags.
	 */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cp->instance = schedule_instance;

	/* cmd is freed by add_advertising_complete(), or here if queueing
	 * the sync work failed.
	 */
	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8708 
/* Completion callback for MGMT_OP_ADD_EXT_ADV_PARAMS: report the result,
 * including the TX power and the space remaining for adv/scan-rsp data.
 * On failure the instance created by the handler is removed again.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance may already be gone; nothing to report then. */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* NOTE(review): cmd == data is always non-NULL here, so this check
	 * looks redundant — confirm before removing.
	 */
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8759 
8760 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8761 {
8762 	struct mgmt_pending_cmd *cmd = data;
8763 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8764 
8765 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8766 }
8767 
/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS: create an advertising instance
 * carrying parameters only (no adv/scan-rsp data yet). With extended
 * advertising support the parameters are programmed asynchronously and the
 * reply comes from add_ext_adv_params_complete(); otherwise success is
 * reported right away and data is applied later via ADD_EXT_ADV_DATA.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based and bounded by the controller's
	 * number of advertising sets.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Roll back the instance created above. */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		/* cmd is freed by add_ext_adv_params_complete(), or here
		 * when queueing fails.
		 */
		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8883 
8884 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8885 {
8886 	struct mgmt_pending_cmd *cmd = data;
8887 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8888 	struct mgmt_rp_add_advertising rp;
8889 
8890 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8891 
8892 	memset(&rp, 0, sizeof(rp));
8893 
8894 	rp.instance = cp->instance;
8895 
8896 	if (err)
8897 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8898 				mgmt_status(err));
8899 	else
8900 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8901 				  mgmt_status(err), &rp, sizeof(rp));
8902 
8903 	mgmt_pending_free(cmd);
8904 }
8905 
8906 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8907 {
8908 	struct mgmt_pending_cmd *cmd = data;
8909 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8910 	int err;
8911 
8912 	if (ext_adv_capable(hdev)) {
8913 		err = hci_update_adv_data_sync(hdev, cp->instance);
8914 		if (err)
8915 			return err;
8916 
8917 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8918 		if (err)
8919 			return err;
8920 
8921 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8922 	}
8923 
8924 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8925 }
8926 
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA: attach advertising and scan
 * response data to an instance previously created by ADD_EXT_ADV_PARAMS
 * and schedule it. On validation/queueing failure the half-built instance
 * is removed again (clear_new_instance path).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	/* NOTE(review): rp is never memset and only rp.instance is written
	 * before it is sent — assumes the reply struct has no other
	 * fields/padding; confirm against mgmt.h.
	 */
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	/* cmd is freed by add_ext_adv_data_complete(), or below when
	 * queueing fails.
	 */
	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9045 
9046 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9047 					int err)
9048 {
9049 	struct mgmt_pending_cmd *cmd = data;
9050 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9051 	struct mgmt_rp_remove_advertising rp;
9052 
9053 	bt_dev_dbg(hdev, "err %d", err);
9054 
9055 	memset(&rp, 0, sizeof(rp));
9056 	rp.instance = cp->instance;
9057 
9058 	if (err)
9059 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9060 				mgmt_status(err));
9061 	else
9062 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9063 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9064 
9065 	mgmt_pending_free(cmd);
9066 }
9067 
9068 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9069 {
9070 	struct mgmt_pending_cmd *cmd = data;
9071 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9072 	int err;
9073 
9074 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9075 	if (err)
9076 		return err;
9077 
9078 	if (list_empty(&hdev->adv_instances))
9079 		err = hci_disable_advertising_sync(hdev);
9080 
9081 	return err;
9082 }
9083 
/* Handler for MGMT_OP_REMOVE_ADVERTISING. A non-zero cp->instance must
 * refer to an existing instance; instance 0 skips that check and is
 * presumably "remove all instances" (handled inside
 * hci_remove_advertising_sync() — confirm). The reply is sent from
 * remove_advertising_complete() once the sync work has run.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances are registered at all. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* cmd is freed by remove_advertising_complete(), or here when
	 * queueing fails.
	 */
	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9131 
9132 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9133 			     void *data, u16 data_len)
9134 {
9135 	struct mgmt_cp_get_adv_size_info *cp = data;
9136 	struct mgmt_rp_get_adv_size_info rp;
9137 	u32 flags, supported_flags;
9138 
9139 	bt_dev_dbg(hdev, "sock %p", sk);
9140 
9141 	if (!lmp_le_capable(hdev))
9142 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9143 				       MGMT_STATUS_REJECTED);
9144 
9145 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9146 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9147 				       MGMT_STATUS_INVALID_PARAMS);
9148 
9149 	flags = __le32_to_cpu(cp->flags);
9150 
9151 	/* The current implementation only supports a subset of the specified
9152 	 * flags.
9153 	 */
9154 	supported_flags = get_supported_adv_flags(hdev);
9155 	if (flags & ~supported_flags)
9156 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9157 				       MGMT_STATUS_INVALID_PARAMS);
9158 
9159 	rp.instance = cp->instance;
9160 	rp.flags = cp->flags;
9161 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9162 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9163 
9164 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9165 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9166 }
9167 
/* Dispatch table for MGMT commands, indexed by opcode (entry 0 is the
 * unused "no command" slot). Each entry carries the handler, the minimum
 * parameter size, and option flags such as HCI_MGMT_VAR_LEN (payload may
 * be longer than the minimum), HCI_MGMT_NO_HDEV (no controller index
 * required), HCI_MGMT_UNTRUSTED, HCI_MGMT_UNCONFIGURED and
 * HCI_MGMT_HDEV_OPTIONAL.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9301 
9302 void mgmt_index_added(struct hci_dev *hdev)
9303 {
9304 	struct mgmt_ev_ext_index ev;
9305 
9306 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9307 		return;
9308 
9309 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9310 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9311 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9312 		ev.type = 0x01;
9313 	} else {
9314 		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9315 				 HCI_MGMT_INDEX_EVENTS);
9316 		ev.type = 0x00;
9317 	}
9318 
9319 	ev.bus = hdev->bus;
9320 
9321 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9322 			 HCI_MGMT_EXT_INDEX_EVENTS);
9323 }
9324 
/* Announce removal of a controller: fail all pending commands, emit the
 * legacy and extended Index Removed events, and cancel deferred work.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed via the management interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	/* Opcode 0: presumably matches every pending command (see
	 * mgmt_pending_foreach()) — all fail with Invalid Index.
	 */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_UNCONF_INDEX_EVENTS);
		ev.type = 0x01;	/* unconfigured controller */
	} else {
		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
				 HCI_MGMT_INDEX_EVENTS);
		ev.type = 0x00;	/* regular controller */
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
9357 
9358 void mgmt_power_on(struct hci_dev *hdev, int err)
9359 {
9360 	struct cmd_lookup match = { NULL, hdev };
9361 
9362 	bt_dev_dbg(hdev, "err %d", err);
9363 
9364 	hci_dev_lock(hdev);
9365 
9366 	if (!err) {
9367 		restart_le_actions(hdev);
9368 		hci_update_passive_scan(hdev);
9369 	}
9370 
9371 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9372 
9373 	new_settings(hdev, match.sk);
9374 
9375 	if (match.sk)
9376 		sock_put(match.sk);
9377 
9378 	hci_dev_unlock(hdev);
9379 }
9380 
/* Tear down mgmt state when the controller powers off: answer pending Set
 * Powered commands, fail all other pending commands, announce the cleared
 * class of device and emit New Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0: presumably matches every remaining pending command —
	 * see mgmt_pending_foreach().
	 */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Announce the class of device reset only if it was non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9414 
9415 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9416 {
9417 	struct mgmt_pending_cmd *cmd;
9418 	u8 status;
9419 
9420 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9421 	if (!cmd)
9422 		return;
9423 
9424 	if (err == -ERFKILL)
9425 		status = MGMT_STATUS_RFKILLED;
9426 	else
9427 		status = MGMT_STATUS_FAILED;
9428 
9429 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9430 
9431 	mgmt_pending_remove(cmd);
9432 }
9433 
9434 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9435 		       bool persistent)
9436 {
9437 	struct mgmt_ev_new_link_key ev;
9438 
9439 	memset(&ev, 0, sizeof(ev));
9440 
9441 	ev.store_hint = persistent;
9442 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9443 	ev.key.addr.type = BDADDR_BREDR;
9444 	ev.key.type = key->type;
9445 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9446 	ev.key.pin_len = key->pin_len;
9447 
9448 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9449 }
9450 
9451 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9452 {
9453 	switch (ltk->type) {
9454 	case SMP_LTK:
9455 	case SMP_LTK_RESPONDER:
9456 		if (ltk->authenticated)
9457 			return MGMT_LTK_AUTHENTICATED;
9458 		return MGMT_LTK_UNAUTHENTICATED;
9459 	case SMP_LTK_P256:
9460 		if (ltk->authenticated)
9461 			return MGMT_LTK_P256_AUTH;
9462 		return MGMT_LTK_P256_UNAUTH;
9463 	case SMP_LTK_P256_DEBUG:
9464 		return MGMT_LTK_P256_DEBUG;
9465 	}
9466 
9467 	return MGMT_LTK_UNAUTHENTICATED;
9468 }
9469 
/* Broadcast MGMT_EV_NEW_LONG_TERM_KEY for a newly distributed LTK.
 * @persistent is the caller's store hint; it is overridden to 0 for
 * non-identity random addresses (see comment below).
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK (as opposed to SMP_LTK_RESPONDER) marks the key from
	 * the initiator side of the pairing.
	 */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9512 
9513 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9514 {
9515 	struct mgmt_ev_new_irk ev;
9516 
9517 	memset(&ev, 0, sizeof(ev));
9518 
9519 	ev.store_hint = persistent;
9520 
9521 	bacpy(&ev.rpa, &irk->rpa);
9522 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9523 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9524 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9525 
9526 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9527 }
9528 
9529 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9530 		   bool persistent)
9531 {
9532 	struct mgmt_ev_new_csrk ev;
9533 
9534 	memset(&ev, 0, sizeof(ev));
9535 
9536 	/* Devices using resolvable or non-resolvable random addresses
9537 	 * without providing an identity resolving key don't require
9538 	 * to store signature resolving keys. Their addresses will change
9539 	 * the next time around.
9540 	 *
9541 	 * Only when a remote device provides an identity address
9542 	 * make sure the signature resolving key is stored. So allow
9543 	 * static random and public addresses here.
9544 	 */
9545 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9546 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9547 		ev.store_hint = 0x00;
9548 	else
9549 		ev.store_hint = persistent;
9550 
9551 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9552 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9553 	ev.key.type = csrk->type;
9554 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9555 
9556 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9557 }
9558 
9559 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9560 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9561 			 u16 max_interval, u16 latency, u16 timeout)
9562 {
9563 	struct mgmt_ev_new_conn_param ev;
9564 
9565 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9566 		return;
9567 
9568 	memset(&ev, 0, sizeof(ev));
9569 	bacpy(&ev.addr.bdaddr, bdaddr);
9570 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9571 	ev.store_hint = store_hint;
9572 	ev.min_interval = cpu_to_le16(min_interval);
9573 	ev.max_interval = cpu_to_le16(max_interval);
9574 	ev.latency = cpu_to_le16(latency);
9575 	ev.timeout = cpu_to_le16(timeout);
9576 
9577 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9578 }
9579 
9580 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9581 			   u8 *name, u8 name_len)
9582 {
9583 	struct sk_buff *skb;
9584 	struct mgmt_ev_device_connected *ev;
9585 	u16 eir_len = 0;
9586 	u32 flags = 0;
9587 
9588 	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
9589 		return;
9590 
9591 	/* allocate buff for LE or BR/EDR adv */
9592 	if (conn->le_adv_data_len > 0)
9593 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9594 				     sizeof(*ev) + conn->le_adv_data_len);
9595 	else
9596 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9597 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9598 				     eir_precalc_len(sizeof(conn->dev_class)));
9599 
9600 	ev = skb_put(skb, sizeof(*ev));
9601 	bacpy(&ev->addr.bdaddr, &conn->dst);
9602 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9603 
9604 	if (conn->out)
9605 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9606 
9607 	ev->flags = __cpu_to_le32(flags);
9608 
9609 	/* We must ensure that the EIR Data fields are ordered and
9610 	 * unique. Keep it simple for now and avoid the problem by not
9611 	 * adding any BR/EDR data to the LE adv.
9612 	 */
9613 	if (conn->le_adv_data_len > 0) {
9614 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9615 		eir_len = conn->le_adv_data_len;
9616 	} else {
9617 		if (name)
9618 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9619 
9620 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9621 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9622 						    conn->dev_class, sizeof(conn->dev_class));
9623 	}
9624 
9625 	ev->eir_len = cpu_to_le16(eir_len);
9626 
9627 	mgmt_event_skb(skb, NULL);
9628 }
9629 
9630 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9631 {
9632 	struct sock **sk = data;
9633 
9634 	cmd->cmd_complete(cmd, 0);
9635 
9636 	*sk = cmd->sk;
9637 	sock_hold(*sk);
9638 
9639 	mgmt_pending_remove(cmd);
9640 }
9641 
/* mgmt_pending_foreach() callback: emit MGMT_EV_DEVICE_UNPAIRED for
 * the command's target and then complete the pending
 * MGMT_OP_UNPAIR_DEVICE with success.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	/* Event goes out first so other listeners see it before the
	 * issuing socket receives its command response.
	 */
	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9652 
9653 bool mgmt_powering_down(struct hci_dev *hdev)
9654 {
9655 	struct mgmt_pending_cmd *cmd;
9656 	struct mgmt_mode *cp;
9657 
9658 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9659 	if (!cmd)
9660 		return false;
9661 
9662 	cp = cmd->param;
9663 	if (!cp->val)
9664 		return true;
9665 
9666 	return false;
9667 }
9668 
/* Broadcast MGMT_EV_DEVICE_DISCONNECTED for a terminated connection,
 * completing any pending DISCONNECT command first (its socket is then
 * skipped for the broadcast) and answering pending UNPAIR_DEVICE
 * commands afterwards.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* Only report connections that were announced via
	 * MGMT_EV_DEVICE_CONNECTED in the first place.
	 */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp stores a referenced socket into sk. */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9700 
9701 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9702 			    u8 link_type, u8 addr_type, u8 status)
9703 {
9704 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9705 	struct mgmt_cp_disconnect *cp;
9706 	struct mgmt_pending_cmd *cmd;
9707 
9708 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9709 			     hdev);
9710 
9711 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9712 	if (!cmd)
9713 		return;
9714 
9715 	cp = cmd->param;
9716 
9717 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9718 		return;
9719 
9720 	if (cp->addr.type != bdaddr_type)
9721 		return;
9722 
9723 	cmd->cmd_complete(cmd, mgmt_status(status));
9724 	mgmt_pending_remove(cmd);
9725 }
9726 
9727 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9728 			 u8 addr_type, u8 status)
9729 {
9730 	struct mgmt_ev_connect_failed ev;
9731 
9732 	bacpy(&ev.addr.bdaddr, bdaddr);
9733 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9734 	ev.status = mgmt_status(status);
9735 
9736 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9737 }
9738 
9739 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9740 {
9741 	struct mgmt_ev_pin_code_request ev;
9742 
9743 	bacpy(&ev.addr.bdaddr, bdaddr);
9744 	ev.addr.type = BDADDR_BREDR;
9745 	ev.secure = secure;
9746 
9747 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9748 }
9749 
9750 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9751 				  u8 status)
9752 {
9753 	struct mgmt_pending_cmd *cmd;
9754 
9755 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9756 	if (!cmd)
9757 		return;
9758 
9759 	cmd->cmd_complete(cmd, mgmt_status(status));
9760 	mgmt_pending_remove(cmd);
9761 }
9762 
9763 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9764 				      u8 status)
9765 {
9766 	struct mgmt_pending_cmd *cmd;
9767 
9768 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9769 	if (!cmd)
9770 		return;
9771 
9772 	cmd->cmd_complete(cmd, mgmt_status(status));
9773 	mgmt_pending_remove(cmd);
9774 }
9775 
9776 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9777 			      u8 link_type, u8 addr_type, u32 value,
9778 			      u8 confirm_hint)
9779 {
9780 	struct mgmt_ev_user_confirm_request ev;
9781 
9782 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9783 
9784 	bacpy(&ev.addr.bdaddr, bdaddr);
9785 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9786 	ev.confirm_hint = confirm_hint;
9787 	ev.value = cpu_to_le32(value);
9788 
9789 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9790 			  NULL);
9791 }
9792 
9793 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9794 			      u8 link_type, u8 addr_type)
9795 {
9796 	struct mgmt_ev_user_passkey_request ev;
9797 
9798 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9799 
9800 	bacpy(&ev.addr.bdaddr, bdaddr);
9801 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9802 
9803 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9804 			  NULL);
9805 }
9806 
9807 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9808 				      u8 link_type, u8 addr_type, u8 status,
9809 				      u8 opcode)
9810 {
9811 	struct mgmt_pending_cmd *cmd;
9812 
9813 	cmd = pending_find(opcode, hdev);
9814 	if (!cmd)
9815 		return -ENOENT;
9816 
9817 	cmd->cmd_complete(cmd, mgmt_status(status));
9818 	mgmt_pending_remove(cmd);
9819 
9820 	return 0;
9821 }
9822 
/* Completion hook for MGMT_OP_USER_CONFIRM_REPLY. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9829 
/* Completion hook for MGMT_OP_USER_CONFIRM_NEG_REPLY. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9837 
/* Completion hook for MGMT_OP_USER_PASSKEY_REPLY. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9844 
/* Completion hook for MGMT_OP_USER_PASSKEY_NEG_REPLY. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9852 
9853 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9854 			     u8 link_type, u8 addr_type, u32 passkey,
9855 			     u8 entered)
9856 {
9857 	struct mgmt_ev_passkey_notify ev;
9858 
9859 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9860 
9861 	bacpy(&ev.addr.bdaddr, bdaddr);
9862 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9863 	ev.passkey = __cpu_to_le32(passkey);
9864 	ev.entered = entered;
9865 
9866 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9867 }
9868 
/* Broadcast MGMT_EV_AUTH_FAILED for @conn and, if a pairing command is
 * pending for this connection, complete it with the translated status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The socket that issued the pairing command gets the command
	 * response below, so skip it for the broadcast.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9889 
/* Completion handler for an authentication-enable change triggered by
 * MGMT_OP_SET_LINK_SECURITY. On error, fail the pending commands with
 * the translated status; on success, sync HCI_LINK_SECURITY with the
 * controller's HCI_AUTH state and emit NEW_SETTINGS if it changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state into the mgmt flag and note
	 * whether this was an actual transition.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* Drop the reference settings_rsp took on the recorded socket. */
	if (match.sk)
		sock_put(match.sk);
}
9916 
9917 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9918 {
9919 	struct cmd_lookup *match = data;
9920 
9921 	if (match->sk == NULL) {
9922 		match->sk = cmd->sk;
9923 		sock_hold(match->sk);
9924 	}
9925 }
9926 
/* Completion handler for operations that can change the class of
 * device (SET_DEV_CLASS, ADD_UUID, REMOVE_UUID). On success the new
 * class is broadcast, skipping nobody at the event level; the first
 * pending socket is only recorded so its reference can be dropped.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* sk_lookup references the first pending command's socket. */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* Class of device is always exactly 3 bytes. */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9945 
/* Completion handler for a local name change. Emits
 * MGMT_EV_LOCAL_NAME_CHANGED unless the change happened as part of
 * powering on, in which case no mgmt signals are sent.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Not a user-space initiated change: keep the stored
		 * name in sync with what the controller now uses.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9973 
9974 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9975 {
9976 	int i;
9977 
9978 	for (i = 0; i < uuid_count; i++) {
9979 		if (!memcmp(uuid, uuids[i], 16))
9980 			return true;
9981 	}
9982 
9983 	return false;
9984 }
9985 
/* Walk the EIR/advertising data in @eir (@eir_len bytes) and return
 * true if any contained 16-, 32- or 128-bit service UUID matches one
 * of the @uuid_count 128-bit UUIDs in @uuids. Shorter UUIDs are
 * expanded to 128 bits via the Bluetooth base UUID before comparing.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0]; /* covers the type byte + data */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the EIR data. */
		if (field_len == 0)
			break;

		/* Stop on a truncated field. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* EIR UUIDs are little endian; splice each 16-bit
			 * value into bytes 12-13 of the base UUID.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit values occupy bytes 12-15 of the base. */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs are compared as-is. */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance to the next length-prefixed field. */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10040 
/* Queue a delayed LE scan restart so the controller's duplicate
 * filtering is reset and updated results (e.g. new RSSI values) get
 * reported again.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart if it would only take effect after the
	 * current scan window is already scheduled to end.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
10055 
/* Apply the service-discovery filter (RSSI threshold and UUID list) to
 * a single scan result. Returns true when the result should be
 * reported. May queue an LE scan restart as a side effect when the
 * strict duplicate filter quirk is set.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10100 
10101 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10102 				  bdaddr_t *bdaddr, u8 addr_type)
10103 {
10104 	struct mgmt_ev_adv_monitor_device_lost ev;
10105 
10106 	ev.monitor_handle = cpu_to_le16(handle);
10107 	bacpy(&ev.addr.bdaddr, bdaddr);
10108 	ev.addr.type = addr_type;
10109 
10110 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10111 		   NULL);
10112 }
10113 
/* Re-package an already-built DEVICE_FOUND event skb as an
 * ADV_MONITOR_DEVICE_FOUND event carrying @handle and send it.
 * The original @skb is copied, not consumed.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	/* New event size: the monitor_handle prefix plus the complete
	 * DEVICE_FOUND payload, copied over verbatim below.
	 */
	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10143 
/* Route a prepared DEVICE_FOUND skb to regular discovery listeners
 * and/or Advertisement Monitor listeners. Consumes @skb on every path
 * (it is either sent via mgmt_event_skb() or freed).
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: remains false only if every monitored
	 * device has already been notified.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* First report for this monitored device: send
			 * the monitor event with its real handle.
			 */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Finally send or drop the DEVICE_FOUND skb itself. */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10207 
/* Report an LE advertisement to mesh listeners via
 * MGMT_EV_MESH_DEVICE_FOUND, but only when the advertising data or
 * scan response contains at least one of the AD types registered in
 * hdev->mesh_ad_types. An empty filter list accepts everything.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* An unset first slot means no filtering is requested. */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	/* No requested AD type present: drop the report silently. */
	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10273 
/* Report a discovered device to user space, routing the result through
 * mesh reporting, service-discovery filtering, limited-discovery
 * filtering and finally the Advertisement Monitor dispatch.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh listeners get their own copy of every LE report. */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a CoD EIR field when a class was supplied but the
	 * EIR data does not already contain one.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Ownership of skb passes on: it is either sent or freed there. */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10365 
10366 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10367 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10368 {
10369 	struct sk_buff *skb;
10370 	struct mgmt_ev_device_found *ev;
10371 	u16 eir_len = 0;
10372 	u32 flags = 0;
10373 
10374 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10375 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10376 
10377 	ev = skb_put(skb, sizeof(*ev));
10378 	bacpy(&ev->addr.bdaddr, bdaddr);
10379 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10380 	ev->rssi = rssi;
10381 
10382 	if (name)
10383 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10384 	else
10385 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10386 
10387 	ev->eir_len = cpu_to_le16(eir_len);
10388 	ev->flags = cpu_to_le32(flags);
10389 
10390 	mgmt_event_skb(skb, NULL);
10391 }
10392 
10393 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10394 {
10395 	struct mgmt_ev_discovering ev;
10396 
10397 	bt_dev_dbg(hdev, "discovering %u", discovering);
10398 
10399 	memset(&ev, 0, sizeof(ev));
10400 	ev.type = hdev->discovery.type;
10401 	ev.discovering = discovering;
10402 
10403 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10404 }
10405 
10406 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10407 {
10408 	struct mgmt_ev_controller_suspend ev;
10409 
10410 	ev.suspend_state = state;
10411 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10412 }
10413 
10414 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10415 		   u8 addr_type)
10416 {
10417 	struct mgmt_ev_controller_resume ev;
10418 
10419 	ev.wake_reason = reason;
10420 	if (bdaddr) {
10421 		bacpy(&ev.addr.bdaddr, bdaddr);
10422 		ev.addr.type = addr_type;
10423 	} else {
10424 		memset(&ev.addr, 0, sizeof(ev.addr));
10425 	}
10426 
10427 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10428 }
10429 
/* Registration descriptor for the HCI control channel handled by this
 * file: the command dispatch table plus the per-hdev init hook.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10436 
/* Register the management control channel with the HCI core. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10441 
/* Unregister the management control channel from the HCI core. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10446 
10447 void mgmt_cleanup(struct sock *sk)
10448 {
10449 	struct mgmt_mesh_tx *mesh_tx;
10450 	struct hci_dev *hdev;
10451 
10452 	read_lock(&hci_dev_list_lock);
10453 
10454 	list_for_each_entry(hdev, &hci_dev_list, list) {
10455 		do {
10456 			mesh_tx = mgmt_mesh_next(hdev, sk);
10457 
10458 			if (mesh_tx)
10459 				mesh_send_complete(hdev, mesh_tx, true);
10460 		} while (mesh_tx);
10461 	}
10462 
10463 	read_unlock(&hci_dev_list_lock);
10464 }
10465