xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 95f7a972194ad20696c36523b54c19a3567e0697)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Management opcodes accepted on a trusted control socket. The table
 * order is the order in which the opcodes are reported by the
 * Read Management Supported Commands command (Read Commands reply).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
137 
/* Management events that may be delivered to a trusted control socket;
 * reported to user space via the Read Commands reply, after the opcodes.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
184 
/* Subset of management opcodes permitted on untrusted (non-privileged)
 * control sockets; all of them are read-only queries.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
197 
/* Subset of management events delivered to untrusted control sockets;
 * limited to index and configuration state notifications.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
212 
213 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
214 
215 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
216 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
217 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code (0x00 = success); see
 * mgmt_status() for the bounds-checked lookup. Each entry's comment
 * names the HCI error it translates.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285 
286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Send an index-related event on the control channel, delivered only to
 * sockets that have the given HCI socket flag set; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
329 
/* Send an event on the control channel limited to sockets carrying the
 * given flag, optionally skipping the originating socket @skip_sk.
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
336 
/* Send an event on the control channel to all trusted sockets,
 * optionally skipping the originating socket @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
343 
/* Like mgmt_event() but for a pre-built event skb; delivered to all
 * trusted control sockets except @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349 
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 		return ADDR_LE_DEV_PUBLIC;
354 	else
355 		return ADDR_LE_DEV_RANDOM;
356 }
357 
/* Fill a mgmt_rp_read_version structure (passed as an untyped buffer)
 * with the interface version and little-endian revision.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365 
/* Handler for MGMT_OP_READ_VERSION: report the management interface
 * version/revision. Works without a controller (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378 
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380 			 u16 data_len)
381 {
382 	struct mgmt_rp_read_commands *rp;
383 	u16 num_commands, num_events;
384 	size_t rp_size;
385 	int i, err;
386 
387 	bt_dev_dbg(hdev, "sock %p", sk);
388 
389 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 		num_commands = ARRAY_SIZE(mgmt_commands);
391 		num_events = ARRAY_SIZE(mgmt_events);
392 	} else {
393 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
395 	}
396 
397 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398 
399 	rp = kmalloc(rp_size, GFP_KERNEL);
400 	if (!rp)
401 		return -ENOMEM;
402 
403 	rp->num_commands = cpu_to_le16(num_commands);
404 	rp->num_events = cpu_to_le16(num_events);
405 
406 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 		__le16 *opcode = rp->opcodes;
408 
409 		for (i = 0; i < num_commands; i++, opcode++)
410 			put_unaligned_le16(mgmt_commands[i], opcode);
411 
412 		for (i = 0; i < num_events; i++, opcode++)
413 			put_unaligned_le16(mgmt_events[i], opcode);
414 	} else {
415 		__le16 *opcode = rp->opcodes;
416 
417 		for (i = 0; i < num_commands; i++, opcode++)
418 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419 
420 		for (i = 0; i < num_events; i++, opcode++)
421 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422 	}
423 
424 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425 				rp, rp_size);
426 	kfree(rp);
427 
428 	return err;
429 }
430 
/* Handler for MGMT_OP_READ_INDEX_LIST: report the ids of all configured
 * controllers. Runs entirely under hci_dev_list_lock (read side).
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of entries, used only to
	 * size the reply buffer. The second pass applies stricter filters,
	 * so the final count may be smaller.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: still holding the read lock, must not sleep */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, skipping controllers that are
	 * still in setup/config or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (!hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the length with the (possibly smaller) final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
488 
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list()
 * but reporting only controllers still flagged HCI_UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound used to size the reply buffer */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: still holding the read lock, must not sleep */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: collect indexes, skipping controllers in
	 * setup/config or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the length with the (possibly smaller) final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: report all controllers
 * (configured and unconfigured) with their bus type. As a side effect,
 * switches the calling socket over to extended index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound used to size the reply buffer */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list)
		count++;

	/* GFP_ATOMIC: still holding the read lock, must not sleep */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the entries, skipping controllers in
	 * setup/config or claimed by a user channel.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* type 0x01 = unconfigured, 0x00 = configured controller */
		if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
			rp->entry[count].type = 0x01;
		else
			rp->entry[count].type = 0x00;

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
612 
613 static bool is_configured(struct hci_dev *hdev)
614 {
615 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
616 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
617 		return false;
618 
619 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
620 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
621 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
622 		return false;
623 
624 	return true;
625 }
626 
/* Return (little-endian) the configuration options that still need to
 * be provided before the controller is usable: external configuration
 * and/or a public address. Keep in sync with is_configured().
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
642 
/* Broadcast a New Configuration Options event with the currently
 * missing options to sockets subscribed to option events, skipping
 * the originating socket.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
650 
/* Complete @opcode with a reply carrying the currently missing
 * configuration options.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
658 
/* Handler for MGMT_OP_READ_CONFIG_INFO: report the manufacturer id plus
 * which configuration options are supported and which are still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address can only be configured if the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
686 
/* Build the bitmask of PHYs the controller supports, derived from the
 * BR/EDR LMP feature bits and the LE feature bits.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for BR/EDR */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR 3M PHYs are nested under 2M capability; slot
		 * counts depend on the EDR slot feature bits.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory for LE */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
738 
/* Build the bitmask of currently selected PHYs. For BR/EDR the EDR
 * bits in hdev->pkt_type are "shall not be used" flags, so a cleared
 * bit means the PHY is selected; for LE the default TX/RX PHY masks
 * are consulted directly.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR packet-type bits are inverted: set means
			 * the packet type is disabled.
			 */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
801 
802 static u32 get_configurable_phys(struct hci_dev *hdev)
803 {
804 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
805 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
806 }
807 
808 static u32 get_supported_settings(struct hci_dev *hdev)
809 {
810 	u32 settings = 0;
811 
812 	settings |= MGMT_SETTING_POWERED;
813 	settings |= MGMT_SETTING_BONDABLE;
814 	settings |= MGMT_SETTING_DEBUG_KEYS;
815 	settings |= MGMT_SETTING_CONNECTABLE;
816 	settings |= MGMT_SETTING_DISCOVERABLE;
817 
818 	if (lmp_bredr_capable(hdev)) {
819 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
820 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
821 		settings |= MGMT_SETTING_BREDR;
822 		settings |= MGMT_SETTING_LINK_SECURITY;
823 
824 		if (lmp_ssp_capable(hdev)) {
825 			settings |= MGMT_SETTING_SSP;
826 		}
827 
828 		if (lmp_sc_capable(hdev))
829 			settings |= MGMT_SETTING_SECURE_CONN;
830 
831 		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
832 			     &hdev->quirks))
833 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
834 	}
835 
836 	if (lmp_le_capable(hdev)) {
837 		settings |= MGMT_SETTING_LE;
838 		settings |= MGMT_SETTING_SECURE_CONN;
839 		settings |= MGMT_SETTING_PRIVACY;
840 		settings |= MGMT_SETTING_STATIC_ADDRESS;
841 		settings |= MGMT_SETTING_ADVERTISING;
842 	}
843 
844 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
845 	    hdev->set_bdaddr)
846 		settings |= MGMT_SETTING_CONFIGURATION;
847 
848 	if (cis_central_capable(hdev))
849 		settings |= MGMT_SETTING_CIS_CENTRAL;
850 
851 	if (cis_peripheral_capable(hdev))
852 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
853 
854 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
855 
856 	return settings;
857 }
858 
/* Build the bitmask of settings that are currently active, derived
 * from the hdev flags and ISO capabilities.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
938 
/* Look up a pending mgmt command for @opcode on the control channel;
 * returns NULL when none is outstanding.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
943 
/* Return the LE advertising flags (general/limited discoverable) that
 * match the controller's discoverable state, or 0 for non-discoverable.
 */
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* 0x01 = general discoverable, 0x02 = limited discoverable */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}
967 
/* Return whether the controller is (or is about to become) connectable,
 * preferring the value from a pending Set Connectable command.
 */
bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
984 
/* hci_cmd_sync callback: push the current EIR data and class of device
 * to the controller after the service cache is flushed.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
992 
/* Delayed work handler that expires the service cache; only queues the
 * sync update if the cache flag was actually set.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1003 
/* hci_cmd_sync callback: restart advertising so a fresh RPA gets
 * generated and programmed.
 */
static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}
1015 
/* Delayed work handler for RPA timeout: mark the RPA as expired and,
 * if advertising is active, queue a restart to pick up a new address.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1030 
1031 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1032 
/* Delayed work handler: the discoverable timeout has fired. Clear the
 * discoverable flags, reset the stored timeout, push the new state to
 * the controller and broadcast the changed settings to mgmt listeners.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1057 
1058 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1059 
1060 static void mesh_send_complete(struct hci_dev *hdev,
1061 			       struct mgmt_mesh_tx *mesh_tx, bool silent)
1062 {
1063 	u8 handle = mesh_tx->handle;
1064 
1065 	if (!silent)
1066 		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1067 			   sizeof(handle), NULL);
1068 
1069 	mgmt_mesh_remove(mesh_tx);
1070 }
1071 
/* hci_cmd_sync callback: a mesh transmission window has ended. Stop
 * advertising and complete the head of the mesh TX queue, if any.
 * @data is unused.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1085 
1086 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1087 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback for mesh_send_done_sync: kick off the next queued
 * mesh transmission, if one exists. The incoming @err is ignored and
 * reused as a local for the queueing result.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	/* Nothing left in the mesh TX queue */
	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	/* If the send could not even be queued, complete the entry
	 * (non-silently, so userspace is told); otherwise mark a mesh
	 * transmission as in flight.
	 */
	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1103 
/* Delayed work handler: a mesh send interval has expired. If a mesh
 * transmission is still marked as in flight, queue the sync teardown
 * (mesh_send_done_sync) chained with mesh_next to start the next one.
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1114 
/* One-time per-controller mgmt initialization, triggered the first time
 * a mgmt command touches this hdev. Sets up the delayed work handlers
 * and switches the device into mgmt-controlled mode. Idempotent: exits
 * early once HCI_MGMT is set.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1136 
/* Handler for MGMT_OP_READ_INFO: reply with the controller's address,
 * version, manufacturer, supported/current settings, class of device
 * and names. @data/@data_len are unused (the command has no parameters).
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1166 
/* Build the EIR payload for the extended-info reply/event: class of
 * device (if BR/EDR is enabled), appearance (if LE is enabled), and the
 * complete and short local names. Returns the number of bytes written.
 * NOTE(review): @eir is not bounds-checked here — callers must provide
 * a buffer large enough for all appended fields.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1190 
/* Handler for MGMT_OP_READ_EXT_INFO: like READ_INFO but with the
 * class/appearance/name information encoded as EIR data. Also switches
 * this socket over to the extended-info event model.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1230 
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (with fresh EIR data) to all
 * sockets that opted into extended-info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1246 
/* Send a successful command-complete for @opcode carrying the current
 * settings bitmask as its payload — the standard reply shape for the
 * various Set-* mode commands.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1254 
1255 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1256 {
1257 	struct mgmt_ev_advertising_added ev;
1258 
1259 	ev.instance = instance;
1260 
1261 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1262 }
1263 
1264 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1265 			      u8 instance)
1266 {
1267 	struct mgmt_ev_advertising_removed ev;
1268 
1269 	ev.instance = instance;
1270 
1271 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1272 }
1273 
/* Cancel a pending advertising-instance expiry, if one is armed, and
 * reset the stored timeout so it is not re-armed.
 */
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
1281 
/* Re-populate the pending LE connection/report lists from the stored
 * connection parameters, typically after power-on. This function
 * requires the caller holds hdev->lock.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1306 
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets subscribed to setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1314 
/* hci_cmd_sync completion for set_powered_sync: respond to the pending
 * MGMT_OP_SET_POWERED command and, on successful power-on, restart LE
 * actions and broadcast the new settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (err == -ECANCELED ||
	    cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1351 
/* hci_cmd_sync callback: perform the actual power state change for a
 * pending MGMT_OP_SET_POWERED command. @data is the pending command.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return -ECANCELED;

	cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1367 
/* Handler for MGMT_OP_SET_POWERED: validate the request, short-circuit
 * if the power state is already as requested, and otherwise queue the
 * state change on the cmd_sync machinery.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo back the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1418 
/* Broadcast the current settings to every subscribed mgmt socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1423 
/* Context passed through mgmt_pending_foreach() callbacks: @sk records
 * the first responded socket (so a subsequent broadcast can skip it),
 * @hdev is the controller being iterated, and @mgmt_status carries an
 * error status for command-complete handlers.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1429 
/* mgmt_pending_foreach callback: answer @cmd with the current settings,
 * remember the first responded socket in the cmd_lookup context (taking
 * a reference), and free the pending command.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1445 
/* mgmt_pending_foreach callback: fail @cmd with the status pointed to
 * by @data and remove the pending entry.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1453 
/* mgmt_pending_foreach callback: complete @cmd using its own
 * cmd_complete handler when it has one, otherwise fall back to a plain
 * status response. The status comes from the cmd_lookup context.
 */
static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	/* dequeue cmd_sync entries using cmd as data as that is about to be
	 * removed/freed.
	 */
	hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL);

	if (cmd->cmd_complete) {
		cmd->cmd_complete(cmd, match->mgmt_status);
		mgmt_pending_remove(cmd);

		return;
	}

	/* No per-command handler: cmd_status_rsp reads the status, which
	 * is the first field of struct cmd_lookup, so passing data works.
	 */
	cmd_status_rsp(cmd, data);
}
1472 
/* cmd_complete handler: echo the command's own parameters back as the
 * response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1478 
/* cmd_complete handler: respond with only the leading mgmt_addr_info
 * portion of the command's parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1484 
1485 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1486 {
1487 	if (!lmp_bredr_capable(hdev))
1488 		return MGMT_STATUS_NOT_SUPPORTED;
1489 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1490 		return MGMT_STATUS_REJECTED;
1491 	else
1492 		return MGMT_STATUS_SUCCESS;
1493 }
1494 
1495 static u8 mgmt_le_support(struct hci_dev *hdev)
1496 {
1497 	if (!lmp_le_capable(hdev))
1498 		return MGMT_STATUS_NOT_SUPPORTED;
1499 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1500 		return MGMT_STATUS_REJECTED;
1501 	else
1502 		return MGMT_STATUS_SUCCESS;
1503 }
1504 
1505 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1506 					   int err)
1507 {
1508 	struct mgmt_pending_cmd *cmd = data;
1509 
1510 	bt_dev_dbg(hdev, "err %d", err);
1511 
1512 	/* Make sure cmd still outstanding. */
1513 	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1514 		return;
1515 
1516 	hci_dev_lock(hdev);
1517 
1518 	if (err) {
1519 		u8 mgmt_err = mgmt_status(err);
1520 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1521 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1522 		goto done;
1523 	}
1524 
1525 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1526 	    hdev->discov_timeout > 0) {
1527 		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1528 		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1529 	}
1530 
1531 	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1532 	new_settings(hdev, cmd->sk);
1533 
1534 done:
1535 	mgmt_pending_remove(cmd);
1536 	hci_dev_unlock(hdev);
1537 }
1538 
/* hci_cmd_sync callback: push the discoverable state (already stored in
 * the hdev flags) to the controller. @data is unused.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1545 
/* Handler for MGMT_OP_SET_DISCOVERABLE: validate the mode (0x00 off,
 * 0x01 general, 0x02 limited) and timeout combination, update the
 * discoverable flags and stored timeout, and queue the controller-side
 * update when the device is powered.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1678 
1679 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1680 					  int err)
1681 {
1682 	struct mgmt_pending_cmd *cmd = data;
1683 
1684 	bt_dev_dbg(hdev, "err %d", err);
1685 
1686 	/* Make sure cmd still outstanding. */
1687 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1688 		return;
1689 
1690 	hci_dev_lock(hdev);
1691 
1692 	if (err) {
1693 		u8 mgmt_err = mgmt_status(err);
1694 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1695 		goto done;
1696 	}
1697 
1698 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1699 	new_settings(hdev, cmd->sk);
1700 
1701 done:
1702 	if (cmd)
1703 		mgmt_pending_remove(cmd);
1704 
1705 	hci_dev_unlock(hdev);
1706 }
1707 
/* Powered-off path for MGMT_OP_SET_CONNECTABLE: only flip the flags
 * (disabling connectable also clears discoverable), reply with the
 * current settings and broadcast them if anything changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1736 
/* hci_cmd_sync callback: push the connectable state (already stored in
 * the hdev flags) to the controller. @data is unused.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1743 
/* Handler for MGMT_OP_SET_CONNECTABLE: validate, take the flags-only
 * shortcut when powered off, otherwise update the flags and queue the
 * controller-side update.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also ends discoverable mode */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1803 
/* Handler for MGMT_OP_SET_BONDABLE: flip the HCI_BONDABLE flag, reply
 * with the current settings and broadcast them if the flag changed.
 * No HCI traffic is needed for the flag itself.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1841 
/* Handler for MGMT_OP_SET_LINK_SECURITY: toggle BR/EDR link-level
 * security (authentication). When powered off only the flag is flipped;
 * when powered, HCI_OP_WRITE_AUTH_ENABLE is sent and the reply is
 * deferred to the pending command's completion.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just reply */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1910 
1911 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1912 {
1913 	struct cmd_lookup match = { NULL, hdev };
1914 	struct mgmt_pending_cmd *cmd = data;
1915 	struct mgmt_mode *cp = cmd->param;
1916 	u8 enable = cp->val;
1917 	bool changed;
1918 
1919 	/* Make sure cmd still outstanding. */
1920 	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1921 		return;
1922 
1923 	if (err) {
1924 		u8 mgmt_err = mgmt_status(err);
1925 
1926 		if (enable && hci_dev_test_and_clear_flag(hdev,
1927 							  HCI_SSP_ENABLED)) {
1928 			new_settings(hdev, NULL);
1929 		}
1930 
1931 		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1932 				     &mgmt_err);
1933 		return;
1934 	}
1935 
1936 	if (enable) {
1937 		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1938 	} else {
1939 		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1940 	}
1941 
1942 	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1943 
1944 	if (changed)
1945 		new_settings(hdev, match.sk);
1946 
1947 	if (match.sk)
1948 		sock_put(match.sk);
1949 
1950 	hci_update_eir_sync(hdev);
1951 }
1952 
/* hci_cmd_sync callback: write the requested SSP mode to the
 * controller. The HCI_SSP_ENABLED flag is set optimistically before the
 * write and rolled back if the write fails.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* Undo the optimistic flag set on failure */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1970 
/* Handler for MGMT_OP_SET_SSP: enable/disable Secure Simple Pairing.
 * When powered off only the flag is flipped; when powered the mode
 * write is queued via the cmd_sync machinery.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested mode: just reply with the settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2045 
/* Handler for MGMT_OP_SET_HS: High Speed (AMP) support has been
 * removed, so this command is always rejected as not supported.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	bt_dev_dbg(hdev, "sock %p", sk);

	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);
}
2053 
/* hci_cmd_sync completion for set_le_sync: answer all pending SET_LE
 * commands — with an error status on failure, or with the current
 * settings plus a settings broadcast on success.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
2074 
/* hci_cmd_sync callback: apply the requested LE mode. Disabling LE
 * tears down advertising instances first; enabling sets the flag and
 * refreshes the advertising/scan-response data afterwards.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2118 
/* hci_cmd_sync completion for set_mesh_sync: on failure, fail every
 * pending SET_MESH_RECEIVER command; on success, remove the pending
 * entry and send an empty command-complete.
 * NOTE(review): unlike the other *_complete handlers in this file, cmd
 * is dereferenced without an err == -ECANCELED / pending_find() guard —
 * confirm cmd cannot already be freed when the request is cancelled.
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2134 
/* Runs on the hci_sync context: record the mesh AD-type filter list,
 * toggle the HCI_MESH flag and refresh passive scanning accordingly.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Bytes after the fixed header are the AD-type filter list.
	 * Assumes len >= sizeof(*cp), which the mgmt handler table's
	 * minimum-size check should guarantee — TODO confirm.
	 */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2157 
/* MGMT_OP_SET_MESH_RECEIVER handler: validate the request and queue
 * set_mesh_sync on the hci_sync context; the reply is sent from
 * set_mesh_complete unless queueing fails.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires an LE-capable controller with the experimental mesh
	 * feature turned on.
	 */
	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2195 
/* Completion for mesh_send_sync: on failure tear down the TX and clear
 * the sending flag; on success schedule the send-done work after the
 * expected advertising window.
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	/* Allow 25 ms per requested transmission before signalling done */
	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2216 
/* Runs on the hci_sync context: broadcast one mesh packet by
 * programming it as a dedicated advertising instance (one past the
 * controller's last regular set) and scheduling it.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	/* All regular advertising sets in use — no room for the extra
	 * mesh instance.
	 *
	 * NOTE(review): this returns a positive MGMT_STATUS_BUSY while
	 * other exits return 0 or a negative errno; confirm that
	 * mesh_send_start_complete's mgmt_status(err) handles a positive
	 * value as intended.
	 */
	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2270 
2271 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2272 {
2273 	struct mgmt_rp_mesh_read_features *rp = data;
2274 
2275 	if (rp->used_handles >= rp->max_handles)
2276 		return;
2277 
2278 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2279 }
2280 
/* MGMT_OP_MESH_READ_FEATURES handler: report the controller index, the
 * maximum number of mesh TX handles, and the handles currently in use
 * by the requesting socket.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	/* Handles are only offered while LE is enabled */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply length to the handles actually filled in */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2307 
/* Runs on the hci_sync context: cancel one mesh TX (by handle) or, for
 * handle 0, every TX owned by the requesting socket, then complete the
 * MESH_SEND_CANCEL command and free it.
 */
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		/* Handle 0 means "cancel all" for this socket */
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		/* Only the owning socket may cancel a transmission */
		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
2334 
/* MGMT_OP_MESH_SEND_CANCEL handler: queue send_cancel on the hci_sync
 * context, which also sends the command reply.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	/* mgmt_pending_new (not _add): the command is not tracked on the
	 * pending list, so it is released with mgmt_pending_free.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2368 
/* MGMT_OP_MESH_SEND handler: register a mesh transmission for this
 * socket and, when no send is already in flight, kick off
 * mesh_send_sync. The assigned handle is returned immediately.
 */
static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct mgmt_cp_mesh_send *send = data;
	struct mgmt_rp_mesh_read_features rp;
	bool sending;
	int err = 0;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Payload must be 1-31 bytes of advertising data past the header */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
	    len <= MGMT_MESH_SEND_SIZE ||
	    len > (MGMT_MESH_SEND_SIZE + 31))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* Reuse the read-features reply struct just to count handles in
	 * use by this socket.
	 */
	memset(&rp, 0, sizeof(rp));
	rp.max_handles = MESH_HANDLES_MAX;

	mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	if (rp.max_handles <= rp.used_handles) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_BUSY);
		goto done;
	}

	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);

	if (!mesh_tx)
		err = -ENOMEM;
	else if (!sending)
		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
					 mesh_send_start_complete);

	if (err < 0) {
		bt_dev_err(hdev, "Send Mesh Failed %d", err);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
				      MGMT_STATUS_FAILED);

		if (mesh_tx) {
			if (sending)
				mgmt_mesh_remove(mesh_tx);
			/* NOTE(review): err < 0 with mesh_tx set implies
			 * !sending (the queue call is only made then), so
			 * the branch above never fires and mesh_tx stays
			 * registered — confirm it is reaped elsewhere or
			 * whether the condition should be inverted.
			 */
		}
	} else {
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);

		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
				  &mesh_tx->handle, 1);
	}

done:
	hci_dev_unlock(hdev);
	return err;
}
2429 
/* MGMT_OP_SET_LE handler: enable or disable LE support. When the
 * controller state would not actually change, only the local flags are
 * updated; otherwise set_le_sync is queued on the hci_sync context.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Powered off, or the controller already matches the requested
	 * state: flip the local flags and reply without HCI traffic.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE also clears the advertising setting */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one LE/advertising state change may be in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2518 
2519 /* This is a helper function to test for pending mgmt commands that can
2520  * cause CoD or EIR HCI commands. We can only allow one such pending
2521  * mgmt command at a time since otherwise we cannot easily track what
2522  * the current values are, will be, and based on that calculate if a new
2523  * HCI command needs to be sent and if yes with what value.
2524  */
2525 static bool pending_eir_or_class(struct hci_dev *hdev)
2526 {
2527 	struct mgmt_pending_cmd *cmd;
2528 
2529 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2530 		switch (cmd->opcode) {
2531 		case MGMT_OP_ADD_UUID:
2532 		case MGMT_OP_REMOVE_UUID:
2533 		case MGMT_OP_SET_DEV_CLASS:
2534 		case MGMT_OP_SET_POWERED:
2535 			return true;
2536 		}
2537 	}
2538 
2539 	return false;
2540 }
2541 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; UUIDs derived from it can be shortened to
 * 16 or 32 bits (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2546 
2547 static u8 get_uuid_size(const u8 *uuid)
2548 {
2549 	u32 val;
2550 
2551 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2552 		return 128;
2553 
2554 	val = get_unaligned_le32(&uuid[12]);
2555 	if (val > 0xffff)
2556 		return 32;
2557 
2558 	return 16;
2559 }
2560 
/* Shared completion for the UUID/class sync ops: reply with the
 * current 3-byte class of device and release the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2572 
/* Push the updated class of device and EIR data to the controller
 * after a UUID was added.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}
2583 
/* MGMT_OP_ADD_UUID handler: record the UUID locally and queue a class
 * of device + EIR refresh; mgmt_class_complete sends the reply.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	/* The UUID stays registered even if queueing below fails */
	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* MGMT_OP_ADD_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;	/* redundant — execution falls to failed: anyway */
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2633 
2634 static bool enable_service_cache(struct hci_dev *hdev)
2635 {
2636 	if (!hdev_is_powered(hdev))
2637 		return false;
2638 
2639 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2640 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2641 				   CACHE_TIMEOUT);
2642 		return true;
2643 	}
2644 
2645 	return false;
2646 }
2647 
/* Push the updated class of device and EIR data to the controller
 * after a UUID was removed.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2658 
/* MGMT_OP_REMOVE_UUID handler: drop one UUID (or all of them, for the
 * all-zero wildcard UUID) and queue a class of device + EIR refresh.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID is a wildcard meaning "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache took over, the controller update is
		 * deferred to the cache timeout — reply immediately.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_REMOVE_UUID don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2729 
2730 static int set_class_sync(struct hci_dev *hdev, void *data)
2731 {
2732 	int err = 0;
2733 
2734 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2735 		cancel_delayed_work_sync(&hdev->service_cache);
2736 		err = hci_update_eir_sync(hdev);
2737 	}
2738 
2739 	if (err)
2740 		return err;
2741 
2742 	return hci_update_class_sync(hdev);
2743 }
2744 
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor device class
 * and, when powered, push it to the controller via set_class_sync.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two minor bits and high three major bits are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: the stored values take effect on power on */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* MGMT_OP_SET_DEV_CLASS don't require adapter the UP/Running so use
	 * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
	 */
	err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
				  mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2799 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace all stored BR/EDR link keys
 * with the supplied list and update the keep-debug-keys policy.
 * Invalid or blocked entries are skipped with a warning, not rejected.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key count that still fits in a u16-sized message */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The message length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	hci_dev_lock(hdev);

	/* The load replaces, never merges with, the existing keys */
	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Blocked keys are dropped silently (warning only) */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Link keys are BR/EDR-only */
		if (key->addr.type != BDADDR_BREDR) {
			bt_dev_warn(hdev,
				    "Invalid link address type %u for %pMR",
				    key->addr.type, &key->addr.bdaddr);
			continue;
		}

		/* 0x08 is the highest link key type accepted here */
		if (key->type > 0x08) {
			bt_dev_warn(hdev, "Invalid link key type %u for %pMR",
				    key->type, &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2892 
2893 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2894 			   u8 addr_type, struct sock *skip_sk)
2895 {
2896 	struct mgmt_ev_device_unpaired ev;
2897 
2898 	bacpy(&ev.addr.bdaddr, bdaddr);
2899 	ev.addr.type = addr_type;
2900 
2901 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2902 			  skip_sk);
2903 }
2904 
2905 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2906 {
2907 	struct mgmt_pending_cmd *cmd = data;
2908 	struct mgmt_cp_unpair_device *cp = cmd->param;
2909 
2910 	if (!err)
2911 		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2912 
2913 	cmd->cmd_complete(cmd, err);
2914 	mgmt_pending_free(cmd);
2915 }
2916 
2917 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2918 {
2919 	struct mgmt_pending_cmd *cmd = data;
2920 	struct mgmt_cp_unpair_device *cp = cmd->param;
2921 	struct hci_conn *conn;
2922 
2923 	if (cp->addr.type == BDADDR_BREDR)
2924 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2925 					       &cp->addr.bdaddr);
2926 	else
2927 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2928 					       le_addr_type(cp->addr.type));
2929 
2930 	if (!conn)
2931 		return 0;
2932 
2933 	/* Disregard any possible error since the likes of hci_abort_conn_sync
2934 	 * will clean up the connection no matter the error.
2935 	 */
2936 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2937 
2938 	return 0;
2939 }
2940 
/* MGMT_OP_UNPAIR_DEVICE handler: remove the keys for a device and,
 * when requested and connected, terminate the link. The reply is sent
 * immediately unless a disconnect is queued, in which case
 * unpair_device_complete finishes the command.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Not connected: the stored parameters can be dropped right away */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3069 
3070 static void disconnect_complete(struct hci_dev *hdev, void *data, int err)
3071 {
3072 	struct mgmt_pending_cmd *cmd = data;
3073 
3074 	cmd->cmd_complete(cmd, mgmt_status(err));
3075 	mgmt_pending_free(cmd);
3076 }
3077 
3078 static int disconnect_sync(struct hci_dev *hdev, void *data)
3079 {
3080 	struct mgmt_pending_cmd *cmd = data;
3081 	struct mgmt_cp_disconnect *cp = cmd->param;
3082 	struct hci_conn *conn;
3083 
3084 	if (cp->addr.type == BDADDR_BREDR)
3085 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3086 					       &cp->addr.bdaddr);
3087 	else
3088 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3089 					       le_addr_type(cp->addr.type));
3090 
3091 	if (!conn)
3092 		return -ENOTCONN;
3093 
3094 	/* Disregard any possible error since the likes of hci_abort_conn_sync
3095 	 * will clean up the connection no matter the error.
3096 	 */
3097 	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3098 
3099 	return 0;
3100 }
3101 
/* MGMT_OP_DISCONNECT handler: validate the address and queue
 * disconnect_sync; disconnect_complete sends the reply.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The reply always echoes the requested address */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_cmd_sync_queue(hdev, disconnect_sync, cmd,
				 disconnect_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3147 
3148 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3149 {
3150 	switch (link_type) {
3151 	case ISO_LINK:
3152 	case LE_LINK:
3153 		switch (addr_type) {
3154 		case ADDR_LE_DEV_PUBLIC:
3155 			return BDADDR_LE_PUBLIC;
3156 
3157 		default:
3158 			/* Fallback to LE Random address type */
3159 			return BDADDR_LE_RANDOM;
3160 		}
3161 
3162 	default:
3163 		/* Fallback to BR/EDR type */
3164 		return BDADDR_BREDR;
3165 	}
3166 }
3167 
/* MGMT_OP_GET_CONNECTIONS handler: report the addresses of all
 * mgmt-visible connections, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: upper bound for the reply allocation */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in addresses. SCO/eSCO entries are written
	 * but i is not advanced, so the next connection simply reuses
	 * the slot — they never appear in the final reply.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3221 
3222 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3223 				   struct mgmt_cp_pin_code_neg_reply *cp)
3224 {
3225 	struct mgmt_pending_cmd *cmd;
3226 	int err;
3227 
3228 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3229 			       sizeof(*cp));
3230 	if (!cmd)
3231 		return -ENOMEM;
3232 
3233 	cmd->cmd_complete = addr_cmd_complete;
3234 
3235 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3236 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3237 	if (err < 0)
3238 		mgmt_pending_remove(cmd);
3239 
3240 	return err;
3241 }
3242 
/* Handle MGMT_OP_PIN_CODE_REPLY: forward a user-supplied PIN code to the
 * controller for an existing BR/EDR (ACL) connection. High security
 * pairing requires a full 16 byte PIN; anything shorter is answered with
 * a negative reply instead.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN pairing only makes sense for an existing ACL connection */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 byte PIN: send a negative reply to
	 * the controller and report invalid params to the caller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3304 
3305 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3306 			     u16 len)
3307 {
3308 	struct mgmt_cp_set_io_capability *cp = data;
3309 
3310 	bt_dev_dbg(hdev, "sock %p", sk);
3311 
3312 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3313 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3314 				       MGMT_STATUS_INVALID_PARAMS);
3315 
3316 	hci_dev_lock(hdev);
3317 
3318 	hdev->io_capability = cp->io_capability;
3319 
3320 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3321 
3322 	hci_dev_unlock(hdev);
3323 
3324 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3325 				 NULL, 0);
3326 }
3327 
3328 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3329 {
3330 	struct hci_dev *hdev = conn->hdev;
3331 	struct mgmt_pending_cmd *cmd;
3332 
3333 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3334 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3335 			continue;
3336 
3337 		if (cmd->user_data != conn)
3338 			continue;
3339 
3340 		return cmd;
3341 	}
3342 
3343 	return NULL;
3344 }
3345 
/* Finish a Pair Device command: send the final command complete with
 * @status, detach the pairing callbacks and release the references
 * taken on the connection when pairing started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drop the hold kept for the duration of pairing */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() done when pairing started */
	hci_conn_put(conn);

	return err;
}
3374 
3375 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3376 {
3377 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3378 	struct mgmt_pending_cmd *cmd;
3379 
3380 	cmd = find_pairing(conn);
3381 	if (cmd) {
3382 		cmd->cmd_complete(cmd, status);
3383 		mgmt_pending_remove(cmd);
3384 	}
3385 }
3386 
3387 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3388 {
3389 	struct mgmt_pending_cmd *cmd;
3390 
3391 	BT_DBG("status %u", status);
3392 
3393 	cmd = find_pairing(conn);
3394 	if (!cmd) {
3395 		BT_DBG("Unable to find a pending command");
3396 		return;
3397 	}
3398 
3399 	cmd->cmd_complete(cmd, mgmt_status(status));
3400 	mgmt_pending_remove(cmd);
3401 }
3402 
3403 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3404 {
3405 	struct mgmt_pending_cmd *cmd;
3406 
3407 	BT_DBG("status %u", status);
3408 
3409 	if (!status)
3410 		return;
3411 
3412 	cmd = find_pairing(conn);
3413 	if (!cmd) {
3414 		BT_DBG("Unable to find a pending command");
3415 		return;
3416 	}
3417 
3418 	cmd->cmd_complete(cmd, mgmt_status(status));
3419 	mgmt_pending_remove(cmd);
3420 }
3421 
/* Handle MGMT_OP_PAIR_DEVICE: create a connection to the remote device
 * (ACL for BR/EDR, connect-by-scan for LE) and initiate pairing on it.
 * The command stays pending until one of the pairing callbacks fires;
 * the reply always carries the remote address.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
		if (!p) {
			err = -EIO;
			goto unlock;
		}

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		/* Translate the connect error into a mgmt status code */
		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	/* Callbacks already installed means pairing is already ongoing */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	/* Reference released in pairing_complete() */
	cmd->user_data = hci_conn_get(conn);

	/* Connection may already be up and secure; complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3556 
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort a pending Pair Device
 * command for the given address, remove any keys created so far and
 * tear down the link if it was only set up for pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the one being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3613 
/* Common helper for all user pairing responses (PIN neg reply, user
 * confirm/passkey pos/neg replies). LE responses are handled entirely
 * by SMP; BR/EDR responses are forwarded to the controller as the HCI
 * command @hci_op and tracked as a pending mgmt command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP, not the controller */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3684 
3685 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3686 			      void *data, u16 len)
3687 {
3688 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3689 
3690 	bt_dev_dbg(hdev, "sock %p", sk);
3691 
3692 	return user_pairing_resp(sk, hdev, &cp->addr,
3693 				MGMT_OP_PIN_CODE_NEG_REPLY,
3694 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3695 }
3696 
3697 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3698 			      u16 len)
3699 {
3700 	struct mgmt_cp_user_confirm_reply *cp = data;
3701 
3702 	bt_dev_dbg(hdev, "sock %p", sk);
3703 
3704 	if (len != sizeof(*cp))
3705 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3706 				       MGMT_STATUS_INVALID_PARAMS);
3707 
3708 	return user_pairing_resp(sk, hdev, &cp->addr,
3709 				 MGMT_OP_USER_CONFIRM_REPLY,
3710 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3711 }
3712 
3713 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3714 				  void *data, u16 len)
3715 {
3716 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3717 
3718 	bt_dev_dbg(hdev, "sock %p", sk);
3719 
3720 	return user_pairing_resp(sk, hdev, &cp->addr,
3721 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3722 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3723 }
3724 
3725 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3726 			      u16 len)
3727 {
3728 	struct mgmt_cp_user_passkey_reply *cp = data;
3729 
3730 	bt_dev_dbg(hdev, "sock %p", sk);
3731 
3732 	return user_pairing_resp(sk, hdev, &cp->addr,
3733 				 MGMT_OP_USER_PASSKEY_REPLY,
3734 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3735 }
3736 
3737 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3738 				  void *data, u16 len)
3739 {
3740 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3741 
3742 	bt_dev_dbg(hdev, "sock %p", sk);
3743 
3744 	return user_pairing_resp(sk, hdev, &cp->addr,
3745 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3746 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3747 }
3748 
3749 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3750 {
3751 	struct adv_info *adv_instance;
3752 
3753 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3754 	if (!adv_instance)
3755 		return 0;
3756 
3757 	/* stop if current instance doesn't need to be changed */
3758 	if (!(adv_instance->flags & flags))
3759 		return 0;
3760 
3761 	cancel_adv_timeout(hdev);
3762 
3763 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3764 	if (!adv_instance)
3765 		return 0;
3766 
3767 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3768 
3769 	return 0;
3770 }
3771 
/* cmd_sync callback: expire advertising instances that carry the local
 * name so they get re-advertised with the new name.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3776 
/* Completion callback for set_name_sync: relay the result to the mgmt
 * caller and, while LE advertising is active, queue a name-change expiry
 * of the current advertising instance.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if this command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3801 
/* Runs on the cmd_sync queue: push the new local name to the controller.
 * For BR/EDR that means updating name and EIR data; for LE the name is
 * carried in the scan response data of the current advertising instance.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3817 
/* Handle MGMT_OP_SET_LOCAL_NAME: update the device and short name. When
 * powered off only the stored values change; when powered on the update
 * is queued on the cmd_sync queue and completed asynchronously.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: store the name and notify listeners right away */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	/* Only commit the new name once the update was queued */
	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3880 
/* cmd_sync callback: expire advertising instances that carry the
 * appearance value so they get re-advertised with the new one.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3885 
3886 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3887 			  u16 len)
3888 {
3889 	struct mgmt_cp_set_appearance *cp = data;
3890 	u16 appearance;
3891 	int err;
3892 
3893 	bt_dev_dbg(hdev, "sock %p", sk);
3894 
3895 	if (!lmp_le_capable(hdev))
3896 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3897 				       MGMT_STATUS_NOT_SUPPORTED);
3898 
3899 	appearance = le16_to_cpu(cp->appearance);
3900 
3901 	hci_dev_lock(hdev);
3902 
3903 	if (hdev->appearance != appearance) {
3904 		hdev->appearance = appearance;
3905 
3906 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3907 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3908 					   NULL);
3909 
3910 		ext_info_changed(hdev, sk);
3911 	}
3912 
3913 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3914 				0);
3915 
3916 	hci_dev_unlock(hdev);
3917 
3918 	return err;
3919 }
3920 
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: report supported, selected and
 * configurable PHYs. The memset also zeroes any struct padding before
 * the reply is copied out.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3941 
/* Broadcast a PHY Configuration Changed event with the currently
 * selected PHYs to all mgmt sockets except @skip.
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3953 
/* Completion callback for set_default_phy_sync: derive the final status
 * from both the queue error and the HCI command response stored in
 * cmd->skb, then report back to the mgmt caller.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if this command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* Queueing succeeded: inspect the HCI response itself */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3990 
3991 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3992 {
3993 	struct mgmt_pending_cmd *cmd = data;
3994 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3995 	struct hci_cp_le_set_default_phy cp_phy;
3996 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3997 
3998 	memset(&cp_phy, 0, sizeof(cp_phy));
3999 
4000 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4001 		cp_phy.all_phys |= 0x01;
4002 
4003 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4004 		cp_phy.all_phys |= 0x02;
4005 
4006 	if (selected_phys & MGMT_PHY_LE_1M_TX)
4007 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4008 
4009 	if (selected_phys & MGMT_PHY_LE_2M_TX)
4010 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4011 
4012 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
4013 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4014 
4015 	if (selected_phys & MGMT_PHY_LE_1M_RX)
4016 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4017 
4018 	if (selected_phys & MGMT_PHY_LE_2M_RX)
4019 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4020 
4021 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
4022 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4023 
4024 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4025 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4026 
4027 	return 0;
4028 }
4029 
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: validate the requested PHY set,
 * map the BR/EDR selections onto ACL packet types directly, and queue
 * an HCI LE Set Default PHY command for the LE part if it changed.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject selections outside what the controller supports */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* All non-configurable PHYs must remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate BR/EDR PHY selections into ACL packet types. Note that
	 * the EDR bits are inverted: setting a 2DH*/3DH* bit in pkt_type
	 * disables that packet type.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part is unchanged, we're done without issuing any HCI
	 * command; only notify if the BR/EDR packet types changed.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4158 
/* Handle MGMT_OP_SET_BLOCKED_KEYS: replace the device's blocked-key
 * list with the one supplied by userspace. Note that err holds MGMT_STATUS_*
 * codes (not negative errnos) since it is passed as the status of the
 * command complete.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Upper bound on key_count such that the total size fits in u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the declared key count exactly */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Old entries are always dropped, even if adding new ones fails */
	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4207 
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband speech flag.
 * The setting can only be changed while the controller is powered off;
 * attempts to change it while powered are rejected.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Changing the value while powered is rejected; setting it to its
	 * current value is still allowed.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only emit New Settings if the flag actually flipped */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4256 
/* Handle MGMT_OP_READ_CONTROLLER_CAP: build a TLV list of controller
 * capabilities (security flags, max encryption key sizes, LE TX power
 * range). buf must be large enough for the reply header plus all TLVs
 * appended below.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4323 
/* Experimental-feature UUIDs.  Each array stores the UUID named in the
 * comment above it with its bytes reversed (least-significant byte
 * first), matching the on-the-wire mgmt representation.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4367 
4368 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4369 				  void *data, u16 data_len)
4370 {
4371 	struct mgmt_rp_read_exp_features_info *rp;
4372 	size_t len;
4373 	u16 idx = 0;
4374 	u32 flags;
4375 	int status;
4376 
4377 	bt_dev_dbg(hdev, "sock %p", sk);
4378 
4379 	/* Enough space for 7 features */
4380 	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4381 	rp = kzalloc(len, GFP_KERNEL);
4382 	if (!rp)
4383 		return -ENOMEM;
4384 
4385 #ifdef CONFIG_BT_FEATURE_DEBUG
4386 	if (!hdev) {
4387 		flags = bt_dbg_get() ? BIT(0) : 0;
4388 
4389 		memcpy(rp->features[idx].uuid, debug_uuid, 16);
4390 		rp->features[idx].flags = cpu_to_le32(flags);
4391 		idx++;
4392 	}
4393 #endif
4394 
4395 	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4396 		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4397 			flags = BIT(0);
4398 		else
4399 			flags = 0;
4400 
4401 		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4402 		rp->features[idx].flags = cpu_to_le32(flags);
4403 		idx++;
4404 	}
4405 
4406 	if (hdev && ll_privacy_capable(hdev)) {
4407 		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4408 			flags = BIT(0) | BIT(1);
4409 		else
4410 			flags = BIT(1);
4411 
4412 		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4413 		rp->features[idx].flags = cpu_to_le32(flags);
4414 		idx++;
4415 	}
4416 
4417 	if (hdev && (aosp_has_quality_report(hdev) ||
4418 		     hdev->set_quality_report)) {
4419 		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4420 			flags = BIT(0);
4421 		else
4422 			flags = 0;
4423 
4424 		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4425 		rp->features[idx].flags = cpu_to_le32(flags);
4426 		idx++;
4427 	}
4428 
4429 	if (hdev && hdev->get_data_path_id) {
4430 		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4431 			flags = BIT(0);
4432 		else
4433 			flags = 0;
4434 
4435 		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4436 		rp->features[idx].flags = cpu_to_le32(flags);
4437 		idx++;
4438 	}
4439 
4440 	if (IS_ENABLED(CONFIG_BT_LE)) {
4441 		flags = iso_enabled() ? BIT(0) : 0;
4442 		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4443 		rp->features[idx].flags = cpu_to_le32(flags);
4444 		idx++;
4445 	}
4446 
4447 	if (hdev && lmp_le_capable(hdev)) {
4448 		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4449 			flags = BIT(0);
4450 		else
4451 			flags = 0;
4452 
4453 		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4454 		rp->features[idx].flags = cpu_to_le32(flags);
4455 		idx++;
4456 	}
4457 
4458 	rp->feature_count = cpu_to_le16(idx);
4459 
4460 	/* After reading the experimental features information, enable
4461 	 * the events to update client on any future change.
4462 	 */
4463 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4464 
4465 	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4466 				   MGMT_OP_READ_EXP_FEATURES_INFO,
4467 				   0, rp, sizeof(*rp) + (20 * idx));
4468 
4469 	kfree(rp);
4470 	return status;
4471 }
4472 
/* Emit an Experimental Feature Changed event for the LL privacy (RPA
 * resolution) feature to all mgmt sockets that enabled
 * HCI_MGMT_EXP_FEATURE_EVENTS, except @skip.  BIT(0) of the flags is
 * the enabled state; BIT(1) is always set (used elsewhere in this
 * file to flag that the supported settings changed too).
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* NOTE(review): conn_flags is modified here without holding the
	 * hdev lock - confirm whether this needs to be atomic with the
	 * other conn_flags users.
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);

}
4493 
/* Emit a generic Experimental Feature Changed event for @uuid to all
 * mgmt sockets that enabled HCI_MGMT_EXP_FEATURE_EVENTS, except
 * @skip.  BIT(0) of the event flags carries the new enabled state.
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	/* Zero the whole struct first so any padding bytes sent on the
	 * wire are deterministic.
	 */
	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
4507 
/* Build one entry of the experimental-feature dispatch table. */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4513 
/* The zero key uuid is special. Multiple exp features are set through it.
 * Writing the zero UUID acts as a "disable" for a group of features:
 * the global debug switch (non-controller index) and LL privacy
 * (powered-down controller).
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* The response always reports the zero UUID with no flags. */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Without a controller index, turn off the global debug switch. */
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* LL privacy can only be disabled while powered off. */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4550 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggle the global kernel Bluetooth debug feature.  This feature is
 * not bound to any controller, so a controller index is rejected.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Exactly one boolean octet (0x00/0x01) is accepted */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1 ||
	    (cp->param[0] != 0x00 && cp->param[0] != 0x01))
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->param[0];
	changed = (bt_dbg_get() != enable);
	bt_dbg_set(enable);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_SET_EXP_FEATURE,
				0, &rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, enable, sk);

	return err;
}
#endif
4597 
4598 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4599 			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4600 {
4601 	struct mgmt_rp_set_exp_feature rp;
4602 	bool val, changed;
4603 	int err;
4604 
4605 	/* Command requires to use the controller index */
4606 	if (!hdev)
4607 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4608 				       MGMT_OP_SET_EXP_FEATURE,
4609 				       MGMT_STATUS_INVALID_INDEX);
4610 
4611 	/* Parameters are limited to a single octet */
4612 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4613 		return mgmt_cmd_status(sk, hdev->id,
4614 				       MGMT_OP_SET_EXP_FEATURE,
4615 				       MGMT_STATUS_INVALID_PARAMS);
4616 
4617 	/* Only boolean on/off is supported */
4618 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4619 		return mgmt_cmd_status(sk, hdev->id,
4620 				       MGMT_OP_SET_EXP_FEATURE,
4621 				       MGMT_STATUS_INVALID_PARAMS);
4622 
4623 	val = !!cp->param[0];
4624 
4625 	if (val) {
4626 		changed = !hci_dev_test_and_set_flag(hdev,
4627 						     HCI_MESH_EXPERIMENTAL);
4628 	} else {
4629 		hci_dev_clear_flag(hdev, HCI_MESH);
4630 		changed = hci_dev_test_and_clear_flag(hdev,
4631 						      HCI_MESH_EXPERIMENTAL);
4632 	}
4633 
4634 	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4635 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4636 
4637 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4638 
4639 	err = mgmt_cmd_complete(sk, hdev->id,
4640 				MGMT_OP_SET_EXP_FEATURE, 0,
4641 				&rp, sizeof(rp));
4642 
4643 	if (changed)
4644 		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4645 
4646 	return err;
4647 }
4648 
/* Enable or disable the LL privacy (RPA resolution) experimental
 * feature.  Changes are only accepted while the controller is powered
 * off.  In the response/event flags, BIT(0) is the enabled state and
 * BIT(1) signals that the supported settings changed as well.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* Advertising conflicts with LL privacy, so drop it. */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4713 
/* Enable or disable the controller quality-report experimental
 * feature.  A driver-provided set_quality_report() hook takes
 * precedence over the AOSP vendor command implementation.  The whole
 * operation runs under hci_req_sync_lock().
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* Reject when neither the driver hook nor the AOSP extension is
	 * available.
	 */
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		/* Only record the flag once the setter succeeded. */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4787 
4788 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4789 				  struct mgmt_cp_set_exp_feature *cp,
4790 				  u16 data_len)
4791 {
4792 	bool val, changed;
4793 	int err;
4794 	struct mgmt_rp_set_exp_feature rp;
4795 
4796 	/* Command requires to use a valid controller index */
4797 	if (!hdev)
4798 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4799 				       MGMT_OP_SET_EXP_FEATURE,
4800 				       MGMT_STATUS_INVALID_INDEX);
4801 
4802 	/* Parameters are limited to a single octet */
4803 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4804 		return mgmt_cmd_status(sk, hdev->id,
4805 				       MGMT_OP_SET_EXP_FEATURE,
4806 				       MGMT_STATUS_INVALID_PARAMS);
4807 
4808 	/* Only boolean on/off is supported */
4809 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4810 		return mgmt_cmd_status(sk, hdev->id,
4811 				       MGMT_OP_SET_EXP_FEATURE,
4812 				       MGMT_STATUS_INVALID_PARAMS);
4813 
4814 	val = !!cp->param[0];
4815 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4816 
4817 	if (!hdev->get_data_path_id) {
4818 		return mgmt_cmd_status(sk, hdev->id,
4819 				       MGMT_OP_SET_EXP_FEATURE,
4820 				       MGMT_STATUS_NOT_SUPPORTED);
4821 	}
4822 
4823 	if (changed) {
4824 		if (val)
4825 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4826 		else
4827 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4828 	}
4829 
4830 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4831 		    val, changed);
4832 
4833 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4834 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4835 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4836 	err = mgmt_cmd_complete(sk, hdev->id,
4837 				MGMT_OP_SET_EXP_FEATURE, 0,
4838 				&rp, sizeof(rp));
4839 
4840 	if (changed)
4841 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4842 
4843 	return err;
4844 }
4845 
4846 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4847 					  struct mgmt_cp_set_exp_feature *cp,
4848 					  u16 data_len)
4849 {
4850 	bool val, changed;
4851 	int err;
4852 	struct mgmt_rp_set_exp_feature rp;
4853 
4854 	/* Command requires to use a valid controller index */
4855 	if (!hdev)
4856 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4857 				       MGMT_OP_SET_EXP_FEATURE,
4858 				       MGMT_STATUS_INVALID_INDEX);
4859 
4860 	/* Parameters are limited to a single octet */
4861 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4862 		return mgmt_cmd_status(sk, hdev->id,
4863 				       MGMT_OP_SET_EXP_FEATURE,
4864 				       MGMT_STATUS_INVALID_PARAMS);
4865 
4866 	/* Only boolean on/off is supported */
4867 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4868 		return mgmt_cmd_status(sk, hdev->id,
4869 				       MGMT_OP_SET_EXP_FEATURE,
4870 				       MGMT_STATUS_INVALID_PARAMS);
4871 
4872 	val = !!cp->param[0];
4873 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4874 
4875 	if (!hci_dev_le_state_simultaneous(hdev)) {
4876 		return mgmt_cmd_status(sk, hdev->id,
4877 				       MGMT_OP_SET_EXP_FEATURE,
4878 				       MGMT_STATUS_NOT_SUPPORTED);
4879 	}
4880 
4881 	if (changed) {
4882 		if (val)
4883 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4884 		else
4885 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4886 	}
4887 
4888 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4889 		    val, changed);
4890 
4891 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4892 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4893 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4894 	err = mgmt_cmd_complete(sk, hdev->id,
4895 				MGMT_OP_SET_EXP_FEATURE, 0,
4896 				&rp, sizeof(rp));
4897 
4898 	if (changed)
4899 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4900 
4901 	return err;
4902 }
4903 
#ifdef CONFIG_BT_LE
/* Globally enable or disable the experimental ISO socket support via
 * iso_init()/iso_exit().  This feature does not take a controller
 * index.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Exactly one boolean octet (0x00/0x01) is accepted */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1 ||
	    (cp->param[0] != 0x00 && cp->param[0] != 0x01))
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->param[0];

	/* Only report a feature change when init/exit succeeded. */
	err = enable ? iso_init() : iso_exit();
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, enable, sk);

	return err;
}
#endif
4954 
/* Dispatch table mapping experimental-feature UUIDs to their setter
 * functions; probed in order and terminated by a NULL entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4976 
4977 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4978 			   void *data, u16 data_len)
4979 {
4980 	struct mgmt_cp_set_exp_feature *cp = data;
4981 	size_t i = 0;
4982 
4983 	bt_dev_dbg(hdev, "sock %p", sk);
4984 
4985 	for (i = 0; exp_features[i].uuid; i++) {
4986 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4987 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4988 	}
4989 
4990 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4991 			       MGMT_OP_SET_EXP_FEATURE,
4992 			       MGMT_STATUS_NOT_SUPPORTED);
4993 }
4994 
/* Compute the connection flags actually supported for @params,
 * starting from the controller-wide hdev->conn_flags mask.
 */
static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* Devices using RPAs can only be programmed in the acceptlist if
	 * LL Privacy has been enabled, otherwise they cannot mark
	 * HCI_CONN_FLAG_REMOTE_WAKEUP.
	 */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}
5010 
5011 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5012 			    u16 data_len)
5013 {
5014 	struct mgmt_cp_get_device_flags *cp = data;
5015 	struct mgmt_rp_get_device_flags rp;
5016 	struct bdaddr_list_with_flags *br_params;
5017 	struct hci_conn_params *params;
5018 	u32 supported_flags;
5019 	u32 current_flags = 0;
5020 	u8 status = MGMT_STATUS_INVALID_PARAMS;
5021 
5022 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5023 		   &cp->addr.bdaddr, cp->addr.type);
5024 
5025 	hci_dev_lock(hdev);
5026 
5027 	supported_flags = hdev->conn_flags;
5028 
5029 	memset(&rp, 0, sizeof(rp));
5030 
5031 	if (cp->addr.type == BDADDR_BREDR) {
5032 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5033 							      &cp->addr.bdaddr,
5034 							      cp->addr.type);
5035 		if (!br_params)
5036 			goto done;
5037 
5038 		current_flags = br_params->flags;
5039 	} else {
5040 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5041 						le_addr_type(cp->addr.type));
5042 		if (!params)
5043 			goto done;
5044 
5045 		supported_flags = get_params_flags(hdev, params);
5046 		current_flags = params->flags;
5047 	}
5048 
5049 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5050 	rp.addr.type = cp->addr.type;
5051 	rp.supported_flags = cpu_to_le32(supported_flags);
5052 	rp.current_flags = cpu_to_le32(current_flags);
5053 
5054 	status = MGMT_STATUS_SUCCESS;
5055 
5056 done:
5057 	hci_dev_unlock(hdev);
5058 
5059 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5060 				&rp, sizeof(rp));
5061 }
5062 
/* Broadcast a Device Flags Changed event with the device's new
 * supported/current flag state; @sk is passed to mgmt_event() as the
 * socket to exclude (the originator of the change).
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
5076 
/* Handle the Set Device Flags mgmt command: validate the requested
 * flags against the supported mask and store them either in the
 * BR/EDR accept-list entry or the LE connection parameters.  On
 * success a Device Flags Changed event is broadcast to other sockets.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): hdev->conn_flags is read here before taking
	 * hci_dev_lock() and could change concurrently - confirm
	 * whether the lock should be acquired earlier.
	 */
	supported_flags = hdev->conn_flags;

	/* Reject any flag outside the supported mask */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Re-validate against the per-params mask, which may be
	 * narrower than the controller-wide one.
	 */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5153 
/* Broadcast an Advertisement Monitor Added event for @handle; @sk is
 * passed to mgmt_event() as the socket to exclude (the originator).
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
5163 
/* Broadcast an Advertisement Monitor Removed event for @handle.
 *
 * When a Remove Advertisement Monitor command is pending for a
 * specific (non-zero) handle, its initiating socket is excluded from
 * the broadcast - presumably because it learns the outcome from the
 * command response instead; confirm against the remove handlers.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5183 
/* Handle the Read Advertisement Monitor Features mgmt command: report
 * the supported/enabled monitor features, the handle and pattern
 * limits, and all currently allocated monitor handles.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Collect all registered handles.  Assumes the IDR never holds
	 * more than HCI_MAX_ADV_MONITOR_NUM_HANDLES entries - confirm
	 * the add path enforces that bound.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5232 
/* Completion callback for mgmt_add_adv_patterns_monitor_sync().
 *
 * On success the monitor is accounted, marked registered, announced
 * via an Advertisement Monitor Added event and passive scanning is
 * re-evaluated.  In all cases the pending command is answered and
 * removed.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5260 
/* hci_cmd_sync work callback: program the monitor attached to the
 * pending command into the controller.
 */
static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	return hci_add_adv_monitor(hdev, monitor);
}
5268 
/* Common tail of the Add Advertisement Patterns Monitor commands.
 *
 * Takes ownership of @m: every failure path releases it through
 * hci_free_adv_monitor(); on success it is handed to the queued
 * request via cmd->user_data.  @status carries a parse error from the
 * caller, if any.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* The caller already detected bad input; free @m and report. */
	if (status)
		goto unlock;

	/* Serialize against other commands touching monitor/LE state. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		/* NOTE(review): on queue failure the pending cmd is not
		 * removed here - confirm it is cleaned up elsewhere.
		 */
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5316 
5317 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5318 				   struct mgmt_adv_rssi_thresholds *rssi)
5319 {
5320 	if (rssi) {
5321 		m->rssi.low_threshold = rssi->low_threshold;
5322 		m->rssi.low_threshold_timeout =
5323 		    __le16_to_cpu(rssi->low_threshold_timeout);
5324 		m->rssi.high_threshold = rssi->high_threshold;
5325 		m->rssi.high_threshold_timeout =
5326 		    __le16_to_cpu(rssi->high_threshold_timeout);
5327 		m->rssi.sampling_period = rssi->sampling_period;
5328 	} else {
5329 		/* Default values. These numbers are the least constricting
5330 		 * parameters for MSFT API to work, so it behaves as if there
5331 		 * are no rssi parameter to consider. May need to be changed
5332 		 * if other API are to be supported.
5333 		 */
5334 		m->rssi.low_threshold = -127;
5335 		m->rssi.low_threshold_timeout = 60;
5336 		m->rssi.high_threshold = -127;
5337 		m->rssi.high_threshold_timeout = 0;
5338 		m->rssi.sampling_period = 0;
5339 	}
5340 }
5341 
5342 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5343 				    struct mgmt_adv_pattern *patterns)
5344 {
5345 	u8 offset = 0, length = 0;
5346 	struct adv_pattern *p = NULL;
5347 	int i;
5348 
5349 	for (i = 0; i < pattern_count; i++) {
5350 		offset = patterns[i].offset;
5351 		length = patterns[i].length;
5352 		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5353 		    length > HCI_MAX_EXT_AD_LENGTH ||
5354 		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5355 			return MGMT_STATUS_INVALID_PARAMS;
5356 
5357 		p = kmalloc(sizeof(*p), GFP_KERNEL);
5358 		if (!p)
5359 			return MGMT_STATUS_NO_RESOURCES;
5360 
5361 		p->ad_type = patterns[i].ad_type;
5362 		p->offset = patterns[i].offset;
5363 		p->length = patterns[i].length;
5364 		memcpy(p->value, patterns[i].value, p->length);
5365 
5366 		INIT_LIST_HEAD(&p->list);
5367 		list_add(&p->list, &m->patterns);
5368 	}
5369 
5370 	return MGMT_STATUS_SUCCESS;
5371 }
5372 
/* Handler for MGMT_OP_ADD_ADV_PATTERNS_MONITOR (no RSSI thresholds):
 * validate the variable-length request, allocate a monitor with default
 * RSSI parameters, and hand it to __add_adv_patterns_monitor() which
 * takes ownership of both success and failure (m may be NULL here).
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Header alone is not enough: at least one pattern must follow. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must exactly match the declared pattern count. */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* NULL selects the default RSSI parameters. */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
5409 
/* Handler for MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: identical to
 * add_adv_patterns_monitor() except the request additionally carries
 * RSSI thresholds, which are parsed into the monitor.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Header alone is not enough: at least one pattern must follow. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	/* Total length must exactly match the declared pattern count. */
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
5446 
/* Completion callback for MGMT_OP_REMOVE_ADV_MONITOR: echo the
 * requested handle back to the socket and refresh passive scanning
 * if the removal succeeded (it may no longer be needed).
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Already little-endian in the request, copied through as-is. */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5469 
5470 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5471 {
5472 	struct mgmt_pending_cmd *cmd = data;
5473 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5474 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5475 
5476 	if (!handle)
5477 		return hci_remove_all_adv_monitor(hdev);
5478 
5479 	return hci_remove_single_adv_monitor(hdev, handle);
5480 }
5481 
/* Handler for MGMT_OP_REMOVE_ADV_MONITOR: queue the removal work,
 * rejecting the request while any conflicting monitor/LE operation
 * is still pending.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	/* Serialize against other monitor updates and LE state changes. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
				  mgmt_remove_adv_monitor_complete);

	if (err) {
		/* Work was never queued, so drop the pending cmd here. */
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
5527 
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA: translate the
 * controller's reply skb into a mgmt response. The reply layout depends
 * on whether BR/EDR Secure Connections is enabled (extended variant
 * carries the additional P-256 hash/randomizer).
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even when err is 0, the skb itself may carry a failure. */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: only the P-192 hash and randomizer. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused 256-bit fields from the response. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5594 
5595 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5596 {
5597 	struct mgmt_pending_cmd *cmd = data;
5598 
5599 	if (bredr_sc_enabled(hdev))
5600 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5601 	else
5602 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5603 
5604 	if (IS_ERR(cmd->skb))
5605 		return PTR_ERR(cmd->skb);
5606 	else
5607 		return 0;
5608 }
5609 
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: requires a powered,
 * SSP-capable controller; queues the read and replies from the
 * completion callback.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	/* Either allocation or queueing failed: fail the request now. */
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5651 
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA. Two request sizes are
 * accepted: the legacy form with only P-192 hash/randomizer, and the
 * extended form carrying both P-192 and P-256 values. Zero-valued
 * key material disables the corresponding OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy request: P-192 only, BR/EDR addresses only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended request: P-192 and P-256 values. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither of the two defined request sizes. */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5759 
5760 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5761 				  void *data, u16 len)
5762 {
5763 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5764 	u8 status;
5765 	int err;
5766 
5767 	bt_dev_dbg(hdev, "sock %p", sk);
5768 
5769 	if (cp->addr.type != BDADDR_BREDR)
5770 		return mgmt_cmd_complete(sk, hdev->id,
5771 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5772 					 MGMT_STATUS_INVALID_PARAMS,
5773 					 &cp->addr, sizeof(cp->addr));
5774 
5775 	hci_dev_lock(hdev);
5776 
5777 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5778 		hci_remote_oob_data_clear(hdev);
5779 		status = MGMT_STATUS_SUCCESS;
5780 		goto done;
5781 	}
5782 
5783 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5784 	if (err < 0)
5785 		status = MGMT_STATUS_INVALID_PARAMS;
5786 	else
5787 		status = MGMT_STATUS_SUCCESS;
5788 
5789 done:
5790 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5791 				status, &cp->addr, sizeof(cp->addr));
5792 
5793 	hci_dev_unlock(hdev);
5794 	return err;
5795 }
5796 
5797 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5798 {
5799 	struct mgmt_pending_cmd *cmd;
5800 
5801 	bt_dev_dbg(hdev, "status %u", status);
5802 
5803 	hci_dev_lock(hdev);
5804 
5805 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5806 	if (!cmd)
5807 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5808 
5809 	if (!cmd)
5810 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5811 
5812 	if (cmd) {
5813 		cmd->cmd_complete(cmd, mgmt_status(status));
5814 		mgmt_pending_remove(cmd);
5815 	}
5816 
5817 	hci_dev_unlock(hdev);
5818 }
5819 
/* Validate a requested discovery type against the controller's
 * capabilities. On failure, *mgmt_status is set to the reason and
 * false is returned. DISCOV_TYPE_INTERLEAVED requires both LE and
 * BR/EDR support — note the deliberate fallthrough.
 */
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
{
	switch (type) {
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	case DISCOV_TYPE_INTERLEAVED:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		fallthrough;
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}
5846 
/* Completion callback shared by all Start Discovery variants: replies
 * to the socket and moves the discovery state machine forward. Bails
 * out if the cmd is no longer pending (stale completion).
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply carries the single type byte stored in cmd->param. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5865 
/* hci_cmd_sync work callback: run the actual discovery procedure. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5870 
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects the variant): validate
 * power/discovery state and the requested type, then queue the
 * discovery work and move the state machine to STARTING.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while another discovery or periodic inquiry is running. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5941 
/* Handler for MGMT_OP_START_DISCOVERY. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5948 
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5956 
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery()
 * but with result filtering — an RSSI threshold plus an optional list
 * of 128-bit UUIDs appended to the request. Validates the
 * variable-length UUID list before installing the filter.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound keeps expected_len below from overflowing u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while another discovery or periodic inquiry is running. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* Total length must exactly match the declared UUID count. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6068 
6069 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6070 {
6071 	struct mgmt_pending_cmd *cmd;
6072 
6073 	bt_dev_dbg(hdev, "status %u", status);
6074 
6075 	hci_dev_lock(hdev);
6076 
6077 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6078 	if (cmd) {
6079 		cmd->cmd_complete(cmd, mgmt_status(status));
6080 		mgmt_pending_remove(cmd);
6081 	}
6082 
6083 	hci_dev_unlock(hdev);
6084 }
6085 
/* Completion callback for MGMT_OP_STOP_DISCOVERY: replies to the
 * socket and, on success, marks discovery as stopped. Bails out if
 * the cmd is no longer pending (stale completion).
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Reply carries the single type byte stored in cmd->param. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6102 
/* hci_cmd_sync work callback: stop the running discovery procedure. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6107 
/* Handler for MGMT_OP_STOP_DISCOVERY: the requested type must match
 * the discovery that is actually running; queues the stop work and
 * moves the state machine to STOPPING.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* Requested type must match the discovery in progress. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6152 
6153 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6154 			u16 len)
6155 {
6156 	struct mgmt_cp_confirm_name *cp = data;
6157 	struct inquiry_entry *e;
6158 	int err;
6159 
6160 	bt_dev_dbg(hdev, "sock %p", sk);
6161 
6162 	hci_dev_lock(hdev);
6163 
6164 	if (!hci_discovery_active(hdev)) {
6165 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6166 					MGMT_STATUS_FAILED, &cp->addr,
6167 					sizeof(cp->addr));
6168 		goto failed;
6169 	}
6170 
6171 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6172 	if (!e) {
6173 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6174 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6175 					sizeof(cp->addr));
6176 		goto failed;
6177 	}
6178 
6179 	if (cp->name_known) {
6180 		e->name_state = NAME_KNOWN;
6181 		list_del(&e->list);
6182 	} else {
6183 		e->name_state = NAME_NEEDED;
6184 		hci_inquiry_cache_update_resolve(hdev, e);
6185 	}
6186 
6187 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6188 				&cp->addr, sizeof(cp->addr));
6189 
6190 failed:
6191 	hci_dev_unlock(hdev);
6192 	return err;
6193 }
6194 
6195 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6196 			u16 len)
6197 {
6198 	struct mgmt_cp_block_device *cp = data;
6199 	u8 status;
6200 	int err;
6201 
6202 	bt_dev_dbg(hdev, "sock %p", sk);
6203 
6204 	if (!bdaddr_type_is_valid(cp->addr.type))
6205 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6206 					 MGMT_STATUS_INVALID_PARAMS,
6207 					 &cp->addr, sizeof(cp->addr));
6208 
6209 	hci_dev_lock(hdev);
6210 
6211 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6212 				  cp->addr.type);
6213 	if (err < 0) {
6214 		status = MGMT_STATUS_FAILED;
6215 		goto done;
6216 	}
6217 
6218 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6219 		   sk);
6220 	status = MGMT_STATUS_SUCCESS;
6221 
6222 done:
6223 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6224 				&cp->addr, sizeof(cp->addr));
6225 
6226 	hci_dev_unlock(hdev);
6227 
6228 	return err;
6229 }
6230 
6231 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6232 			  u16 len)
6233 {
6234 	struct mgmt_cp_unblock_device *cp = data;
6235 	u8 status;
6236 	int err;
6237 
6238 	bt_dev_dbg(hdev, "sock %p", sk);
6239 
6240 	if (!bdaddr_type_is_valid(cp->addr.type))
6241 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6242 					 MGMT_STATUS_INVALID_PARAMS,
6243 					 &cp->addr, sizeof(cp->addr));
6244 
6245 	hci_dev_lock(hdev);
6246 
6247 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6248 				  cp->addr.type);
6249 	if (err < 0) {
6250 		status = MGMT_STATUS_INVALID_PARAMS;
6251 		goto done;
6252 	}
6253 
6254 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6255 		   sk);
6256 	status = MGMT_STATUS_SUCCESS;
6257 
6258 done:
6259 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6260 				&cp->addr, sizeof(cp->addr));
6261 
6262 	hci_dev_unlock(hdev);
6263 
6264 	return err;
6265 }
6266 
/* hci_cmd_sync work callback: refresh the EIR data after the Device
 * ID record changed.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6271 
6272 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6273 			 u16 len)
6274 {
6275 	struct mgmt_cp_set_device_id *cp = data;
6276 	int err;
6277 	__u16 source;
6278 
6279 	bt_dev_dbg(hdev, "sock %p", sk);
6280 
6281 	source = __le16_to_cpu(cp->source);
6282 
6283 	if (source > 0x0002)
6284 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6285 				       MGMT_STATUS_INVALID_PARAMS);
6286 
6287 	hci_dev_lock(hdev);
6288 
6289 	hdev->devid_source = source;
6290 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6291 	hdev->devid_product = __le16_to_cpu(cp->product);
6292 	hdev->devid_version = __le16_to_cpu(cp->version);
6293 
6294 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6295 				NULL, 0);
6296 
6297 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6298 
6299 	hci_dev_unlock(hdev);
6300 
6301 	return err;
6302 }
6303 
/* Log the outcome of re-enabling an advertising instance. */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6311 
/* Completion callback for MGMT_OP_SET_ADVERTISING: sync the
 * HCI_ADVERTISING flag with the controller state, answer all pending
 * Set Advertising commands, and — when the setting was just disabled —
 * resume any previously configured instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending Set Advertising command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's actual advertising state. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		/* No current instance: fall back to the first configured. */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6359 
/* hci_cmd_sync work callback for MGMT_OP_SET_ADVERTISING: apply the
 * requested mode. Value 0x02 additionally marks advertising as
 * connectable; any non-zero value enables advertising on instance 0,
 * zero disables it.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6393 
/* Handle the MGMT_OP_SET_ADVERTISING command: enable or disable LE
 * advertising (0x00 off, 0x01 on, 0x02 connectable advertising).
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Advertising requires LE support on the controller */
	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Reject while advertising is temporarily paused elsewhere */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings if a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising/Set LE operation at a time */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6478 
6479 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6480 			      void *data, u16 len)
6481 {
6482 	struct mgmt_cp_set_static_address *cp = data;
6483 	int err;
6484 
6485 	bt_dev_dbg(hdev, "sock %p", sk);
6486 
6487 	if (!lmp_le_capable(hdev))
6488 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6489 				       MGMT_STATUS_NOT_SUPPORTED);
6490 
6491 	if (hdev_is_powered(hdev))
6492 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6493 				       MGMT_STATUS_REJECTED);
6494 
6495 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6496 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6497 			return mgmt_cmd_status(sk, hdev->id,
6498 					       MGMT_OP_SET_STATIC_ADDRESS,
6499 					       MGMT_STATUS_INVALID_PARAMS);
6500 
6501 		/* Two most significant bits shall be set */
6502 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6503 			return mgmt_cmd_status(sk, hdev->id,
6504 					       MGMT_OP_SET_STATIC_ADDRESS,
6505 					       MGMT_STATUS_INVALID_PARAMS);
6506 	}
6507 
6508 	hci_dev_lock(hdev);
6509 
6510 	bacpy(&hdev->static_addr, &cp->bdaddr);
6511 
6512 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6513 	if (err < 0)
6514 		goto unlock;
6515 
6516 	err = new_settings(hdev, sk);
6517 
6518 unlock:
6519 	hci_dev_unlock(hdev);
6520 	return err;
6521 }
6522 
6523 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6524 			   void *data, u16 len)
6525 {
6526 	struct mgmt_cp_set_scan_params *cp = data;
6527 	__u16 interval, window;
6528 	int err;
6529 
6530 	bt_dev_dbg(hdev, "sock %p", sk);
6531 
6532 	if (!lmp_le_capable(hdev))
6533 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6534 				       MGMT_STATUS_NOT_SUPPORTED);
6535 
6536 	interval = __le16_to_cpu(cp->interval);
6537 
6538 	if (interval < 0x0004 || interval > 0x4000)
6539 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6540 				       MGMT_STATUS_INVALID_PARAMS);
6541 
6542 	window = __le16_to_cpu(cp->window);
6543 
6544 	if (window < 0x0004 || window > 0x4000)
6545 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6546 				       MGMT_STATUS_INVALID_PARAMS);
6547 
6548 	if (window > interval)
6549 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6550 				       MGMT_STATUS_INVALID_PARAMS);
6551 
6552 	hci_dev_lock(hdev);
6553 
6554 	hdev->le_scan_interval = interval;
6555 	hdev->le_scan_window = window;
6556 
6557 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6558 				NULL, 0);
6559 
6560 	/* If background scan is running, restart it so new parameters are
6561 	 * loaded.
6562 	 */
6563 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6564 	    hdev->discovery.state == DISCOVERY_STOPPED)
6565 		hci_update_passive_scan(hdev);
6566 
6567 	hci_dev_unlock(hdev);
6568 
6569 	return err;
6570 }
6571 
6572 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6573 {
6574 	struct mgmt_pending_cmd *cmd = data;
6575 
6576 	bt_dev_dbg(hdev, "err %d", err);
6577 
6578 	if (err) {
6579 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6580 				mgmt_status(err));
6581 	} else {
6582 		struct mgmt_mode *cp = cmd->param;
6583 
6584 		if (cp->val)
6585 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6586 		else
6587 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6588 
6589 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6590 		new_settings(hdev, cmd->sk);
6591 	}
6592 
6593 	mgmt_pending_free(cmd);
6594 }
6595 
6596 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6597 {
6598 	struct mgmt_pending_cmd *cmd = data;
6599 	struct mgmt_mode *cp = cmd->param;
6600 
6601 	return hci_write_fast_connectable_sync(hdev, cp->val);
6602 }
6603 
/* Handle the MGMT_OP_SET_FAST_CONNECTABLE command: toggle fast
 * connectable mode on a BR/EDR capable controller.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires BR/EDR enabled and a controller of at least
	 * Bluetooth version 1.2.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do when the requested mode matches the current one */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* When powered off only the flag is toggled; the controller is
	 * programmed later during power on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6659 
6660 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6661 {
6662 	struct mgmt_pending_cmd *cmd = data;
6663 
6664 	bt_dev_dbg(hdev, "err %d", err);
6665 
6666 	if (err) {
6667 		u8 mgmt_err = mgmt_status(err);
6668 
6669 		/* We need to restore the flag if related HCI commands
6670 		 * failed.
6671 		 */
6672 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6673 
6674 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6675 	} else {
6676 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6677 		new_settings(hdev, cmd->sk);
6678 	}
6679 
6680 	mgmt_pending_free(cmd);
6681 }
6682 
6683 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6684 {
6685 	int status;
6686 
6687 	status = hci_write_fast_connectable_sync(hdev, false);
6688 
6689 	if (!status)
6690 		status = hci_update_scan_sync(hdev);
6691 
6692 	/* Since only the advertising data flags will change, there
6693 	 * is no need to update the scan response data.
6694 	 */
6695 	if (!status)
6696 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6697 
6698 	return status;
6699 }
6700 
/* Handle the MGMT_OP_SET_BREDR command: enable or disable BR/EDR
 * support on a dual-mode (BR/EDR + LE) controller.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only meaningful on dual-mode controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* LE must be enabled before BR/EDR can be toggled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do if the setting already matches */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* When powered off only the flags are toggled; disabling BR/EDR
	 * also clears the BR/EDR-only settings.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6800 
6801 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6802 {
6803 	struct mgmt_pending_cmd *cmd = data;
6804 	struct mgmt_mode *cp;
6805 
6806 	bt_dev_dbg(hdev, "err %d", err);
6807 
6808 	if (err) {
6809 		u8 mgmt_err = mgmt_status(err);
6810 
6811 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6812 		goto done;
6813 	}
6814 
6815 	cp = cmd->param;
6816 
6817 	switch (cp->val) {
6818 	case 0x00:
6819 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6820 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6821 		break;
6822 	case 0x01:
6823 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6824 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6825 		break;
6826 	case 0x02:
6827 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6828 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
6829 		break;
6830 	}
6831 
6832 	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6833 	new_settings(hdev, cmd->sk);
6834 
6835 done:
6836 	mgmt_pending_free(cmd);
6837 }
6838 
6839 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6840 {
6841 	struct mgmt_pending_cmd *cmd = data;
6842 	struct mgmt_mode *cp = cmd->param;
6843 	u8 val = !!cp->val;
6844 
6845 	/* Force write of val */
6846 	hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6847 
6848 	return hci_write_sc_support_sync(hdev, val);
6849 }
6850 
/* Handle the MGMT_OP_SET_SECURE_CONN command: configure Secure
 * Connections support (0x00 off, 0x01 enabled, 0x02 SC-only mode).
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC needs either controller support or LE (where it is a host
	 * stack feature).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR enabled on an SC capable controller, SSP must be
	 * enabled before SC can be configured.
	 */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* If no HCI interaction is needed (powered off, no controller
	 * SC support, or BR/EDR disabled), just toggle the flags and
	 * respond directly.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do when both flags already match the request */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6931 
/* Handle the MGMT_OP_SET_DEBUG_KEYS command: control keeping of debug
 * keys (0x01) and additionally their generation/use (0x02).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Any non-zero value means debug keys are kept */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 turns on active use of debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Propagate a change of the use-flag to a powered controller
	 * with SSP enabled.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6978 
/* Handle the MGMT_OP_SET_PRIVACY command: enable or disable LE privacy
 * (0x01, or 0x02 for limited privacy) and install the local IRK.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Privacy may only be reconfigured while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the RPA expired so a fresh one gets generated */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the IRK when privacy is being disabled */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7035 
7036 static bool irk_is_valid(struct mgmt_irk_info *irk)
7037 {
7038 	switch (irk->addr.type) {
7039 	case BDADDR_LE_PUBLIC:
7040 		return true;
7041 
7042 	case BDADDR_LE_RANDOM:
7043 		/* Two most significant bits shall be set */
7044 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7045 			return false;
7046 		return true;
7047 	}
7048 
7049 	return false;
7050 }
7051 
/* Handle the MGMT_OP_LOAD_IRKS command: replace the stored Identity
 * Resolving Keys with the list supplied by user space.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total message size within u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before touching the stored keys */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped, not rejected */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7122 
7123 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7124 {
7125 	if (key->initiator != 0x00 && key->initiator != 0x01)
7126 		return false;
7127 
7128 	switch (key->addr.type) {
7129 	case BDADDR_LE_PUBLIC:
7130 		return true;
7131 
7132 	case BDADDR_LE_RANDOM:
7133 		/* Two most significant bits shall be set */
7134 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7135 			return false;
7136 		return true;
7137 	}
7138 
7139 	return false;
7140 }
7141 
/* Handle the MGMT_OP_LOAD_LONG_TERM_KEYS command: replace the stored
 * SMP Long Term Keys with the list supplied by user space.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps the total message size within u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Blocked or invalid keys are skipped, not rejected */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		if (!ltk_is_valid(key)) {
			bt_dev_warn(hdev, "Invalid LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys fall through to default and are
			 * therefore never added to the store.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7234 
7235 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7236 {
7237 	struct mgmt_pending_cmd *cmd = data;
7238 	struct hci_conn *conn = cmd->user_data;
7239 	struct mgmt_cp_get_conn_info *cp = cmd->param;
7240 	struct mgmt_rp_get_conn_info rp;
7241 	u8 status;
7242 
7243 	bt_dev_dbg(hdev, "err %d", err);
7244 
7245 	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7246 
7247 	status = mgmt_status(err);
7248 	if (status == MGMT_STATUS_SUCCESS) {
7249 		rp.rssi = conn->rssi;
7250 		rp.tx_power = conn->tx_power;
7251 		rp.max_tx_power = conn->max_tx_power;
7252 	} else {
7253 		rp.rssi = HCI_RSSI_INVALID;
7254 		rp.tx_power = HCI_TX_POWER_INVALID;
7255 		rp.max_tx_power = HCI_TX_POWER_INVALID;
7256 	}
7257 
7258 	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7259 			  &rp, sizeof(rp));
7260 
7261 	mgmt_pending_free(cmd);
7262 }
7263 
/* hci_cmd_sync work for Get Connection Information: refresh RSSI and,
 * where still unknown, the TX power values for the connection.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* The completion handler reads the values via this pointer */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7301 
/* Handle the MGMT_OP_GET_CONN_INFO command: return RSSI and TX power
 * for a connection, refreshing the cached values when they are stale.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up on the link type matching the address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7392 
/* Completion callback for Get Clock Information: reply with the local
 * clock and, when a connection was involved, the piconet clock.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On failure the reply carries only the (zeroed) address info */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7423 
/* hci_cmd_sync work for Get Clock Information: read the local clock
 * and the piconet clock of the addressed connection.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* Zeroed hci_cp means which = 0x00 (local clock) and handle
	 * 0x0000; the result of this read is deliberately ignored —
	 * the completion handler reports hdev->clock regardless.
	 */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* The completion handler reads the clock via this pointer */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7445 
/* Get Clock Info (MGMT_OP_GET_CLOCK_INFO): read the local clock and, when a
 * specific BR/EDR peer is given, the piconet clock of that connection.
 * Queues get_clock_info_sync; on success the reply is sent later from
 * get_clock_info_complete.
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Error replies echo the requested address with zeroed clocks */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Piconet clocks only exist for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY requests only the local clock; otherwise an established
	 * connection to the given address must exist.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	/* On queueing failure reply here and drop the pending command */
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7509 
7510 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7511 {
7512 	struct hci_conn *conn;
7513 
7514 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7515 	if (!conn)
7516 		return false;
7517 
7518 	if (conn->dst_type != type)
7519 		return false;
7520 
7521 	if (conn->state != BT_CONNECTED)
7522 		return false;
7523 
7524 	return true;
7525 }
7526 
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Creates the params entry if it does not exist yet */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Unlink from any pending list before re-filing below */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connect attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7571 
7572 static void device_added(struct sock *sk, struct hci_dev *hdev,
7573 			 bdaddr_t *bdaddr, u8 type, u8 action)
7574 {
7575 	struct mgmt_ev_device_added ev;
7576 
7577 	bacpy(&ev.addr.bdaddr, bdaddr);
7578 	ev.addr.type = type;
7579 	ev.action = action;
7580 
7581 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7582 }
7583 
/* Sync hook for Add Device: re-evaluate passive scanning since the
 * pending-connection/report lists may have changed.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7588 
/* Add Device (MGMT_OP_ADD_DEVICE): add a device to the accept list
 * (BR/EDR) or configure LE auto-connect behavior according to cp->action
 * (0x00 background scan, 0x01 allow incoming/direct, 0x02 auto-connect).
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Pick up the flags of the (possibly just created) entry
		 * for the Device Flags Changed event below.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7690 
7691 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7692 			   bdaddr_t *bdaddr, u8 type)
7693 {
7694 	struct mgmt_ev_device_removed ev;
7695 
7696 	bacpy(&ev.addr.bdaddr, bdaddr);
7697 	ev.addr.type = type;
7698 
7699 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7700 }
7701 
/* Sync hook for Remove Device: re-evaluate passive scanning since the
 * pending-connection/report lists may have changed.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7706 
/* Remove Device (MGMT_OP_REMOVE_DEVICE): undo a previous Add Device.
 * With a specific address, remove it from the accept list (BR/EDR) or
 * free its LE connection parameters. With BDADDR_ANY (and type 0),
 * wipe the whole accept list and all explicitly added LE parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			/* No LE state touched, skip the passive scan update */
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed here */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY is only valid with address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a connect attempt in flight but
			 * demote them to explicit-connect only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7830 
/* Load Connection Parameters (MGMT_OP_LOAD_CONN_PARAM): replace the stored
 * LE connection parameters with the supplied list. Individual invalid
 * entries are logged and skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps expected_len below within u16 range */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	/* Start from a clean slate of non-persistent entries */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7915 
/* Set External Configuration (MGMT_OP_SET_EXTERNAL_CONFIG): toggle the
 * HCI_EXT_CONFIGURED flag on a powered-off controller that declares the
 * external-config quirk. If this changes whether the controller counts as
 * configured, the index is re-registered accordingly.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The UNCONFIGURED flag no longer matching is_configured() means the
	 * controller must move between the configured and unconfigured index
	 * lists; remove and re-add it with the flag toggled.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7971 
/* Set Public Address (MGMT_OP_SET_PUBLIC_ADDRESS): store a public address
 * for an unconfigured, powered-off controller whose driver provides a
 * set_bdaddr hook. If this completes the configuration, kick off power-on.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Setting the address may have completed configuration; move the
	 * index to the configured list and power the controller on.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8023 
/* Completion handler for the SSP branch of Read Local OOB Extended Data:
 * parse the controller's Read Local OOB (Extended) Data response from
 * cmd->skb, build the EIR payload and reply; on success also broadcast a
 * Local OOB Data Updated event to subscribed sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail if the command was already cancelled/replaced */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status already went through mgmt_status()
		 * above, so this second mapping looks redundant — confirm.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy SSP: only C192/R192 are available */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: C256/R256, plus C192/R192 unless the
		 * controller is in SC-only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* eir_len == 0 also covers the error paths where h192 etc. were
	 * never assigned.
	 */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8146 
8147 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8148 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8149 {
8150 	struct mgmt_pending_cmd *cmd;
8151 	int err;
8152 
8153 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8154 			       cp, sizeof(*cp));
8155 	if (!cmd)
8156 		return -ENOMEM;
8157 
8158 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8159 				 read_local_oob_ext_data_complete);
8160 
8161 	if (err < 0) {
8162 		mgmt_pending_remove(cmd);
8163 		return err;
8164 	}
8165 
8166 	return 0;
8167 }
8168 
/* Read Local OOB Extended Data (MGMT_OP_READ_LOCAL_OOB_EXT_DATA): return
 * EIR-encoded out-of-band pairing data for either BR/EDR or LE depending
 * on cp->type. The BR/EDR SSP case defers to the controller via
 * read_local_ssp_oob_req; all other cases are answered synchronously.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: work out the worst-case eir_len for the allocation
	 * below and catch unsupported transports early.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually fill in the EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Needs the controller; reply comes asynchronously */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] encodes the LE address type: 0x01 random (static),
		 * 0x00 public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* Role: 0x02 peripheral-preferred when advertising,
		 * 0x01 central-preferred otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8329 
8330 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8331 {
8332 	u32 flags = 0;
8333 
8334 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8335 	flags |= MGMT_ADV_FLAG_DISCOV;
8336 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8337 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8338 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8339 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8340 	flags |= MGMT_ADV_PARAM_DURATION;
8341 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8342 	flags |= MGMT_ADV_PARAM_INTERVALS;
8343 	flags |= MGMT_ADV_PARAM_TX_POWER;
8344 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8345 
8346 	/* In extended adv TX_POWER returned from Set Adv Param
8347 	 * will be always valid.
8348 	 */
8349 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8350 		flags |= MGMT_ADV_FLAG_TX_POWER;
8351 
8352 	if (ext_adv_capable(hdev)) {
8353 		flags |= MGMT_ADV_FLAG_SEC_1M;
8354 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8355 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8356 
8357 		if (le_2m_capable(hdev))
8358 			flags |= MGMT_ADV_FLAG_SEC_2M;
8359 
8360 		if (le_coded_capable(hdev))
8361 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8362 	}
8363 
8364 	return flags;
8365 }
8366 
/* Read Advertising Features (MGMT_OP_READ_ADV_FEATURES): report supported
 * advertising flags, data-size limits and the list of configured instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per instance for the instance-id list */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the comment above says le_num_of_adv_sets but
		 * the check compares against adv_instance_cnt — confirm which
		 * bound is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8421 
/* Number of bytes the local name occupies when appended as an EIR field
 * (length + type + possibly shortened name).
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}
8428 
/* Maximum user-supplied TLV length for advertising data (is_adv_data) or
 * scan response data, after reserving room for any fields the kernel
 * appends itself based on adv_flags.
 */
static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
			   bool is_adv_data)
{
	u8 max_len = max_adv_len(hdev);

	if (is_adv_data) {
		/* Reserve 3 bytes for a kernel-managed Flags field */
		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
				 MGMT_ADV_FLAG_LIMITED_DISCOV |
				 MGMT_ADV_FLAG_MANAGED_FLAGS))
			max_len -= 3;

		/* Reserve 3 bytes for a kernel-managed TX power field */
		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
			max_len -= 3;
	} else {
		/* Reserve room for the local name in the scan response */
		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
			max_len -= calculate_name_len(hdev);

		/* Reserve 4 bytes for a kernel-managed Appearance field */
		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
			max_len -= 4;
	}

	return max_len;
}
8452 
8453 static bool flags_managed(u32 adv_flags)
8454 {
8455 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8456 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8457 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8458 }
8459 
8460 static bool tx_power_managed(u32 adv_flags)
8461 {
8462 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8463 }
8464 
8465 static bool name_managed(u32 adv_flags)
8466 {
8467 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8468 }
8469 
8470 static bool appearance_managed(u32 adv_flags)
8471 {
8472 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8473 }
8474 
/* Validate user-supplied advertising/scan-response TLV data: it must fit
 * within the allowed length, be well-formed length-type-value triplets,
 * and must not contain fields the kernel manages itself (per adv_flags).
 */
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
			      u8 len, bool is_adv_data)
{
	int i, cur_len;
	u8 max_len;

	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

	if (len > max_len)
		return false;

	/* Make sure that the data is correctly formatted. */
	for (i = 0; i < len; i += (cur_len + 1)) {
		cur_len = data[i];

		if (!cur_len)
			continue;

		/* NOTE(review): data[i + 1] is read before the i + cur_len
		 * bounds check below; a non-zero length byte at the very end
		 * peeks one byte past len — confirm callers always provide a
		 * buffer that tolerates this.
		 */
		if (data[i + 1] == EIR_FLAGS &&
		    (!is_adv_data || flags_managed(adv_flags)))
			return false;

		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
			return false;

		if (data[i + 1] == EIR_APPEARANCE &&
		    appearance_managed(adv_flags))
			return false;

		/* If the current field length would exceed the total data
		 * length, then it's invalid.
		 */
		if (i + cur_len >= len)
			return false;
	}

	return true;
}
8519 
/* Validate requested advertising flags: every flag must be supported by
 * the controller and at most one secondary-PHY (MGMT_ADV_FLAG_SEC_*) flag
 * may be set.
 */
static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
{
	u32 supported_flags, phy_flags;

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
	/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR is
	 * non-zero iff more than one sec flag is set.
	 */
	if (adv_flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return false;

	return true;
}
8535 
8536 static bool adv_busy(struct hci_dev *hdev)
8537 {
8538 	return pending_find(MGMT_OP_SET_LE, hdev);
8539 }
8540 
8541 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8542 			     int err)
8543 {
8544 	struct adv_info *adv, *n;
8545 
8546 	bt_dev_dbg(hdev, "err %d", err);
8547 
8548 	hci_dev_lock(hdev);
8549 
8550 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8551 		u8 instance;
8552 
8553 		if (!adv->pending)
8554 			continue;
8555 
8556 		if (!err) {
8557 			adv->pending = false;
8558 			continue;
8559 		}
8560 
8561 		instance = adv->instance;
8562 
8563 		if (hdev->cur_adv_instance == instance)
8564 			cancel_adv_timeout(hdev);
8565 
8566 		hci_remove_adv_instance(hdev, instance);
8567 		mgmt_advertising_removed(sk, hdev, instance);
8568 	}
8569 
8570 	hci_dev_unlock(hdev);
8571 }
8572 
8573 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8574 {
8575 	struct mgmt_pending_cmd *cmd = data;
8576 	struct mgmt_cp_add_advertising *cp = cmd->param;
8577 	struct mgmt_rp_add_advertising rp;
8578 
8579 	memset(&rp, 0, sizeof(rp));
8580 
8581 	rp.instance = cp->instance;
8582 
8583 	if (err)
8584 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8585 				mgmt_status(err));
8586 	else
8587 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8588 				  mgmt_status(err), &rp, sizeof(rp));
8589 
8590 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8591 
8592 	mgmt_pending_free(cmd);
8593 }
8594 
8595 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8596 {
8597 	struct mgmt_pending_cmd *cmd = data;
8598 	struct mgmt_cp_add_advertising *cp = cmd->param;
8599 
8600 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8601 }
8602 
/* Handler for MGMT_OP_ADD_ADVERTISING: register (or overwrite) advertising
 * instance cp->instance with the supplied flags, timeout, duration and
 * TLV adv/scan-rsp data, then schedule it if appropriate.
 *
 * Returns 0 or a negative errno; the mgmt status reply is sent through
 * mgmt_cmd_status()/mgmt_cmd_complete() in all paths.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and bounded by the controller's
	 * advertising set count.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The command carries adv data and scan rsp data back to back after
	 * the fixed header; the total length must match exactly.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Validate both TLV blobs before touching any instance state. */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Hand the instance we decided on to add_advertising_sync() via the
	 * copied command parameters.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8737 
/* Completion callback for MGMT_OP_ADD_EXT_ADV_PARAMS: reply with the
 * instance's TX power and remaining data space on success, or tear the
 * instance down again and report a status on failure.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
					int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Instance may already be gone; nothing to report in that case. */
	adv = hci_find_adv_instance(hdev, cp->instance);
	if (!adv)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (err) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(err));
	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(err), &rp, sizeof(rp));
	}

unlock:
	/* NOTE(review): cmd was already dereferenced above, so this NULL
	 * check looks redundant — confirm before removing.
	 */
	if (cmd)
		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
}
8788 
8789 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8790 {
8791 	struct mgmt_pending_cmd *cmd = data;
8792 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8793 
8794 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8795 }
8796 
/* Handler for MGMT_OP_ADD_EXT_ADV_PARAMS: create an advertising instance
 * with the given parameters but no adv/scan-rsp data yet; the data is
 * supplied later through MGMT_OP_ADD_EXT_ADV_DATA.
 *
 * Returns 0 or a negative errno; the mgmt reply carries the instance's
 * TX power and the maximum data lengths for the chosen flags.
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based and bounded by the controller's
	 * advertising set count.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Undo the instance created above. */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: nothing to program yet, reply with the
		 * defaults immediately.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8912 
8913 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8914 {
8915 	struct mgmt_pending_cmd *cmd = data;
8916 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8917 	struct mgmt_rp_add_advertising rp;
8918 
8919 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8920 
8921 	memset(&rp, 0, sizeof(rp));
8922 
8923 	rp.instance = cp->instance;
8924 
8925 	if (err)
8926 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8927 				mgmt_status(err));
8928 	else
8929 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8930 				  mgmt_status(err), &rp, sizeof(rp));
8931 
8932 	mgmt_pending_free(cmd);
8933 }
8934 
8935 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8936 {
8937 	struct mgmt_pending_cmd *cmd = data;
8938 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8939 	int err;
8940 
8941 	if (ext_adv_capable(hdev)) {
8942 		err = hci_update_adv_data_sync(hdev, cp->instance);
8943 		if (err)
8944 			return err;
8945 
8946 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8947 		if (err)
8948 			return err;
8949 
8950 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8951 	}
8952 
8953 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8954 }
8955 
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA: attach adv/scan-rsp data to an
 * instance previously created via MGMT_OP_ADD_EXT_ADV_PARAMS and, when
 * needed, schedule it for advertising.
 *
 * On most error paths the half-constructed instance is removed again
 * (clear_new_instance), matching the two-call registration contract.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by add_ext_adv_params first. */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9074 
9075 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9076 					int err)
9077 {
9078 	struct mgmt_pending_cmd *cmd = data;
9079 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9080 	struct mgmt_rp_remove_advertising rp;
9081 
9082 	bt_dev_dbg(hdev, "err %d", err);
9083 
9084 	memset(&rp, 0, sizeof(rp));
9085 	rp.instance = cp->instance;
9086 
9087 	if (err)
9088 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9089 				mgmt_status(err));
9090 	else
9091 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9092 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9093 
9094 	mgmt_pending_free(cmd);
9095 }
9096 
9097 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9098 {
9099 	struct mgmt_pending_cmd *cmd = data;
9100 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9101 	int err;
9102 
9103 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9104 	if (err)
9105 		return err;
9106 
9107 	if (list_empty(&hdev->adv_instances))
9108 		err = hci_disable_advertising_sync(hdev);
9109 
9110 	return err;
9111 }
9112 
9113 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
9114 			      void *data, u16 data_len)
9115 {
9116 	struct mgmt_cp_remove_advertising *cp = data;
9117 	struct mgmt_pending_cmd *cmd;
9118 	int err;
9119 
9120 	bt_dev_dbg(hdev, "sock %p", sk);
9121 
9122 	hci_dev_lock(hdev);
9123 
9124 	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
9125 		err = mgmt_cmd_status(sk, hdev->id,
9126 				      MGMT_OP_REMOVE_ADVERTISING,
9127 				      MGMT_STATUS_INVALID_PARAMS);
9128 		goto unlock;
9129 	}
9130 
9131 	if (pending_find(MGMT_OP_SET_LE, hdev)) {
9132 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9133 				      MGMT_STATUS_BUSY);
9134 		goto unlock;
9135 	}
9136 
9137 	if (list_empty(&hdev->adv_instances)) {
9138 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
9139 				      MGMT_STATUS_INVALID_PARAMS);
9140 		goto unlock;
9141 	}
9142 
9143 	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
9144 			       data_len);
9145 	if (!cmd) {
9146 		err = -ENOMEM;
9147 		goto unlock;
9148 	}
9149 
9150 	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
9151 				 remove_advertising_complete);
9152 	if (err < 0)
9153 		mgmt_pending_free(cmd);
9154 
9155 unlock:
9156 	hci_dev_unlock(hdev);
9157 
9158 	return err;
9159 }
9160 
9161 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9162 			     void *data, u16 data_len)
9163 {
9164 	struct mgmt_cp_get_adv_size_info *cp = data;
9165 	struct mgmt_rp_get_adv_size_info rp;
9166 	u32 flags, supported_flags;
9167 
9168 	bt_dev_dbg(hdev, "sock %p", sk);
9169 
9170 	if (!lmp_le_capable(hdev))
9171 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9172 				       MGMT_STATUS_REJECTED);
9173 
9174 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9175 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9176 				       MGMT_STATUS_INVALID_PARAMS);
9177 
9178 	flags = __le32_to_cpu(cp->flags);
9179 
9180 	/* The current implementation only supports a subset of the specified
9181 	 * flags.
9182 	 */
9183 	supported_flags = get_supported_adv_flags(hdev);
9184 	if (flags & ~supported_flags)
9185 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9186 				       MGMT_STATUS_INVALID_PARAMS);
9187 
9188 	rp.instance = cp->instance;
9189 	rp.flags = cp->flags;
9190 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9191 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9192 
9193 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9194 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9195 }
9196 
/* Dispatch table for mgmt commands: the array index IS the mgmt opcode,
 * so entries must never be reordered and gaps must be kept as placeholders.
 * Each entry gives the handler, the (minimum) fixed parameter size, and
 * flags such as HCI_MGMT_VAR_LEN (variable-length tail allowed),
 * HCI_MGMT_NO_HDEV (no controller index required) and HCI_MGMT_UNTRUSTED
 * (allowed on untrusted sockets).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9330 
9331 void mgmt_index_added(struct hci_dev *hdev)
9332 {
9333 	struct mgmt_ev_ext_index ev;
9334 
9335 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9336 		return;
9337 
9338 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9339 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0,
9340 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9341 		ev.type = 0x01;
9342 	} else {
9343 		mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9344 				 HCI_MGMT_INDEX_EVENTS);
9345 		ev.type = 0x00;
9346 	}
9347 
9348 	ev.bus = hdev->bus;
9349 
9350 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9351 			 HCI_MGMT_EXT_INDEX_EVENTS);
9352 }
9353 
9354 void mgmt_index_removed(struct hci_dev *hdev)
9355 {
9356 	struct mgmt_ev_ext_index ev;
9357 	struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX };
9358 
9359 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9360 		return;
9361 
9362 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);
9363 
9364 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9365 		mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0,
9366 				 HCI_MGMT_UNCONF_INDEX_EVENTS);
9367 		ev.type = 0x01;
9368 	} else {
9369 		mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
9370 				 HCI_MGMT_INDEX_EVENTS);
9371 		ev.type = 0x00;
9372 	}
9373 
9374 	ev.bus = hdev->bus;
9375 
9376 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
9377 			 HCI_MGMT_EXT_INDEX_EVENTS);
9378 
9379 	/* Cancel any remaining timed work */
9380 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
9381 		return;
9382 	cancel_delayed_work_sync(&hdev->discov_off);
9383 	cancel_delayed_work_sync(&hdev->service_cache);
9384 	cancel_delayed_work_sync(&hdev->rpa_expired);
9385 }
9386 
9387 void mgmt_power_on(struct hci_dev *hdev, int err)
9388 {
9389 	struct cmd_lookup match = { NULL, hdev };
9390 
9391 	bt_dev_dbg(hdev, "err %d", err);
9392 
9393 	hci_dev_lock(hdev);
9394 
9395 	if (!err) {
9396 		restart_le_actions(hdev);
9397 		hci_update_passive_scan(hdev);
9398 	}
9399 
9400 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
9401 
9402 	new_settings(hdev, match.sk);
9403 
9404 	if (match.sk)
9405 		sock_put(match.sk);
9406 
9407 	hci_dev_unlock(hdev);
9408 }
9409 
/* Power-off handling (callers hold the required locks): answer all
 * pending Set Powered commands, fail all other pending commands with a
 * status matching the reason for the power-off, reset the class of
 * device, and emit a New Settings event.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		match.mgmt_status = MGMT_STATUS_INVALID_INDEX;
	else
		match.mgmt_status = MGMT_STATUS_NOT_POWERED;

	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match);

	/* Only announce the class-of-device reset if it was non-zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9443 
9444 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9445 {
9446 	struct mgmt_pending_cmd *cmd;
9447 	u8 status;
9448 
9449 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9450 	if (!cmd)
9451 		return;
9452 
9453 	if (err == -ERFKILL)
9454 		status = MGMT_STATUS_RFKILLED;
9455 	else
9456 		status = MGMT_STATUS_FAILED;
9457 
9458 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9459 
9460 	mgmt_pending_remove(cmd);
9461 }
9462 
9463 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9464 		       bool persistent)
9465 {
9466 	struct mgmt_ev_new_link_key ev;
9467 
9468 	memset(&ev, 0, sizeof(ev));
9469 
9470 	ev.store_hint = persistent;
9471 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9472 	ev.key.addr.type = BDADDR_BREDR;
9473 	ev.key.type = key->type;
9474 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9475 	ev.key.pin_len = key->pin_len;
9476 
9477 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9478 }
9479 
9480 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9481 {
9482 	switch (ltk->type) {
9483 	case SMP_LTK:
9484 	case SMP_LTK_RESPONDER:
9485 		if (ltk->authenticated)
9486 			return MGMT_LTK_AUTHENTICATED;
9487 		return MGMT_LTK_UNAUTHENTICATED;
9488 	case SMP_LTK_P256:
9489 		if (ltk->authenticated)
9490 			return MGMT_LTK_P256_AUTH;
9491 		return MGMT_LTK_P256_UNAUTH;
9492 	case SMP_LTK_P256_DEBUG:
9493 		return MGMT_LTK_P256_DEBUG;
9494 	}
9495 
9496 	return MGMT_LTK_UNAUTHENTICATED;
9497 }
9498 
/* Notify userspace of a new LE Long Term Key.
 *
 * @persistent: store hint — suppressed for peers using non-identity
 * (resolvable/non-resolvable random) addresses, see below.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK (as opposed to SMP_LTK_RESPONDER) marks the
	 * initiator-distributed key.
	 */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9541 
9542 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9543 {
9544 	struct mgmt_ev_new_irk ev;
9545 
9546 	memset(&ev, 0, sizeof(ev));
9547 
9548 	ev.store_hint = persistent;
9549 
9550 	bacpy(&ev.rpa, &irk->rpa);
9551 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9552 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9553 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9554 
9555 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9556 }
9557 
9558 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9559 		   bool persistent)
9560 {
9561 	struct mgmt_ev_new_csrk ev;
9562 
9563 	memset(&ev, 0, sizeof(ev));
9564 
9565 	/* Devices using resolvable or non-resolvable random addresses
9566 	 * without providing an identity resolving key don't require
9567 	 * to store signature resolving keys. Their addresses will change
9568 	 * the next time around.
9569 	 *
9570 	 * Only when a remote device provides an identity address
9571 	 * make sure the signature resolving key is stored. So allow
9572 	 * static random and public addresses here.
9573 	 */
9574 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9575 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9576 		ev.store_hint = 0x00;
9577 	else
9578 		ev.store_hint = persistent;
9579 
9580 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9581 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9582 	ev.key.type = csrk->type;
9583 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9584 
9585 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9586 }
9587 
9588 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9589 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9590 			 u16 max_interval, u16 latency, u16 timeout)
9591 {
9592 	struct mgmt_ev_new_conn_param ev;
9593 
9594 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9595 		return;
9596 
9597 	memset(&ev, 0, sizeof(ev));
9598 	bacpy(&ev.addr.bdaddr, bdaddr);
9599 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9600 	ev.store_hint = store_hint;
9601 	ev.min_interval = cpu_to_le16(min_interval);
9602 	ev.max_interval = cpu_to_le16(max_interval);
9603 	ev.latency = cpu_to_le16(latency);
9604 	ev.timeout = cpu_to_le16(timeout);
9605 
9606 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9607 }
9608 
9609 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9610 			   u8 *name, u8 name_len)
9611 {
9612 	struct sk_buff *skb;
9613 	struct mgmt_ev_device_connected *ev;
9614 	u16 eir_len = 0;
9615 	u32 flags = 0;
9616 
9617 	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
9618 		return;
9619 
9620 	/* allocate buff for LE or BR/EDR adv */
9621 	if (conn->le_adv_data_len > 0)
9622 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9623 				     sizeof(*ev) + conn->le_adv_data_len);
9624 	else
9625 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9626 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9627 				     eir_precalc_len(sizeof(conn->dev_class)));
9628 
9629 	ev = skb_put(skb, sizeof(*ev));
9630 	bacpy(&ev->addr.bdaddr, &conn->dst);
9631 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9632 
9633 	if (conn->out)
9634 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9635 
9636 	ev->flags = __cpu_to_le32(flags);
9637 
9638 	/* We must ensure that the EIR Data fields are ordered and
9639 	 * unique. Keep it simple for now and avoid the problem by not
9640 	 * adding any BR/EDR data to the LE adv.
9641 	 */
9642 	if (conn->le_adv_data_len > 0) {
9643 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9644 		eir_len = conn->le_adv_data_len;
9645 	} else {
9646 		if (name)
9647 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9648 
9649 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9650 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9651 						    conn->dev_class, sizeof(conn->dev_class));
9652 	}
9653 
9654 	ev->eir_len = cpu_to_le16(eir_len);
9655 
9656 	mgmt_event_skb(skb, NULL);
9657 }
9658 
9659 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9660 {
9661 	struct hci_dev *hdev = data;
9662 	struct mgmt_cp_unpair_device *cp = cmd->param;
9663 
9664 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9665 
9666 	cmd->cmd_complete(cmd, 0);
9667 	mgmt_pending_remove(cmd);
9668 }
9669 
9670 bool mgmt_powering_down(struct hci_dev *hdev)
9671 {
9672 	struct mgmt_pending_cmd *cmd;
9673 	struct mgmt_mode *cp;
9674 
9675 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9676 	if (!cmd)
9677 		return false;
9678 
9679 	cp = cmd->param;
9680 	if (!cp->val)
9681 		return true;
9682 
9683 	return false;
9684 }
9685 
9686 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9687 			      u8 link_type, u8 addr_type, u8 reason,
9688 			      bool mgmt_connected)
9689 {
9690 	struct mgmt_ev_device_disconnected ev;
9691 	struct sock *sk = NULL;
9692 
9693 	if (!mgmt_connected)
9694 		return;
9695 
9696 	if (link_type != ACL_LINK && link_type != LE_LINK)
9697 		return;
9698 
9699 	bacpy(&ev.addr.bdaddr, bdaddr);
9700 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9701 	ev.reason = reason;
9702 
9703 	/* Report disconnects due to suspend */
9704 	if (hdev->suspended)
9705 		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9706 
9707 	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9708 
9709 	if (sk)
9710 		sock_put(sk);
9711 }
9712 
/* Handle a failed HCI disconnect attempt.
 *
 * All pending Unpair Device commands are completed (via
 * unpair_device_rsp). A pending Disconnect command is completed with
 * the translated HCI status, but only when its address and address
 * type match the failing connection.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Ignore failures for a peer other than the pending command's. */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9738 
/* Report a failed connection attempt to userspace.
 *
 * If this connection had already been announced with Device Connected
 * (HCI_CONN_MGMT_CONNECTED was set), a Device Disconnected event is
 * sent instead of Connect Failed so userspace connection state stays
 * consistent.
 */
void mgmt_connect_failed(struct hci_dev *hdev, struct hci_conn *conn, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		mgmt_device_disconnected(hdev, &conn->dst, conn->type,
					 conn->dst_type, status, true);
		return;
	}

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9755 
9756 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9757 {
9758 	struct mgmt_ev_pin_code_request ev;
9759 
9760 	bacpy(&ev.addr.bdaddr, bdaddr);
9761 	ev.addr.type = BDADDR_BREDR;
9762 	ev.secure = secure;
9763 
9764 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9765 }
9766 
9767 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9768 				  u8 status)
9769 {
9770 	struct mgmt_pending_cmd *cmd;
9771 
9772 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9773 	if (!cmd)
9774 		return;
9775 
9776 	cmd->cmd_complete(cmd, mgmt_status(status));
9777 	mgmt_pending_remove(cmd);
9778 }
9779 
9780 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9781 				      u8 status)
9782 {
9783 	struct mgmt_pending_cmd *cmd;
9784 
9785 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9786 	if (!cmd)
9787 		return;
9788 
9789 	cmd->cmd_complete(cmd, mgmt_status(status));
9790 	mgmt_pending_remove(cmd);
9791 }
9792 
9793 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9794 			      u8 link_type, u8 addr_type, u32 value,
9795 			      u8 confirm_hint)
9796 {
9797 	struct mgmt_ev_user_confirm_request ev;
9798 
9799 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9800 
9801 	bacpy(&ev.addr.bdaddr, bdaddr);
9802 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9803 	ev.confirm_hint = confirm_hint;
9804 	ev.value = cpu_to_le32(value);
9805 
9806 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9807 			  NULL);
9808 }
9809 
9810 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9811 			      u8 link_type, u8 addr_type)
9812 {
9813 	struct mgmt_ev_user_passkey_request ev;
9814 
9815 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9816 
9817 	bacpy(&ev.addr.bdaddr, bdaddr);
9818 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9819 
9820 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9821 			  NULL);
9822 }
9823 
9824 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9825 				      u8 link_type, u8 addr_type, u8 status,
9826 				      u8 opcode)
9827 {
9828 	struct mgmt_pending_cmd *cmd;
9829 
9830 	cmd = pending_find(opcode, hdev);
9831 	if (!cmd)
9832 		return -ENOENT;
9833 
9834 	cmd->cmd_complete(cmd, mgmt_status(status));
9835 	mgmt_pending_remove(cmd);
9836 
9837 	return 0;
9838 }
9839 
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9846 
/* Complete a pending User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9854 
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9861 
/* Complete a pending User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9869 
9870 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9871 			     u8 link_type, u8 addr_type, u32 passkey,
9872 			     u8 entered)
9873 {
9874 	struct mgmt_ev_passkey_notify ev;
9875 
9876 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9877 
9878 	bacpy(&ev.addr.bdaddr, bdaddr);
9879 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9880 	ev.passkey = __cpu_to_le32(passkey);
9881 	ev.entered = entered;
9882 
9883 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9884 }
9885 
/* Report an authentication failure for @conn.
 *
 * The Auth Failed event is not sent to the socket that issued a pending
 * pairing command for this connection; that command is completed with
 * the same translated status instead.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9906 
/* Handle completion of the HCI authentication-enable change.
 *
 * On failure, all pending Set Link Security commands are failed with
 * the translated status. On success, HCI_LINK_SECURITY is synced with
 * the controller's HCI_AUTH state, pending commands are completed with
 * the current settings, and New Settings is only emitted when the flag
 * actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9933 
9934 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9935 {
9936 	struct cmd_lookup *match = data;
9937 
9938 	if (match->sk == NULL) {
9939 		match->sk = cmd->sk;
9940 		sock_hold(match->sk);
9941 	}
9942 }
9943 
/* Handle completion of a class-of-device update.
 *
 * Finds the first socket among the pending Set Dev Class / Add UUID /
 * Remove UUID commands so that it can be skipped when broadcasting the
 * Class Of Device Changed event on success.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9962 
/* Handle completion of a local-name change.
 *
 * @name: must point to at least HCI_MAX_NAME_LENGTH bytes.
 *
 * When no Set Local Name command is pending the change did not come
 * from mgmt, so the new name is synced back into hdev->dev_name; in
 * that case the Local Name Changed event is suppressed while a power-on
 * is in progress.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9990 
9991 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9992 {
9993 	int i;
9994 
9995 	for (i = 0; i < uuid_count; i++) {
9996 		if (!memcmp(uuid, uuids[i], 16))
9997 			return true;
9998 	}
9999 
10000 	return false;
10001 }
10002 
/* Return true if any UUID in the EIR data matches an entry in the
 * @uuids filter list. 16- and 32-bit UUIDs are expanded to the full
 * 128-bit form using the Bluetooth base UUID before comparing.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero field length terminates the EIR data. */
		if (field_len == 0)
			break;

		/* Stop on a field that would run past the buffer. */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* UUID values are little endian in EIR data. */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + payload). */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10057 
/* Schedule a delayed restart of an ongoing LE scan. Used when the
 * controller's duplicate filtering would otherwise suppress updated
 * advertising reports (see HCI_QUIRK_STRICT_DUPLICATE_FILTER).
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Don't bother restarting when the restart would land after the
	 * scan window's scheduled end anyway.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
10072 
/* Apply the Start Service Discovery filters (RSSI threshold and UUID
 * list) to a discovery result. Returns false when the result should be
 * dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10117 
10118 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
10119 				  bdaddr_t *bdaddr, u8 addr_type)
10120 {
10121 	struct mgmt_ev_adv_monitor_device_lost ev;
10122 
10123 	ev.monitor_handle = cpu_to_le16(handle);
10124 	bacpy(&ev.addr.bdaddr, bdaddr);
10125 	ev.addr.type = addr_type;
10126 
10127 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
10128 		   NULL);
10129 }
10130 
/* Wrap a DEVICE_FOUND payload into an ADV_MONITOR_DEVICE_FOUND event
 * that additionally carries @handle and broadcast it (except to
 * @skip_sk). @skb is only read; the caller keeps ownership of it.
 */
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
10160 
/* Route a prepared Device Found skb to the right event(s).
 *
 * Consumes @skb in every path: it is either sent as a DEVICE_FOUND
 * event or freed. Matched advertisement monitors get copies of the
 * payload wrapped in ADV_MONITOR_DEVICE_FOUND events.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: stays false only if every monitored device
	 * has already been notified.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10224 
/* Emit a Mesh Device Found event for an LE advertising report.
 *
 * When an AD-type filter is configured (hdev->mesh_ad_types[0] != 0),
 * the advertising data and scan response are scanned and the report is
 * dropped unless at least one AD structure matches a filtered type.
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* An empty filter list accepts everything. */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* Walk the AD structures: eir[i] is the length, eir[i+1]
		 * the AD type of each structure.
		 */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				/* A zero entry terminates the filter list. */
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10290 
/* Build and deliver a Device Found event, applying the active
 * discovery filters. Mesh-enabled controllers additionally get a Mesh
 * Device Found event for LE reports. Ownership of the allocated skb is
 * handed to mgmt_adv_monitor_device_found(), which sends or frees it.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Append a Class of Device field only if the EIR data doesn't
	 * already contain one.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10382 
10383 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10384 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10385 {
10386 	struct sk_buff *skb;
10387 	struct mgmt_ev_device_found *ev;
10388 	u16 eir_len = 0;
10389 	u32 flags = 0;
10390 
10391 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10392 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10393 
10394 	ev = skb_put(skb, sizeof(*ev));
10395 	bacpy(&ev->addr.bdaddr, bdaddr);
10396 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10397 	ev->rssi = rssi;
10398 
10399 	if (name)
10400 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10401 	else
10402 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10403 
10404 	ev->eir_len = cpu_to_le16(eir_len);
10405 	ev->flags = cpu_to_le32(flags);
10406 
10407 	mgmt_event_skb(skb, NULL);
10408 }
10409 
10410 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
10411 {
10412 	struct mgmt_ev_discovering ev;
10413 
10414 	bt_dev_dbg(hdev, "discovering %u", discovering);
10415 
10416 	memset(&ev, 0, sizeof(ev));
10417 	ev.type = hdev->discovery.type;
10418 	ev.discovering = discovering;
10419 
10420 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
10421 }
10422 
10423 void mgmt_suspending(struct hci_dev *hdev, u8 state)
10424 {
10425 	struct mgmt_ev_controller_suspend ev;
10426 
10427 	ev.suspend_state = state;
10428 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
10429 }
10430 
10431 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10432 		   u8 addr_type)
10433 {
10434 	struct mgmt_ev_controller_resume ev;
10435 
10436 	ev.wake_reason = reason;
10437 	if (bdaddr) {
10438 		bacpy(&ev.addr.bdaddr, bdaddr);
10439 		ev.addr.type = addr_type;
10440 	} else {
10441 		memset(&ev.addr, 0, sizeof(ev.addr));
10442 	}
10443 
10444 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10445 }
10446 
/* The management channel: command handler table hooked into the HCI
 * control socket layer.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10453 
/* Register the management channel with the HCI core. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10458 
/* Unregister the management channel from the HCI core. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10463 
10464 void mgmt_cleanup(struct sock *sk)
10465 {
10466 	struct mgmt_mesh_tx *mesh_tx;
10467 	struct hci_dev *hdev;
10468 
10469 	read_lock(&hci_dev_list_lock);
10470 
10471 	list_for_each_entry(hdev, &hci_dev_list, list) {
10472 		do {
10473 			mesh_tx = mgmt_mesh_next(hdev, sk);
10474 
10475 			if (mesh_tx)
10476 				mesh_send_complete(hdev, mesh_tx, true);
10477 		} while (mesh_tx);
10478 	}
10479 
10480 	read_unlock(&hci_dev_list_lock);
10481 }
10482