xref: /openbmc/linux/net/bluetooth/mgmt.c (revision f3f5d7a5)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Opcodes accepted from trusted (privileged) management sockets.
 * Reported verbatim by MGMT_OP_READ_COMMANDS in read_commands().
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};

/* Events delivered to trusted management sockets; also reported by
 * MGMT_OP_READ_COMMANDS.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};

/* Read-only subset of commands permitted on untrusted sockets. */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

/* Events that may be delivered to untrusted sockets. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};

/* Lifetime of the service cache (see service_cache_off()): 2 seconds */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* All-zero 128-bit key, used to detect unset/blank key material */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* HCI to MGMT error code conversion table, indexed by HCI status code */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
285 
286 static u8 mgmt_errno_status(int err)
287 {
288 	switch (err) {
289 	case 0:
290 		return MGMT_STATUS_SUCCESS;
291 	case -EPERM:
292 		return MGMT_STATUS_REJECTED;
293 	case -EINVAL:
294 		return MGMT_STATUS_INVALID_PARAMS;
295 	case -EOPNOTSUPP:
296 		return MGMT_STATUS_NOT_SUPPORTED;
297 	case -EBUSY:
298 		return MGMT_STATUS_BUSY;
299 	case -ETIMEDOUT:
300 		return MGMT_STATUS_AUTH_FAILED;
301 	case -ENOMEM:
302 		return MGMT_STATUS_NO_RESOURCES;
303 	case -EISCONN:
304 		return MGMT_STATUS_ALREADY_CONNECTED;
305 	case -ENOTCONN:
306 		return MGMT_STATUS_DISCONNECTED;
307 	}
308 
309 	return MGMT_STATUS_FAILED;
310 }
311 
312 static u8 mgmt_status(int err)
313 {
314 	if (err < 0)
315 		return mgmt_errno_status(err);
316 
317 	if (err < ARRAY_SIZE(mgmt_status_table))
318 		return mgmt_status_table[err];
319 
320 	return MGMT_STATUS_FAILED;
321 }
322 
/* Send a controller-index related event on the control channel, limited
 * to sockets carrying the given HCI socket @flag.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

/* Like mgmt_index_event() but also skips @skip_sk, typically the socket
 * whose command caused the event.
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

/* Broadcast an event to all trusted control-channel sockets except
 * @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

/* Variant of mgmt_event() for a pre-built skb; buffer handling follows
 * mgmt_send_event_skb().
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
349 
350 static u8 le_addr_type(u8 mgmt_addr_type)
351 {
352 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
353 		return ADDR_LE_DEV_PUBLIC;
354 	else
355 		return ADDR_LE_DEV_RANDOM;
356 }
357 
/* Fill in the implemented MGMT interface version and revision.  @ver
 * must point to at least sizeof(struct mgmt_rp_read_version) bytes; the
 * revision is stored little-endian per the wire format.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
365 
/* MGMT_OP_READ_VERSION handler: reply with the implemented management
 * interface version/revision.  Controller-independent (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
378 
379 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
380 			 u16 data_len)
381 {
382 	struct mgmt_rp_read_commands *rp;
383 	u16 num_commands, num_events;
384 	size_t rp_size;
385 	int i, err;
386 
387 	bt_dev_dbg(hdev, "sock %p", sk);
388 
389 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
390 		num_commands = ARRAY_SIZE(mgmt_commands);
391 		num_events = ARRAY_SIZE(mgmt_events);
392 	} else {
393 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
394 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
395 	}
396 
397 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
398 
399 	rp = kmalloc(rp_size, GFP_KERNEL);
400 	if (!rp)
401 		return -ENOMEM;
402 
403 	rp->num_commands = cpu_to_le16(num_commands);
404 	rp->num_events = cpu_to_le16(num_events);
405 
406 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
407 		__le16 *opcode = rp->opcodes;
408 
409 		for (i = 0; i < num_commands; i++, opcode++)
410 			put_unaligned_le16(mgmt_commands[i], opcode);
411 
412 		for (i = 0; i < num_events; i++, opcode++)
413 			put_unaligned_le16(mgmt_events[i], opcode);
414 	} else {
415 		__le16 *opcode = rp->opcodes;
416 
417 		for (i = 0; i < num_commands; i++, opcode++)
418 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
419 
420 		for (i = 0; i < num_events; i++, opcode++)
421 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
422 	}
423 
424 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
425 				rp, rp_size);
426 	kfree(rp);
427 
428 	return err;
429 }
430 
/* MGMT_OP_READ_INDEX_LIST handler: report the ids of all configured
 * primary controllers.  Both walks run under hci_dev_list_lock, so the
 * in-between allocation must be GFP_ATOMIC.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply size */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes.  Devices still in setup or
	 * config, or claimed by a user channel, are not reported.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* The second pass filters more strictly than the first, so count
	 * can only have shrunk; recompute the reply length to match.
	 */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
490 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass scheme as
 * read_index_list(), but reporting only unconfigured primary
 * controllers.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply size */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* Still under hci_dev_list_lock, so no sleeping allocation */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes with the extra visibility
	 * filters applied.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
550 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: report all primary and AMP
 * controllers with their type and bus.  Calling this also switches the
 * socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the reply size */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* Under hci_dev_list_lock, so this must not sleep */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		/* Entry type: 0x00 configured primary, 0x01 unconfigured
		 * primary, 0x02 AMP controller.
		 */
		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
624 
625 static bool is_configured(struct hci_dev *hdev)
626 {
627 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
628 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
629 		return false;
630 
631 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
632 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
633 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
634 		return false;
635 
636 	return true;
637 }
638 
/* Compute the MGMT_OPTION_* bits that still need to be configured
 * before the controller is usable: a pending external configuration
 * step and/or a public address that has not been set yet.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* Requires external configuration which has not happened yet */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Address must be supplied (invalid default or DT/ACPI property)
	 * but public_addr is still all-zeroes.
	 */
	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
654 
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options
 * mask to sockets subscribed to option events, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
662 
/* Complete @opcode successfully with the current missing-options mask
 * as the reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
670 
/* MGMT_OP_READ_CONFIG_INFO handler: report the manufacturer id, which
 * configuration options the controller supports, and which are still
 * missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Configuring a public address is only possible when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
698 
/* Build the MGMT_PHY_* mask of PHYs/packet types the controller
 * supports, derived from the BR/EDR LMP feature bits and the LE feature
 * page.  The EDR 2M/3M and multi-slot bits are nested because each
 * depends on the corresponding base capability being present.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M is mandatory whenever LE is supported at all */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
750 
/* Build the MGMT_PHY_* mask of currently selected PHYs.  Note the
 * asymmetry in the pkt_type tests: the basic-rate DM/DH bits are
 * checked positively, while the EDR HCI_2DHx/HCI_3DHx bits are tested
 * inverted (a set bit appears to mean "excluded" — matches the positive
 * DM/DH convention being flipped for EDR in the HCI packet-type field).
 * For LE the default TX/RX PHY preference masks are consulted.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
813 
814 static u32 get_configurable_phys(struct hci_dev *hdev)
815 {
816 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
817 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
818 }
819 
820 static u32 get_supported_settings(struct hci_dev *hdev)
821 {
822 	u32 settings = 0;
823 
824 	settings |= MGMT_SETTING_POWERED;
825 	settings |= MGMT_SETTING_BONDABLE;
826 	settings |= MGMT_SETTING_DEBUG_KEYS;
827 	settings |= MGMT_SETTING_CONNECTABLE;
828 	settings |= MGMT_SETTING_DISCOVERABLE;
829 
830 	if (lmp_bredr_capable(hdev)) {
831 		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
832 			settings |= MGMT_SETTING_FAST_CONNECTABLE;
833 		settings |= MGMT_SETTING_BREDR;
834 		settings |= MGMT_SETTING_LINK_SECURITY;
835 
836 		if (lmp_ssp_capable(hdev)) {
837 			settings |= MGMT_SETTING_SSP;
838 		}
839 
840 		if (lmp_sc_capable(hdev))
841 			settings |= MGMT_SETTING_SECURE_CONN;
842 
843 		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
844 			     &hdev->quirks))
845 			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
846 	}
847 
848 	if (lmp_le_capable(hdev)) {
849 		settings |= MGMT_SETTING_LE;
850 		settings |= MGMT_SETTING_SECURE_CONN;
851 		settings |= MGMT_SETTING_PRIVACY;
852 		settings |= MGMT_SETTING_STATIC_ADDRESS;
853 		settings |= MGMT_SETTING_ADVERTISING;
854 	}
855 
856 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
857 	    hdev->set_bdaddr)
858 		settings |= MGMT_SETTING_CONFIGURATION;
859 
860 	if (cis_central_capable(hdev))
861 		settings |= MGMT_SETTING_CIS_CENTRAL;
862 
863 	if (cis_peripheral_capable(hdev))
864 		settings |= MGMT_SETTING_CIS_PERIPHERAL;
865 
866 	settings |= MGMT_SETTING_PHY_CONFIGURATION;
867 
868 	return settings;
869 }
870 
/* Build the MGMT_SETTING_* mask describing the controller's current
 * state, derived from the HCI dev flags and capability helpers.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
950 
/* Look up a pending management command of @opcode for @hdev on the
 * control channel; NULL if none is outstanding.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
955 
956 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
957 {
958 	struct mgmt_pending_cmd *cmd;
959 
960 	/* If there's a pending mgmt command the flags will not yet have
961 	 * their final values, so check for this first.
962 	 */
963 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
964 	if (cmd) {
965 		struct mgmt_mode *cp = cmd->param;
966 		if (cp->val == 0x01)
967 			return LE_AD_GENERAL;
968 		else if (cp->val == 0x02)
969 			return LE_AD_LIMITED;
970 	} else {
971 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
972 			return LE_AD_LIMITED;
973 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
974 			return LE_AD_GENERAL;
975 	}
976 
977 	return 0;
978 }
979 
/* Return the effective connectable state: the value a pending
 * SET_CONNECTABLE command is about to apply, or otherwise the current
 * HCI_CONNECTABLE flag.
 */
bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
996 
/* hci_cmd_sync work: flush the cached EIR and Class of Device updates
 * out to the controller.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

/* Delayed-work handler for hdev->service_cache.  The test-and-clear
 * ensures the sync work is only queued if the cache flag was still set,
 * avoiding a race with a concurrent clear.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
1015 
1016 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1017 {
1018 	/* The generation of a new RPA and programming it into the
1019 	 * controller happens in the hci_req_enable_advertising()
1020 	 * function.
1021 	 */
1022 	if (ext_adv_capable(hdev))
1023 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1024 	else
1025 		return hci_enable_advertising_sync(hdev);
1026 }
1027 
/* Delayed work handler for hdev->rpa_expired: mark the RPA as expired
 * and, if advertising is currently on, queue the sync work that will
 * regenerate it by restarting advertising.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to refresh unless advertising is active */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1042 
1043 static int set_discoverable_sync(struct hci_dev *hdev, void *data);
1044 
/* Delayed work handler for hdev->discov_off: discoverable timeout fired,
 * so clear the discoverable flags under hdev->lock, push the new mode to
 * the controller and notify mgmt listeners of the settings change.
 */
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_cmd_sync_queue(hdev, set_discoverable_sync, NULL, NULL);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
1069 
1070 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1071 
/* Finish a mesh transmission: optionally emit the packet-complete event
 * (@silent suppresses it) and free the tx entry. @mesh_tx is invalid
 * after this call.
 */
static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}
1083 
/* hci_cmd_sync work: stop the current mesh transmission by disabling
 * advertising, then complete (with event) the next queued tx entry, if
 * any. Always reports success.
 */
static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
1097 
1098 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1099 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback chained after mesh_send_done_sync: kick off the
 * next queued mesh transmission, or complete it with an event if it
 * could not be queued. The incoming @err is ignored and reused as a
 * local for the queueing result.
 */
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}
1115 
/* Delayed work handler for hdev->mesh_send_done: when the mesh send
 * window ends, queue the sync teardown (mesh_send_done_sync) followed
 * by mesh_next to start the next transmission.
 */
static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
1126 
/* One-time per-device mgmt initialisation, performed on first mgmt use:
 * set up the delayed work handlers and switch the device into
 * mgmt-controlled mode (HCI_MGMT). Idempotent - subsequent calls are
 * no-ops once HCI_MGMT is set.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
1148 
/* MGMT_OP_READ_INFO handler: snapshot the controller's address, version,
 * settings, class and names under hdev->lock and return them to the
 * requesting socket.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1178 
/* Build the EIR blob used by the extended-info reply/event: class of
 * device (BR/EDR only), appearance (LE only) and both device names.
 * Returns the number of bytes written to @eir. The caller must provide
 * a buffer large enough for all four elements.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1202 
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info but with the
 * class/appearance/names packed as EIR data. Calling this also switches
 * the socket from the legacy class/name events to the single extended
 * info-changed event.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* Fixed reply header plus EIR payload; 512 bytes covers the
	 * maximum the four EIR elements can occupy.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1242 
/* Emit MGMT_EV_EXT_INFO_CHANGED (EIR-encoded class/appearance/names) to
 * all sockets that opted into extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1258 
/* Reply to @opcode on @sk with a successful command-complete carrying
 * the current settings bitmask (little endian).
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1266 
1267 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1268 {
1269 	struct mgmt_ev_advertising_added ev;
1270 
1271 	ev.instance = instance;
1272 
1273 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1274 }
1275 
1276 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1277 			      u8 instance)
1278 {
1279 	struct mgmt_ev_advertising_removed ev;
1280 
1281 	ev.instance = instance;
1282 
1283 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1284 }
1285 
1286 static void cancel_adv_timeout(struct hci_dev *hdev)
1287 {
1288 	if (hdev->adv_instance_timeout) {
1289 		hdev->adv_instance_timeout = 0;
1290 		cancel_delayed_work(&hdev->adv_instance_expire);
1291 	}
1292 }
1293 
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Re-sort every known LE connection parameter entry onto the
	 * pending list matching its auto_connect policy; entries with
	 * other policies end up on no pending list.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1318 
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets that receive setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1326 
/* Completion callback for set_powered_sync: respond to the originating
 * socket and, on a successful power-on, restore LE auto-connect actions
 * and broadcast the new settings.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1362 
/* hci_cmd_sync work: apply the power state requested by the pending
 * SET_POWERED command.
 */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1372 
/* MGMT_OP_SET_POWERED handler: validate the request, reject if another
 * SET_POWERED is in flight, short-circuit if the state already matches,
 * otherwise queue the sync power change with completion reporting.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state - just confirm the settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		hci_cmd_sync_cancel_sync(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1423 
/* Broadcast the current settings to every listening mgmt socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1428 
/* Context for mgmt_pending_foreach() callbacks: @sk collects the socket
 * of the first matched command (held via sock_hold), @mgmt_status is an
 * optional status to report.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1434 
/* mgmt_pending_foreach() callback: answer a pending command with the
 * current settings and free it. Keeps a reference to the first
 * responding socket in the cmd_lookup so the caller can later skip it
 * when broadcasting.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1450 
/* mgmt_pending_foreach() callback: fail a pending command with the
 * status pointed to by @data and remove it.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1458 
1459 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1460 {
1461 	if (cmd->cmd_complete) {
1462 		u8 *status = data;
1463 
1464 		cmd->cmd_complete(cmd, *status);
1465 		mgmt_pending_remove(cmd);
1466 
1467 		return;
1468 	}
1469 
1470 	cmd_status_rsp(cmd, data);
1471 }
1472 
/* Generic cmd_complete handler: echo the command's own parameters back
 * as the reply payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1478 
/* cmd_complete handler for commands whose parameters begin with a
 * mgmt_addr_info: reply with just that address portion.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1484 
1485 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1486 {
1487 	if (!lmp_bredr_capable(hdev))
1488 		return MGMT_STATUS_NOT_SUPPORTED;
1489 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1490 		return MGMT_STATUS_REJECTED;
1491 	else
1492 		return MGMT_STATUS_SUCCESS;
1493 }
1494 
1495 static u8 mgmt_le_support(struct hci_dev *hdev)
1496 {
1497 	if (!lmp_le_capable(hdev))
1498 		return MGMT_STATUS_NOT_SUPPORTED;
1499 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1500 		return MGMT_STATUS_REJECTED;
1501 	else
1502 		return MGMT_STATUS_SUCCESS;
1503 }
1504 
/* Completion callback for set_discoverable_sync: on failure report the
 * error and drop the limited-discoverable flag; on success (re)arm the
 * discoverable timeout and notify the requester and other listeners.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the discoverable timeout now that the mode took effect */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1538 
/* hci_cmd_sync work: push the discoverable mode derived from the device
 * flags to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1545 
/* MGMT_OP_SET_DISCOVERABLE handler. Modes: 0x00 off (no timeout
 * allowed), 0x01 general, 0x02 limited (timeout required). Powered-off
 * devices only get their flags updated; otherwise the flag changes are
 * applied and the controller update is queued, with the timeout armed
 * from the completion handler.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1678 
/* Completion callback for set_connectable_sync: report the result to
 * the requester and broadcast the new settings on success.
 */
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	/* NOTE(review): cmd cannot be NULL here (checked above against
	 * pending_find); the guard looks redundant - confirm before
	 * removing.
	 */
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
1707 
/* Apply a connectable change without touching the controller (used when
 * powered off): update the flags, confirm to the requester and, if
 * anything changed, refresh scanning state and broadcast the settings.
 * Disabling connectable also drops discoverable.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1736 
/* hci_cmd_sync work: push the connectable mode derived from the device
 * flags to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1743 
/* MGMT_OP_SET_CONNECTABLE handler: validate, update flags (disabling
 * also clears discoverable state and its timeout), then queue the sync
 * controller update. Powered-off devices take the settings-only path.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Non-connectable implies non-discoverable, so stop any
		 * running discoverable timeout and clear both flags.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1803 
/* MGMT_OP_SET_BONDABLE handler: toggle HCI_BONDABLE. Takes effect
 * purely at the host level, so no HCI command is queued; on change the
 * discoverable state is refreshed (the advertising address can depend
 * on bondable mode in limited privacy) and the new settings broadcast.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1841 
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR authentication.
 * Powered-off devices only get the HCI_LINK_SECURITY flag updated;
 * otherwise HCI_OP_WRITE_AUTH_ENABLE is sent directly and the reply is
 * delivered from the HCI event path via the pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth mode - just confirm */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1910 
/* Completion callback for set_ssp_sync: reconcile HCI_SSP_ENABLED with
 * the outcome, answer every pending SET_SSP command, broadcast new
 * settings if the flag changed, and refresh EIR data.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Failed enable: roll back the flag and tell listeners */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1952 
1953 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1954 {
1955 	struct mgmt_pending_cmd *cmd = data;
1956 	struct mgmt_mode *cp = cmd->param;
1957 	bool changed = false;
1958 	int err;
1959 
1960 	if (cp->val)
1961 		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1962 
1963 	err = hci_write_ssp_mode_sync(hdev, cp->val);
1964 
1965 	if (!err && changed)
1966 		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1967 
1968 	return err;
1969 }
1970 
/* MGMT_OP_SET_SSP handler: validate support and parameters, handle the
 * powered-off (flags only) and no-op cases, otherwise queue the sync
 * SSP mode write with set_ssp_complete as its completion.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested mode - just confirm the settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2045 
2046 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2047 {
2048 	bt_dev_dbg(hdev, "sock %p", sk);
2049 
2050 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2051 				       MGMT_STATUS_NOT_SUPPORTED);
2052 }
2053 
/* Completion callback for set_le_sync: on failure fail every pending
 * SET_LE command; on success answer them with the current settings and
 * broadcast the change.
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
2074 
/* hci_cmd_sync work for SET_LE: when disabling, tear down advertising
 * instances first; when enabling, set the flag before the write. Then
 * update LE host support on the controller and, if LE ended up enabled,
 * refresh advertising data and passive scanning.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2118 
/* Completion callback for set_mesh_sync: fail all pending
 * SET_MESH_RECEIVER commands on error, otherwise remove the pending
 * entry and confirm success (cmd->sk is captured before the remove).
 */
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
2134 
/* Synchronous worker for MGMT_OP_SET_MESH_RECEIVER.
 *
 * Stores the caller-supplied AD-type filter list and toggles the
 * HCI_MESH flag, then refreshes passive scanning to apply the change.
 * Always returns 0.
 */
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	/* Remaining bytes after the fixed header are the AD type filter.
	 * NOTE(review): assumes param_len >= sizeof(*cp); presumably
	 * guaranteed by the mgmt handler size table — confirm. On
	 * underflow len wraps huge and the copy below is skipped.
	 */
	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
2157 
/* Handler for MGMT_OP_SET_MESH_RECEIVER.
 *
 * Validates that the controller is LE capable, the experimental mesh
 * feature is enabled and the enable flag is boolean, then queues
 * set_mesh_sync() with set_mesh_complete() as completion.
 */
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		/* Allocation or queueing failed: report failure and drop
		 * the pending entry if it was created.
		 */
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2195 
/* Completion callback for mesh_send_sync().
 *
 * On error clears HCI_MESH_SENDING and reports a send-complete failure
 * for this handle. On success arms the mesh_send_done delayed work for
 * roughly cnt * 25 ms, after which the transmission is finalized.
 */
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	/* 25 ms per requested advertising transmission */
	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
2216 
/* Synchronous worker that turns a queued mesh TX into an advertising
 * instance and schedules it.
 *
 * The instance number le_num_of_adv_sets + 1 is used, which is outside
 * the controller's normal set range and so reserved for mesh here.
 * Returns MGMT_STATUS_BUSY when all regular instances are occupied.
 */
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	/* Advertise for up to 1 s, or cnt intervals, whichever the
	 * scheduler applies first.
	 */
	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	/* NOTE(review): on failure err is recorded but the scheduling
	 * logic below still runs and may overwrite it — confirm this is
	 * intentional.
	 */
	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
2270 
2271 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2272 {
2273 	struct mgmt_rp_mesh_read_features *rp = data;
2274 
2275 	if (rp->used_handles >= rp->max_handles)
2276 		return;
2277 
2278 	rp->handles[rp->used_handles++] = mesh_tx->handle;
2279 }
2280 
/* Handler for MGMT_OP_MESH_READ_FEATURES.
 *
 * Reports max_handles (MESH_HANDLES_MAX when LE is enabled, else 0) and
 * the handles of this socket's outstanding mesh transmissions.
 */
static int mesh_features(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_rp_mesh_read_features rp;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	memset(&rp, 0, sizeof(rp));
	rp.index = cpu_to_le16(hdev->id);
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		rp.max_handles = MESH_HANDLES_MAX;

	hci_dev_lock(hdev);

	if (rp.max_handles)
		mgmt_mesh_foreach(hdev, send_count, &rp, sk);

	/* Trim the reply to the handles actually used: full struct size
	 * minus the unused tail of the MESH_HANDLES_MAX-sized array.
	 */
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);

	hci_dev_unlock(hdev);
	return 0;
}
2307 
/* Synchronous worker for MGMT_OP_MESH_SEND_CANCEL.
 *
 * Handle 0 cancels every outstanding mesh TX belonging to the issuing
 * socket; a non-zero handle cancels only that TX, and only if it is
 * owned by the same socket.
 */
static int send_cancel(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
	struct mgmt_mesh_tx *mesh_tx;

	if (!cancel->handle) {
		/* Drain all of this socket's pending transmissions */
		do {
			mesh_tx = mgmt_mesh_next(hdev, cmd->sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, false);
		} while (mesh_tx);
	} else {
		mesh_tx = mgmt_mesh_find(hdev, cancel->handle);

		/* Ownership check: sockets may only cancel their own TX */
		if (mesh_tx && mesh_tx->sk == cmd->sk)
			mesh_send_complete(hdev, mesh_tx, false);
	}

	/* Cancel always reports success, even if nothing matched */
	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
			  0, NULL, 0);
	mgmt_pending_free(cmd);

	return 0;
}
2334 
/* Handler for MGMT_OP_MESH_SEND_CANCEL.
 *
 * Requires LE + experimental mesh support and LE enabled, then queues
 * send_cancel() on the cmd_sync queue. Uses mgmt_pending_new (not
 * _add), so the entry is not tracked in the pending list.
 */
static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);
	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
2368 
2369 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2370 {
2371 	struct mgmt_mesh_tx *mesh_tx;
2372 	struct mgmt_cp_mesh_send *send = data;
2373 	struct mgmt_rp_mesh_read_features rp;
2374 	bool sending;
2375 	int err = 0;
2376 
2377 	if (!lmp_le_capable(hdev) ||
2378 	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2379 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2380 				       MGMT_STATUS_NOT_SUPPORTED);
2381 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2382 	    len <= MGMT_MESH_SEND_SIZE ||
2383 	    len > (MGMT_MESH_SEND_SIZE + 31))
2384 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2385 				       MGMT_STATUS_REJECTED);
2386 
2387 	hci_dev_lock(hdev);
2388 
2389 	memset(&rp, 0, sizeof(rp));
2390 	rp.max_handles = MESH_HANDLES_MAX;
2391 
2392 	mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2393 
2394 	if (rp.max_handles <= rp.used_handles) {
2395 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2396 				      MGMT_STATUS_BUSY);
2397 		goto done;
2398 	}
2399 
2400 	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2401 	mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2402 
2403 	if (!mesh_tx)
2404 		err = -ENOMEM;
2405 	else if (!sending)
2406 		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2407 					 mesh_send_start_complete);
2408 
2409 	if (err < 0) {
2410 		bt_dev_err(hdev, "Send Mesh Failed %d", err);
2411 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2412 				      MGMT_STATUS_FAILED);
2413 
2414 		if (mesh_tx) {
2415 			if (sending)
2416 				mgmt_mesh_remove(mesh_tx);
2417 		}
2418 	} else {
2419 		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2420 
2421 		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2422 				  &mesh_tx->handle, 1);
2423 	}
2424 
2425 done:
2426 	hci_dev_unlock(hdev);
2427 	return err;
2428 }
2429 
/* Handler for MGMT_OP_SET_LE.
 *
 * Toggles LE support. LE-only controllers (BR/EDR disabled) cannot
 * switch LE off; when powered off or already in the requested state the
 * flags are adjusted directly, otherwise set_le_sync() is queued to
 * update the controller.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* If the controller is off, or already matches the requested
	 * state, only the flags need updating - no HCI traffic.
	 */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Disabling LE implicitly disables LE advertising */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against concurrent LE/advertising changes */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2518 
2519 /* This is a helper function to test for pending mgmt commands that can
2520  * cause CoD or EIR HCI commands. We can only allow one such pending
2521  * mgmt command at a time since otherwise we cannot easily track what
2522  * the current values are, will be, and based on that calculate if a new
2523  * HCI command needs to be sent and if yes with what value.
2524  */
2525 static bool pending_eir_or_class(struct hci_dev *hdev)
2526 {
2527 	struct mgmt_pending_cmd *cmd;
2528 
2529 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2530 		switch (cmd->opcode) {
2531 		case MGMT_OP_ADD_UUID:
2532 		case MGMT_OP_REMOVE_UUID:
2533 		case MGMT_OP_SET_DEV_CLASS:
2534 		case MGMT_OP_SET_POWERED:
2535 			return true;
2536 		}
2537 	}
2538 
2539 	return false;
2540 }
2541 
/* Bluetooth Base UUID 00000000-0000-1000-8000-00805F9B34FB in
 * little-endian byte order; used by get_uuid_size() to detect UUIDs
 * that can be shortened to 16 or 32 bits.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2546 
2547 static u8 get_uuid_size(const u8 *uuid)
2548 {
2549 	u32 val;
2550 
2551 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2552 		return 128;
2553 
2554 	val = get_unaligned_le32(&uuid[12]);
2555 	if (val > 0xffff)
2556 		return 32;
2557 
2558 	return 16;
2559 }
2560 
2561 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2562 {
2563 	struct mgmt_pending_cmd *cmd = data;
2564 
2565 	bt_dev_dbg(hdev, "err %d", err);
2566 
2567 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2568 			  mgmt_status(err), hdev->dev_class, 3);
2569 
2570 	mgmt_pending_free(cmd);
2571 }
2572 
/* Push the updated Class of Device and EIR data to the controller
 * after a UUID has been added. Class update errors short-circuit.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2583 
/* Handler for MGMT_OP_ADD_UUID.
 *
 * Appends the UUID to hdev->uuids and queues add_uuid_sync() to refresh
 * the Class of Device and EIR data; mgmt_class_complete() sends the
 * reply. Rejected with BUSY while another EIR/class command is pending.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* On success we fall through to the label below, which acts as
	 * the common exit despite its name; err holds the queue result.
	 */
	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2629 
2630 static bool enable_service_cache(struct hci_dev *hdev)
2631 {
2632 	if (!hdev_is_powered(hdev))
2633 		return false;
2634 
2635 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2636 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2637 				   CACHE_TIMEOUT);
2638 		return true;
2639 	}
2640 
2641 	return false;
2642 }
2643 
/* Push the updated Class of Device and EIR data to the controller
 * after a UUID has been removed. Class update errors short-circuit.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2654 
/* Handler for MGMT_OP_REMOVE_UUID.
 *
 * An all-zero UUID clears the whole list (possibly deferring the
 * controller update via the service cache); otherwise all entries
 * matching the given UUID are removed. A class/EIR refresh is then
 * queued via remove_uuid_sync().
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* Wildcard: all-zero UUID means "remove everything" */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache took over, the controller update
		 * happens later from the delayed work - reply now.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2722 
/* Synchronous worker for MGMT_OP_SET_DEV_CLASS.
 *
 * If the service cache was armed, cancel its delayed flush and perform
 * the EIR update it would have done, then write the new Class of
 * Device to the controller.
 */
static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* Flush the cache ourselves instead of letting the
		 * delayed work race with this update.
		 */
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}
2737 
/* Handler for MGMT_OP_SET_DEV_CLASS.
 *
 * Stores the major/minor device class and, when powered, queues
 * set_class_sync() to write it to the controller. The low two bits of
 * minor and high three bits of major are format/reserved bits and must
 * be zero.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: remember the values and reply immediately; the
	 * controller is updated during power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2789 
/* Handler for MGMT_OP_LOAD_LINK_KEYS.
 *
 * Replaces the entire BR/EDR link-key store with the supplied list.
 * Validates key count against the command size, toggles the
 * keep-debug-keys policy, and skips blocked and debug keys while
 * loading. Always replies with success once validation passed.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound so that struct_size() below cannot overflow u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate all key types up front so the store is only cleared
	 * when the whole request is acceptable.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Considering SMP over BREDR/LE, there is no need to check addr_type */
		if (key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2879 
2880 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2881 			   u8 addr_type, struct sock *skip_sk)
2882 {
2883 	struct mgmt_ev_device_unpaired ev;
2884 
2885 	bacpy(&ev.addr.bdaddr, bdaddr);
2886 	ev.addr.type = addr_type;
2887 
2888 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2889 			  skip_sk);
2890 }
2891 
/* Completion callback for unpair_device_sync().
 *
 * On success broadcasts Device Unpaired (skipping the issuing socket),
 * then lets cmd_complete (addr_cmd_complete) answer the command before
 * freeing it.
 */
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
2903 
2904 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2905 {
2906 	struct mgmt_pending_cmd *cmd = data;
2907 	struct mgmt_cp_unpair_device *cp = cmd->param;
2908 	struct hci_conn *conn;
2909 
2910 	if (cp->addr.type == BDADDR_BREDR)
2911 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2912 					       &cp->addr.bdaddr);
2913 	else
2914 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2915 					       le_addr_type(cp->addr.type));
2916 
2917 	if (!conn)
2918 		return 0;
2919 
2920 	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2921 }
2922 
/* Handler for MGMT_OP_UNPAIR_DEVICE.
 *
 * Removes stored pairing data (link key for BR/EDR; SMP LTK/IRK for
 * LE) and, when requested and a link exists, queues a disconnect via
 * unpair_device_sync(). The reply carries the target address back.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Not connected: drop the connection parameters right away */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3051 
/* Handler for MGMT_OP_DISCONNECT.
 *
 * Looks up the BR/EDR or LE connection for the given address and
 * initiates disconnection; the reply is sent from the pending command's
 * cmd_complete when the disconnect concludes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in flight at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED connections carry no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3117 
3118 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3119 {
3120 	switch (link_type) {
3121 	case ISO_LINK:
3122 	case LE_LINK:
3123 		switch (addr_type) {
3124 		case ADDR_LE_DEV_PUBLIC:
3125 			return BDADDR_LE_PUBLIC;
3126 
3127 		default:
3128 			/* Fallback to LE Random address type */
3129 			return BDADDR_LE_RANDOM;
3130 		}
3131 
3132 	default:
3133 		/* Fallback to BR/EDR type */
3134 		return BDADDR_BREDR;
3135 	}
3136 }
3137 
/* Handler for MGMT_OP_GET_CONNECTIONS.
 *
 * Returns the addresses of all mgmt-visible connections, excluding
 * SCO/eSCO links. The reply buffer is sized from a first counting pass
 * (which includes SCO), so it may be allocated larger than needed; the
 * reply length is recomputed from the filtered count.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: upper bound on the number of entries */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill entries. SCO/eSCO slots are written and
	 * then skipped (i not advanced), so the next connection simply
	 * overwrites them.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3191 
/* Send an HCI PIN Code Negative Reply for the given address and track
 * it as a pending MGMT_OP_PIN_CODE_NEG_REPLY command. The mgmt reply
 * is produced later by addr_cmd_complete.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command only carries the bdaddr */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
3212 
/* Handler for MGMT_OP_PIN_CODE_REPLY.
 *
 * Forwards the user-supplied PIN to the controller for the pending
 * PIN-code request. A secure (BT_SECURITY_HIGH) pairing requires a
 * full 16-byte PIN; anything shorter is converted into a negative
 * reply and the command is rejected with INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		/* Reject the pairing on the HCI level, then fail the
		 * mgmt command itself.
		 */
		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3274 
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * future pairings.  Values above SMP_IO_KEYBOARD_DISPLAY are rejected
 * with INVALID_PARAMS; otherwise the command always succeeds.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
3297 
3298 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3299 {
3300 	struct hci_dev *hdev = conn->hdev;
3301 	struct mgmt_pending_cmd *cmd;
3302 
3303 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3304 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3305 			continue;
3306 
3307 		if (cmd->user_data != conn)
3308 			continue;
3309 
3310 		return cmd;
3311 	}
3312 
3313 	return NULL;
3314 }
3315 
/* Complete a pending MGMT_OP_PAIR_DEVICE command with @status.
 *
 * Sends the Pair Device response, detaches all pairing callbacks from
 * the connection and drops the command's references to it:
 * hci_conn_put() balances the hci_conn_get() taken in pair_device();
 * hci_conn_drop() releases the hold on the link itself.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
3344 
3345 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3346 {
3347 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3348 	struct mgmt_pending_cmd *cmd;
3349 
3350 	cmd = find_pairing(conn);
3351 	if (cmd) {
3352 		cmd->cmd_complete(cmd, status);
3353 		mgmt_pending_remove(cmd);
3354 	}
3355 }
3356 
3357 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3358 {
3359 	struct mgmt_pending_cmd *cmd;
3360 
3361 	BT_DBG("status %u", status);
3362 
3363 	cmd = find_pairing(conn);
3364 	if (!cmd) {
3365 		BT_DBG("Unable to find a pending command");
3366 		return;
3367 	}
3368 
3369 	cmd->cmd_complete(cmd, mgmt_status(status));
3370 	mgmt_pending_remove(cmd);
3371 }
3372 
3373 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3374 {
3375 	struct mgmt_pending_cmd *cmd;
3376 
3377 	BT_DBG("status %u", status);
3378 
3379 	if (!status)
3380 		return;
3381 
3382 	cmd = find_pairing(conn);
3383 	if (!cmd) {
3384 		BT_DBG("Unable to find a pending command");
3385 		return;
3386 	}
3387 
3388 	cmd->cmd_complete(cmd, mgmt_status(status));
3389 	mgmt_pending_remove(cmd);
3390 }
3391 
3392 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3393 		       u16 len)
3394 {
3395 	struct mgmt_cp_pair_device *cp = data;
3396 	struct mgmt_rp_pair_device rp;
3397 	struct mgmt_pending_cmd *cmd;
3398 	u8 sec_level, auth_type;
3399 	struct hci_conn *conn;
3400 	int err;
3401 
3402 	bt_dev_dbg(hdev, "sock %p", sk);
3403 
3404 	memset(&rp, 0, sizeof(rp));
3405 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3406 	rp.addr.type = cp->addr.type;
3407 
3408 	if (!bdaddr_type_is_valid(cp->addr.type))
3409 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3410 					 MGMT_STATUS_INVALID_PARAMS,
3411 					 &rp, sizeof(rp));
3412 
3413 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3414 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3415 					 MGMT_STATUS_INVALID_PARAMS,
3416 					 &rp, sizeof(rp));
3417 
3418 	hci_dev_lock(hdev);
3419 
3420 	if (!hdev_is_powered(hdev)) {
3421 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3422 					MGMT_STATUS_NOT_POWERED, &rp,
3423 					sizeof(rp));
3424 		goto unlock;
3425 	}
3426 
3427 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3428 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3429 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3430 					sizeof(rp));
3431 		goto unlock;
3432 	}
3433 
3434 	sec_level = BT_SECURITY_MEDIUM;
3435 	auth_type = HCI_AT_DEDICATED_BONDING;
3436 
3437 	if (cp->addr.type == BDADDR_BREDR) {
3438 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3439 				       auth_type, CONN_REASON_PAIR_DEVICE);
3440 	} else {
3441 		u8 addr_type = le_addr_type(cp->addr.type);
3442 		struct hci_conn_params *p;
3443 
3444 		/* When pairing a new device, it is expected to remember
3445 		 * this device for future connections. Adding the connection
3446 		 * parameter information ahead of time allows tracking
3447 		 * of the peripheral preferred values and will speed up any
3448 		 * further connection establishment.
3449 		 *
3450 		 * If connection parameters already exist, then they
3451 		 * will be kept and this function does nothing.
3452 		 */
3453 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3454 
3455 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3456 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3457 
3458 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3459 					   sec_level, HCI_LE_CONN_TIMEOUT,
3460 					   CONN_REASON_PAIR_DEVICE);
3461 	}
3462 
3463 	if (IS_ERR(conn)) {
3464 		int status;
3465 
3466 		if (PTR_ERR(conn) == -EBUSY)
3467 			status = MGMT_STATUS_BUSY;
3468 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3469 			status = MGMT_STATUS_NOT_SUPPORTED;
3470 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3471 			status = MGMT_STATUS_REJECTED;
3472 		else
3473 			status = MGMT_STATUS_CONNECT_FAILED;
3474 
3475 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3476 					status, &rp, sizeof(rp));
3477 		goto unlock;
3478 	}
3479 
3480 	if (conn->connect_cfm_cb) {
3481 		hci_conn_drop(conn);
3482 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3483 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3484 		goto unlock;
3485 	}
3486 
3487 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3488 	if (!cmd) {
3489 		err = -ENOMEM;
3490 		hci_conn_drop(conn);
3491 		goto unlock;
3492 	}
3493 
3494 	cmd->cmd_complete = pairing_complete;
3495 
3496 	/* For LE, just connecting isn't a proof that the pairing finished */
3497 	if (cp->addr.type == BDADDR_BREDR) {
3498 		conn->connect_cfm_cb = pairing_complete_cb;
3499 		conn->security_cfm_cb = pairing_complete_cb;
3500 		conn->disconn_cfm_cb = pairing_complete_cb;
3501 	} else {
3502 		conn->connect_cfm_cb = le_pairing_complete_cb;
3503 		conn->security_cfm_cb = le_pairing_complete_cb;
3504 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3505 	}
3506 
3507 	conn->io_capability = cp->io_cap;
3508 	cmd->user_data = hci_conn_get(conn);
3509 
3510 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3511 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3512 		cmd->cmd_complete(cmd, 0);
3513 		mgmt_pending_remove(cmd);
3514 	}
3515 
3516 	err = 0;
3517 
3518 unlock:
3519 	hci_dev_unlock(hdev);
3520 	return err;
3521 }
3522 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair Device
 * command for the given address, remove any partially created keys and
 * tear down the link if it only existed for the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must match the pairing actually in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* NOTE(review): cmd_complete is pairing_complete(), which calls
	 * hci_conn_drop()/hci_conn_put() on conn, yet conn is still
	 * dereferenced below.  Presumably another reference keeps it
	 * alive while the hdev lock is held — confirm against hci_conn
	 * lifetime rules.
	 */
	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3579 
/* Common helper for all user pairing responses (PIN code negative
 * reply, user confirm reply/neg reply, passkey reply/neg reply).
 *
 * For LE addresses the response is routed to SMP and answered
 * immediately; for BR/EDR the mgmt command @mgmt_op is mapped onto the
 * HCI command @hci_op and tracked as a pending command.  @passkey is
 * only used when @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled entirely by SMP */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3650 
3651 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3652 			      void *data, u16 len)
3653 {
3654 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3655 
3656 	bt_dev_dbg(hdev, "sock %p", sk);
3657 
3658 	return user_pairing_resp(sk, hdev, &cp->addr,
3659 				MGMT_OP_PIN_CODE_NEG_REPLY,
3660 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3661 }
3662 
3663 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3664 			      u16 len)
3665 {
3666 	struct mgmt_cp_user_confirm_reply *cp = data;
3667 
3668 	bt_dev_dbg(hdev, "sock %p", sk);
3669 
3670 	if (len != sizeof(*cp))
3671 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3672 				       MGMT_STATUS_INVALID_PARAMS);
3673 
3674 	return user_pairing_resp(sk, hdev, &cp->addr,
3675 				 MGMT_OP_USER_CONFIRM_REPLY,
3676 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3677 }
3678 
3679 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3680 				  void *data, u16 len)
3681 {
3682 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3683 
3684 	bt_dev_dbg(hdev, "sock %p", sk);
3685 
3686 	return user_pairing_resp(sk, hdev, &cp->addr,
3687 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3688 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3689 }
3690 
3691 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3692 			      u16 len)
3693 {
3694 	struct mgmt_cp_user_passkey_reply *cp = data;
3695 
3696 	bt_dev_dbg(hdev, "sock %p", sk);
3697 
3698 	return user_pairing_resp(sk, hdev, &cp->addr,
3699 				 MGMT_OP_USER_PASSKEY_REPLY,
3700 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3701 }
3702 
3703 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3704 				  void *data, u16 len)
3705 {
3706 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3707 
3708 	bt_dev_dbg(hdev, "sock %p", sk);
3709 
3710 	return user_pairing_resp(sk, hdev, &cp->addr,
3711 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3712 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3713 }
3714 
/* If the currently active advertising instance carries any of @flags
 * (e.g. local name or appearance), expire it early and schedule the
 * next instance so that the changed data gets advertised.
 *
 * Always returns 0 (used as a hci_cmd_sync work callback).
 */
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}
3737 
/* hci_cmd_sync callback: expire the current advertising instance if it
 * advertises the local name, so the new name is picked up.
 */
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
3742 
/* Completion callback for MGMT_OP_SET_LOCAL_NAME: report the result to
 * the socket and, while LE advertising is active, queue a refresh of
 * the advertised name.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if the command was already taken over or cancelled */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3767 
/* hci_cmd_sync work for MGMT_OP_SET_LOCAL_NAME: push the (already
 * stored) local name to the controller.  Always returns 0.
 */
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	/* On BR/EDR the name is also part of the EIR data */
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
3783 
/* MGMT_OP_SET_LOCAL_NAME handler: update the device name and short
 * name.  Unchanged names answer immediately; on a powered-down
 * controller only the stored copies are updated and the name-changed
 * event is emitted; otherwise the update is queued as sync work and
 * dev_name is committed only after queueing succeeded.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3846 
/* hci_cmd_sync callback: expire the current advertising instance if it
 * advertises the appearance, so the new value is picked up.
 */
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
3851 
/* MGMT_OP_SET_APPEARANCE handler: store the GAP appearance value (LE
 * only).  When the value actually changes while LE advertising is
 * active, queue a refresh of the advertised appearance.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3886 
3887 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3888 				 void *data, u16 len)
3889 {
3890 	struct mgmt_rp_get_phy_configuration rp;
3891 
3892 	bt_dev_dbg(hdev, "sock %p", sk);
3893 
3894 	hci_dev_lock(hdev);
3895 
3896 	memset(&rp, 0, sizeof(rp));
3897 
3898 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3899 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3900 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3901 
3902 	hci_dev_unlock(hdev);
3903 
3904 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3905 				 &rp, sizeof(rp));
3906 }
3907 
3908 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3909 {
3910 	struct mgmt_ev_phy_configuration_changed ev;
3911 
3912 	memset(&ev, 0, sizeof(ev));
3913 
3914 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3915 
3916 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3917 			  sizeof(ev), skip);
3918 }
3919 
/* Completion callback for MGMT_OP_SET_PHY_CONFIGURATION: derive the
 * final status from the queuing error or the HCI command response skb,
 * answer the socket and, on success, broadcast the PHY change event.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Bail out if the command was already taken over or cancelled */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			/* First byte of the response is the HCI status */
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3956 
3957 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3958 {
3959 	struct mgmt_pending_cmd *cmd = data;
3960 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3961 	struct hci_cp_le_set_default_phy cp_phy;
3962 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3963 
3964 	memset(&cp_phy, 0, sizeof(cp_phy));
3965 
3966 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3967 		cp_phy.all_phys |= 0x01;
3968 
3969 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3970 		cp_phy.all_phys |= 0x02;
3971 
3972 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3973 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3974 
3975 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3976 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3977 
3978 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3979 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3980 
3981 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3982 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3983 
3984 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3985 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3986 
3987 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3988 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3989 
3990 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3991 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3992 
3993 	return 0;
3994 }
3995 
/* MGMT_OP_SET_PHY_CONFIGURATION handler: validate the requested PHY
 * selection, apply the BR/EDR part immediately by rewriting
 * hdev->pkt_type, and queue the LE part as an HCI LE Set Default PHY
 * command when the LE selection actually changed.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting an unsupported PHY is an error ... */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* ... and so is deselecting a PHY that cannot be configured */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Build the BR/EDR packet type mask.  Note that the basic rate
	 * multi-slot bits are enable bits while the EDR bits are
	 * "shall not use" bits, hence the inverted logic below.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE selection is unchanged, only the BR/EDR part needed
	 * updating and the command can complete right away.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4124 
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the list of blocked keys
 * with the one supplied by user space.
 *
 * Note that here err holds a MGMT_STATUS_* code (not a negative
 * errno); it is passed as the status of the command complete event.
 * On allocation failure midway the keys added so far remain in place.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must exactly match the declared key count */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				err, NULL, 0);
}
4173 
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the wideband speech
 * setting.  Requires controller support (quirk flag); changing the
 * value while the controller is powered is rejected.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only allow changing the value while powered off */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only notify other sockets if the setting actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4222 
/* MGMT_OP_READ_CONTROLLER_CAP handler: build an EIR-style list of
 * controller capabilities (security flags, max encryption key sizes
 * and, when available, the LE TX power range).
 *
 * The response is assembled in a small on-stack buffer; the appended
 * entries must stay within its 20 bytes.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
4289 
/* UUIDs identifying the experimental features exposed over the
 * Management interface.  Note the bytes are stored in reversed
 * (little-endian, over-the-wire) order relative to the canonical
 * string form shown in each comment.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};

/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
};
4333 
4334 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4335 				  void *data, u16 data_len)
4336 {
4337 	struct mgmt_rp_read_exp_features_info *rp;
4338 	size_t len;
4339 	u16 idx = 0;
4340 	u32 flags;
4341 	int status;
4342 
4343 	bt_dev_dbg(hdev, "sock %p", sk);
4344 
4345 	/* Enough space for 7 features */
4346 	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4347 	rp = kzalloc(len, GFP_KERNEL);
4348 	if (!rp)
4349 		return -ENOMEM;
4350 
4351 #ifdef CONFIG_BT_FEATURE_DEBUG
4352 	if (!hdev) {
4353 		flags = bt_dbg_get() ? BIT(0) : 0;
4354 
4355 		memcpy(rp->features[idx].uuid, debug_uuid, 16);
4356 		rp->features[idx].flags = cpu_to_le32(flags);
4357 		idx++;
4358 	}
4359 #endif
4360 
4361 	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4362 		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4363 			flags = BIT(0);
4364 		else
4365 			flags = 0;
4366 
4367 		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4368 		rp->features[idx].flags = cpu_to_le32(flags);
4369 		idx++;
4370 	}
4371 
4372 	if (hdev && ll_privacy_capable(hdev)) {
4373 		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4374 			flags = BIT(0) | BIT(1);
4375 		else
4376 			flags = BIT(1);
4377 
4378 		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4379 		rp->features[idx].flags = cpu_to_le32(flags);
4380 		idx++;
4381 	}
4382 
4383 	if (hdev && (aosp_has_quality_report(hdev) ||
4384 		     hdev->set_quality_report)) {
4385 		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4386 			flags = BIT(0);
4387 		else
4388 			flags = 0;
4389 
4390 		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4391 		rp->features[idx].flags = cpu_to_le32(flags);
4392 		idx++;
4393 	}
4394 
4395 	if (hdev && hdev->get_data_path_id) {
4396 		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4397 			flags = BIT(0);
4398 		else
4399 			flags = 0;
4400 
4401 		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4402 		rp->features[idx].flags = cpu_to_le32(flags);
4403 		idx++;
4404 	}
4405 
4406 	if (IS_ENABLED(CONFIG_BT_LE)) {
4407 		flags = iso_enabled() ? BIT(0) : 0;
4408 		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4409 		rp->features[idx].flags = cpu_to_le32(flags);
4410 		idx++;
4411 	}
4412 
4413 	if (hdev && lmp_le_capable(hdev)) {
4414 		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4415 			flags = BIT(0);
4416 		else
4417 			flags = 0;
4418 
4419 		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4420 		rp->features[idx].flags = cpu_to_le32(flags);
4421 		idx++;
4422 	}
4423 
4424 	rp->feature_count = cpu_to_le16(idx);
4425 
4426 	/* After reading the experimental features information, enable
4427 	 * the events to update client on any future change.
4428 	 */
4429 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4430 
4431 	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4432 				   MGMT_OP_READ_EXP_FEATURES_INFO,
4433 				   0, rp, sizeof(*rp) + (20 * idx));
4434 
4435 	kfree(rp);
4436 	return status;
4437 }
4438 
/* Emit an Experimental Feature Changed event for the LL privacy (RPA
 * resolution) feature, skipping the socket that caused the change.
 * BIT(1) in the flags marks that the supported settings changed too.
 *
 * As a side effect, the device-privacy connection flag is made
 * available (or withdrawn) to match the new feature state.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	/* NOTE(review): conn_flags is updated without any explicit locking
	 * here — confirm whether this needs to be atomic with respect to
	 * concurrent readers of hdev->conn_flags.
	 */
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);

}
4459 
4460 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4461 			       bool enabled, struct sock *skip)
4462 {
4463 	struct mgmt_ev_exp_feature_changed ev;
4464 
4465 	memset(&ev, 0, sizeof(ev));
4466 	memcpy(ev.uuid, uuid, 16);
4467 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4468 
4469 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4470 				  &ev, sizeof(ev),
4471 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4472 }
4473 
/* Initializer for one struct mgmt_exp_feature table entry: binds an
 * experimental feature UUID to its set-handler.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4479 
/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* The reply always carries the all-zero UUID with no flags set */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Without a controller index, the zero key disables debug logging */
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* With a (powered-down) controller index, disable LL privacy */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4516 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggle the global debug-logging experimental feature.  Only valid
 * without a controller index; the single parameter octet must be 0x00
 * or 0x01.  Emits an exp-feature-changed event on an actual change.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed;
	int err;

	/* This feature is global: a controller index is not allowed */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Exactly one parameter octet is expected */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Anything other than 0x00/0x01 is rejected */
	if (cp->param[0] > 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->param[0];
	changed = (enable != bt_dbg_get());
	bt_dbg_set(enable);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, enable, sk);

	return err;
}
#endif
4563 
4564 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4565 			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4566 {
4567 	struct mgmt_rp_set_exp_feature rp;
4568 	bool val, changed;
4569 	int err;
4570 
4571 	/* Command requires to use the controller index */
4572 	if (!hdev)
4573 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4574 				       MGMT_OP_SET_EXP_FEATURE,
4575 				       MGMT_STATUS_INVALID_INDEX);
4576 
4577 	/* Parameters are limited to a single octet */
4578 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4579 		return mgmt_cmd_status(sk, hdev->id,
4580 				       MGMT_OP_SET_EXP_FEATURE,
4581 				       MGMT_STATUS_INVALID_PARAMS);
4582 
4583 	/* Only boolean on/off is supported */
4584 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4585 		return mgmt_cmd_status(sk, hdev->id,
4586 				       MGMT_OP_SET_EXP_FEATURE,
4587 				       MGMT_STATUS_INVALID_PARAMS);
4588 
4589 	val = !!cp->param[0];
4590 
4591 	if (val) {
4592 		changed = !hci_dev_test_and_set_flag(hdev,
4593 						     HCI_MESH_EXPERIMENTAL);
4594 	} else {
4595 		hci_dev_clear_flag(hdev, HCI_MESH);
4596 		changed = hci_dev_test_and_clear_flag(hdev,
4597 						      HCI_MESH_EXPERIMENTAL);
4598 	}
4599 
4600 	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4601 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4602 
4603 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4604 
4605 	err = mgmt_cmd_complete(sk, hdev->id,
4606 				MGMT_OP_SET_EXP_FEATURE, 0,
4607 				&rp, sizeof(rp));
4608 
4609 	if (changed)
4610 		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4611 
4612 	return err;
4613 }
4614 
/* Toggle the LL privacy (RPA resolution) experimental feature.
 *
 * Requires a controller index and a powered-down controller; the single
 * parameter octet must be 0x00 (off) or 0x01 (on).  The reply flags use
 * BIT(0) for "enabled" and BIT(1) to signal that the supported settings
 * changed as a consequence.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		/* Advertising cannot stay on across an LL privacy change */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4679 
/* Toggle the quality-report experimental feature on @hdev.
 *
 * Requires a controller index; the single parameter octet must be 0x00
 * or 0x01.  The actual enabling is delegated either to the driver hook
 * (hdev->set_quality_report) or to the AOSP vendor extension, under the
 * request-sync lock; the HCI_QUALITY_REPORT flag mirrors the result.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Serialize against other synchronous HCI request users */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Prefer the driver-provided hook over the AOSP extension */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4753 
4754 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4755 				  struct mgmt_cp_set_exp_feature *cp,
4756 				  u16 data_len)
4757 {
4758 	bool val, changed;
4759 	int err;
4760 	struct mgmt_rp_set_exp_feature rp;
4761 
4762 	/* Command requires to use a valid controller index */
4763 	if (!hdev)
4764 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4765 				       MGMT_OP_SET_EXP_FEATURE,
4766 				       MGMT_STATUS_INVALID_INDEX);
4767 
4768 	/* Parameters are limited to a single octet */
4769 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4770 		return mgmt_cmd_status(sk, hdev->id,
4771 				       MGMT_OP_SET_EXP_FEATURE,
4772 				       MGMT_STATUS_INVALID_PARAMS);
4773 
4774 	/* Only boolean on/off is supported */
4775 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4776 		return mgmt_cmd_status(sk, hdev->id,
4777 				       MGMT_OP_SET_EXP_FEATURE,
4778 				       MGMT_STATUS_INVALID_PARAMS);
4779 
4780 	val = !!cp->param[0];
4781 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4782 
4783 	if (!hdev->get_data_path_id) {
4784 		return mgmt_cmd_status(sk, hdev->id,
4785 				       MGMT_OP_SET_EXP_FEATURE,
4786 				       MGMT_STATUS_NOT_SUPPORTED);
4787 	}
4788 
4789 	if (changed) {
4790 		if (val)
4791 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4792 		else
4793 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4794 	}
4795 
4796 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4797 		    val, changed);
4798 
4799 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4800 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4801 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4802 	err = mgmt_cmd_complete(sk, hdev->id,
4803 				MGMT_OP_SET_EXP_FEATURE, 0,
4804 				&rp, sizeof(rp));
4805 
4806 	if (changed)
4807 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4808 
4809 	return err;
4810 }
4811 
/* Toggle the LE simultaneous-roles (central + peripheral) experimental
 * feature on @hdev.
 *
 * Requires a controller index and controller support for simultaneous
 * LE states; the single parameter octet must be 0x00 or 0x01.
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
4869 
#ifdef CONFIG_BT_LE
/* Toggle the global ISO socket experimental feature by registering or
 * unregistering the ISO socket protocol.  Only valid without a
 * controller index; the single parameter octet must be 0x00 or 0x01.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool enable, changed = false;
	int err;

	/* This feature is global: a controller index is not allowed */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Exactly one parameter octet is expected */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Anything other than 0x00/0x01 is rejected */
	if (cp->param[0] > 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	enable = !!cp->param[0];

	/* Register or unregister the ISO socket protocol accordingly */
	err = enable ? iso_init() : iso_exit();
	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(enable ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, enable, sk);

	return err;
}
#endif
4920 
/* Dispatch table mapping experimental feature UUIDs to their set
 * handlers; terminated by a NULL-uuid sentinel entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4942 
4943 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4944 			   void *data, u16 data_len)
4945 {
4946 	struct mgmt_cp_set_exp_feature *cp = data;
4947 	size_t i = 0;
4948 
4949 	bt_dev_dbg(hdev, "sock %p", sk);
4950 
4951 	for (i = 0; exp_features[i].uuid; i++) {
4952 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4953 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4954 	}
4955 
4956 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4957 			       MGMT_OP_SET_EXP_FEATURE,
4958 			       MGMT_STATUS_NOT_SUPPORTED);
4959 }
4960 
/* Return the connection flags supported for @params on @hdev.
 *
 * Devices using RPAs can only be programmed in the acceptlist if
 * LL Privacy has been enabled; otherwise they cannot use
 * HCI_CONN_FLAG_REMOTE_WAKEUP, so that bit is masked out here.
 */
static u32 get_params_flags(struct hci_dev *hdev,
			    struct hci_conn_params *params)
{
	u32 flags = hdev->conn_flags;

	/* An IRK for this address implies the peer uses RPAs */
	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;

	return flags;
}
4976 
4977 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4978 			    u16 data_len)
4979 {
4980 	struct mgmt_cp_get_device_flags *cp = data;
4981 	struct mgmt_rp_get_device_flags rp;
4982 	struct bdaddr_list_with_flags *br_params;
4983 	struct hci_conn_params *params;
4984 	u32 supported_flags;
4985 	u32 current_flags = 0;
4986 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4987 
4988 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4989 		   &cp->addr.bdaddr, cp->addr.type);
4990 
4991 	hci_dev_lock(hdev);
4992 
4993 	supported_flags = hdev->conn_flags;
4994 
4995 	memset(&rp, 0, sizeof(rp));
4996 
4997 	if (cp->addr.type == BDADDR_BREDR) {
4998 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4999 							      &cp->addr.bdaddr,
5000 							      cp->addr.type);
5001 		if (!br_params)
5002 			goto done;
5003 
5004 		current_flags = br_params->flags;
5005 	} else {
5006 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5007 						le_addr_type(cp->addr.type));
5008 		if (!params)
5009 			goto done;
5010 
5011 		supported_flags = get_params_flags(hdev, params);
5012 		current_flags = params->flags;
5013 	}
5014 
5015 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5016 	rp.addr.type = cp->addr.type;
5017 	rp.supported_flags = cpu_to_le32(supported_flags);
5018 	rp.current_flags = cpu_to_le32(current_flags);
5019 
5020 	status = MGMT_STATUS_SUCCESS;
5021 
5022 done:
5023 	hci_dev_unlock(hdev);
5024 
5025 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5026 				&rp, sizeof(rp));
5027 }
5028 
5029 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5030 				 bdaddr_t *bdaddr, u8 bdaddr_type,
5031 				 u32 supported_flags, u32 current_flags)
5032 {
5033 	struct mgmt_ev_device_flags_changed ev;
5034 
5035 	bacpy(&ev.addr.bdaddr, bdaddr);
5036 	ev.addr.type = bdaddr_type;
5037 	ev.supported_flags = cpu_to_le32(supported_flags);
5038 	ev.current_flags = cpu_to_le32(current_flags);
5039 
5040 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5041 }
5042 
/* Handler for MGMT_OP_SET_DEVICE_FLAGS: update the connection flags of
 * a BR/EDR acceptlist entry or an LE connection-parameters entry.
 * Replies INVALID_PARAMS for unsupported flag bits or unknown devices,
 * and broadcasts a device-flags-changed event on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	/* NOTE(review): conn_flags is read before hci_dev_lock() is taken
	 * below and can in principle change in between — confirm whether
	 * this early validation needs to move under the lock.
	 */
	supported_flags = hdev->conn_flags;

	/* Coarse check against the controller-wide flags; a stricter
	 * per-device check follows for LE entries.
	 */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}

		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	if (!params) {
		bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
			    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
		goto unlock;
	}

	/* Re-validate against the per-device supported flags */
	supported_flags = get_params_flags(hdev, params);

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto unlock;
	}

	WRITE_ONCE(params->flags, current_flags);
	status = MGMT_STATUS_SUCCESS;

	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 * has been set.
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
5119 
5120 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5121 				   u16 handle)
5122 {
5123 	struct mgmt_ev_adv_monitor_added ev;
5124 
5125 	ev.monitor_handle = cpu_to_le16(handle);
5126 
5127 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5128 }
5129 
/* Broadcast an Advertisement Monitor Removed event for @handle.
 *
 * When a Remove Adv Monitor command for a specific handle is pending,
 * its requester is skipped because it receives the result via the
 * command reply instead (a zero monitor_handle means "remove all", in
 * which case nobody is skipped).
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
5149 
/* Handler for MGMT_OP_READ_ADV_MONITOR_FEATURES: report the supported
 * and enabled monitor features plus the handles of all registered
 * advertisement monitors.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* NOTE(review): assumes the IDR never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES monitors (presumably enforced
	 * at registration time) — handles[] would overflow otherwise.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
5198 
/* Completion callback for the queued add-adv-patterns-monitor work.
 *
 * On success the monitor is announced to other mgmt sockets, accounted,
 * marked registered and passive scanning is re-evaluated.  In all cases
 * the pending command is completed with the (mapped) status and freed.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		/* A new monitor may require passive scanning to start */
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5226 
/* hci_cmd_sync work: register the monitor carried in the pending
 * command's user_data with the controller.
 */
static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	return hci_add_adv_monitor(hdev, monitor);
}
5234 
/* Common tail of the Add Adv Patterns Monitor (with and without RSSI)
 * handlers: validate @status from parsing, reject when a conflicting
 * operation is pending, then queue the registration work.
 *
 * Ownership of @m: on success it is handed to the queued command (freed
 * by the completion path); on any error it is freed here before the
 * error status is returned to the requester.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	/* A non-zero status from the caller's parsing step aborts early */
	if (status)
		goto unlock;

	/* Only one monitor/LE state altering operation at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	/* Error path: the monitor was not queued, so free it here */
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
5282 
5283 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5284 				   struct mgmt_adv_rssi_thresholds *rssi)
5285 {
5286 	if (rssi) {
5287 		m->rssi.low_threshold = rssi->low_threshold;
5288 		m->rssi.low_threshold_timeout =
5289 		    __le16_to_cpu(rssi->low_threshold_timeout);
5290 		m->rssi.high_threshold = rssi->high_threshold;
5291 		m->rssi.high_threshold_timeout =
5292 		    __le16_to_cpu(rssi->high_threshold_timeout);
5293 		m->rssi.sampling_period = rssi->sampling_period;
5294 	} else {
5295 		/* Default values. These numbers are the least constricting
5296 		 * parameters for MSFT API to work, so it behaves as if there
5297 		 * are no rssi parameter to consider. May need to be changed
5298 		 * if other API are to be supported.
5299 		 */
5300 		m->rssi.low_threshold = -127;
5301 		m->rssi.low_threshold_timeout = 60;
5302 		m->rssi.high_threshold = -127;
5303 		m->rssi.high_threshold_timeout = 0;
5304 		m->rssi.sampling_period = 0;
5305 	}
5306 }
5307 
5308 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5309 				    struct mgmt_adv_pattern *patterns)
5310 {
5311 	u8 offset = 0, length = 0;
5312 	struct adv_pattern *p = NULL;
5313 	int i;
5314 
5315 	for (i = 0; i < pattern_count; i++) {
5316 		offset = patterns[i].offset;
5317 		length = patterns[i].length;
5318 		if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5319 		    length > HCI_MAX_EXT_AD_LENGTH ||
5320 		    (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5321 			return MGMT_STATUS_INVALID_PARAMS;
5322 
5323 		p = kmalloc(sizeof(*p), GFP_KERNEL);
5324 		if (!p)
5325 			return MGMT_STATUS_NO_RESOURCES;
5326 
5327 		p->ad_type = patterns[i].ad_type;
5328 		p->offset = patterns[i].offset;
5329 		p->length = patterns[i].length;
5330 		memcpy(p->value, patterns[i].value, p->length);
5331 
5332 		INIT_LIST_HEAD(&p->list);
5333 		list_add(&p->list, &m->patterns);
5334 	}
5335 
5336 	return MGMT_STATUS_SUCCESS;
5337 }
5338 
5339 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5340 				    void *data, u16 len)
5341 {
5342 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5343 	struct adv_monitor *m = NULL;
5344 	u8 status = MGMT_STATUS_SUCCESS;
5345 	size_t expected_size = sizeof(*cp);
5346 
5347 	BT_DBG("request for %s", hdev->name);
5348 
5349 	if (len <= sizeof(*cp)) {
5350 		status = MGMT_STATUS_INVALID_PARAMS;
5351 		goto done;
5352 	}
5353 
5354 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5355 	if (len != expected_size) {
5356 		status = MGMT_STATUS_INVALID_PARAMS;
5357 		goto done;
5358 	}
5359 
5360 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5361 	if (!m) {
5362 		status = MGMT_STATUS_NO_RESOURCES;
5363 		goto done;
5364 	}
5365 
5366 	INIT_LIST_HEAD(&m->patterns);
5367 
5368 	parse_adv_monitor_rssi(m, NULL);
5369 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5370 
5371 done:
5372 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5373 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5374 }
5375 
5376 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5377 					 void *data, u16 len)
5378 {
5379 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5380 	struct adv_monitor *m = NULL;
5381 	u8 status = MGMT_STATUS_SUCCESS;
5382 	size_t expected_size = sizeof(*cp);
5383 
5384 	BT_DBG("request for %s", hdev->name);
5385 
5386 	if (len <= sizeof(*cp)) {
5387 		status = MGMT_STATUS_INVALID_PARAMS;
5388 		goto done;
5389 	}
5390 
5391 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5392 	if (len != expected_size) {
5393 		status = MGMT_STATUS_INVALID_PARAMS;
5394 		goto done;
5395 	}
5396 
5397 	m = kzalloc(sizeof(*m), GFP_KERNEL);
5398 	if (!m) {
5399 		status = MGMT_STATUS_NO_RESOURCES;
5400 		goto done;
5401 	}
5402 
5403 	INIT_LIST_HEAD(&m->patterns);
5404 
5405 	parse_adv_monitor_rssi(m, &cp->rssi);
5406 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5407 
5408 done:
5409 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5410 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5411 }
5412 
/* Completion callback for MGMT_OP_REMOVE_ADV_MONITOR: sends the result to
 * the requesting socket and re-evaluates passive scanning now that the
 * monitor set has changed.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	/* Echo the handle from the request (0 meant "remove all"). */
	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
5435 
5436 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5437 {
5438 	struct mgmt_pending_cmd *cmd = data;
5439 	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5440 	u16 handle = __le16_to_cpu(cp->monitor_handle);
5441 
5442 	if (!handle)
5443 		return hci_remove_all_adv_monitor(hdev);
5444 
5445 	return hci_remove_single_adv_monitor(hdev, handle);
5446 }
5447 
5448 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5449 			      void *data, u16 len)
5450 {
5451 	struct mgmt_pending_cmd *cmd;
5452 	int err, status;
5453 
5454 	hci_dev_lock(hdev);
5455 
5456 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
5457 	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5458 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5459 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5460 		status = MGMT_STATUS_BUSY;
5461 		goto unlock;
5462 	}
5463 
5464 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5465 	if (!cmd) {
5466 		status = MGMT_STATUS_NO_RESOURCES;
5467 		goto unlock;
5468 	}
5469 
5470 	err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5471 				 mgmt_remove_adv_monitor_complete);
5472 
5473 	if (err) {
5474 		mgmt_pending_remove(cmd);
5475 
5476 		if (err == -ENOMEM)
5477 			status = MGMT_STATUS_NO_RESOURCES;
5478 		else
5479 			status = MGMT_STATUS_FAILED;
5480 
5481 		goto unlock;
5482 	}
5483 
5484 	hci_dev_unlock(hdev);
5485 
5486 	return 0;
5487 
5488 unlock:
5489 	hci_dev_unlock(hdev);
5490 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5491 			       status);
5492 }
5493 
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA: translates the HCI
 * reply skb (filled in by read_local_oob_data_sync()) into a mgmt response
 * carrying the OOB hash/randomizer values, then frees the skb and the
 * pending command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Even when the request itself succeeded, the skb may be missing,
	 * an ERR_PTR, or carry a non-zero HCI status in its first byte.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		/* Legacy reply: only P-192 values exist, so trim the P-256
		 * fields off the end of the mgmt response.
		 */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: report both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	/* Only a real skb may be freed (not NULL, not an ERR_PTR). */
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5560 
5561 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5562 {
5563 	struct mgmt_pending_cmd *cmd = data;
5564 
5565 	if (bredr_sc_enabled(hdev))
5566 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5567 	else
5568 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5569 
5570 	if (IS_ERR(cmd->skb))
5571 		return PTR_ERR(cmd->skb);
5572 	else
5573 		return 0;
5574 }
5575 
5576 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5577 			       void *data, u16 data_len)
5578 {
5579 	struct mgmt_pending_cmd *cmd;
5580 	int err;
5581 
5582 	bt_dev_dbg(hdev, "sock %p", sk);
5583 
5584 	hci_dev_lock(hdev);
5585 
5586 	if (!hdev_is_powered(hdev)) {
5587 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5588 				      MGMT_STATUS_NOT_POWERED);
5589 		goto unlock;
5590 	}
5591 
5592 	if (!lmp_ssp_capable(hdev)) {
5593 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5594 				      MGMT_STATUS_NOT_SUPPORTED);
5595 		goto unlock;
5596 	}
5597 
5598 	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5599 	if (!cmd)
5600 		err = -ENOMEM;
5601 	else
5602 		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5603 					 read_local_oob_data_complete);
5604 
5605 	if (err < 0) {
5606 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5607 				      MGMT_STATUS_FAILED);
5608 
5609 		if (cmd)
5610 			mgmt_pending_free(cmd);
5611 	}
5612 
5613 unlock:
5614 	hci_dev_unlock(hdev);
5615 	return err;
5616 }
5617 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: stores OOB pairing data received
 * out-of-band for a remote device. Two command sizes are accepted: the
 * legacy form with only P-192 hash/randomizer, and the extended form with
 * both P-192 and P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 values only, BR/EDR addresses only. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 and P-256 values; all-zero values
		 * select "no data" for the respective key strength.
		 */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known command size matched. */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5725 
5726 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5727 				  void *data, u16 len)
5728 {
5729 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5730 	u8 status;
5731 	int err;
5732 
5733 	bt_dev_dbg(hdev, "sock %p", sk);
5734 
5735 	if (cp->addr.type != BDADDR_BREDR)
5736 		return mgmt_cmd_complete(sk, hdev->id,
5737 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5738 					 MGMT_STATUS_INVALID_PARAMS,
5739 					 &cp->addr, sizeof(cp->addr));
5740 
5741 	hci_dev_lock(hdev);
5742 
5743 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5744 		hci_remote_oob_data_clear(hdev);
5745 		status = MGMT_STATUS_SUCCESS;
5746 		goto done;
5747 	}
5748 
5749 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5750 	if (err < 0)
5751 		status = MGMT_STATUS_INVALID_PARAMS;
5752 	else
5753 		status = MGMT_STATUS_SUCCESS;
5754 
5755 done:
5756 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5757 				status, &cp->addr, sizeof(cp->addr));
5758 
5759 	hci_dev_unlock(hdev);
5760 	return err;
5761 }
5762 
5763 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5764 {
5765 	struct mgmt_pending_cmd *cmd;
5766 
5767 	bt_dev_dbg(hdev, "status %u", status);
5768 
5769 	hci_dev_lock(hdev);
5770 
5771 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5772 	if (!cmd)
5773 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5774 
5775 	if (!cmd)
5776 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5777 
5778 	if (cmd) {
5779 		cmd->cmd_complete(cmd, mgmt_status(status));
5780 		mgmt_pending_remove(cmd);
5781 	}
5782 
5783 	hci_dev_unlock(hdev);
5784 }
5785 
5786 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5787 				    uint8_t *mgmt_status)
5788 {
5789 	switch (type) {
5790 	case DISCOV_TYPE_LE:
5791 		*mgmt_status = mgmt_le_support(hdev);
5792 		if (*mgmt_status)
5793 			return false;
5794 		break;
5795 	case DISCOV_TYPE_INTERLEAVED:
5796 		*mgmt_status = mgmt_le_support(hdev);
5797 		if (*mgmt_status)
5798 			return false;
5799 		fallthrough;
5800 	case DISCOV_TYPE_BREDR:
5801 		*mgmt_status = mgmt_bredr_support(hdev);
5802 		if (*mgmt_status)
5803 			return false;
5804 		break;
5805 	default:
5806 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5807 		return false;
5808 	}
5809 
5810 	return true;
5811 }
5812 
/* hci_cmd_sync completion for all three start-discovery variants: sends
 * the mgmt response and advances the discovery state machine.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Only respond if cmd is still the pending command for one of the
	 * start-discovery opcodes.
	 */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The response payload is the single type byte of the request. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
				DISCOVERY_FINDING);
}
5831 
/* hci_cmd_sync work function: kicks off discovery on the controller. */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5836 
/* Shared implementation of MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which): validates state and
 * discovery type, then queues the start on the hci_sync queue.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Discovery must currently be idle and not driven by a periodic
	 * inquiry.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5907 
/* MGMT_OP_START_DISCOVERY handler: thin wrapper around the shared
 * implementation.
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5914 
/* MGMT_OP_START_LIMITED_DISCOVERY handler: thin wrapper around the shared
 * implementation.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5922 
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery, but with
 * result filtering by RSSI and an optional list of 128-bit service UUIDs
 * appended to the command.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound on uuid_count such that sizeof(*cp) + count * 16
	 * cannot overflow the u16 length.
	 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Discovery must currently be idle and not driven by a periodic
	 * inquiry.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must match the announced UUID count exactly. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep a private copy of the UUID filter list. */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
6034 
6035 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6036 {
6037 	struct mgmt_pending_cmd *cmd;
6038 
6039 	bt_dev_dbg(hdev, "status %u", status);
6040 
6041 	hci_dev_lock(hdev);
6042 
6043 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6044 	if (cmd) {
6045 		cmd->cmd_complete(cmd, mgmt_status(status));
6046 		mgmt_pending_remove(cmd);
6047 	}
6048 
6049 	hci_dev_unlock(hdev);
6050 }
6051 
/* hci_cmd_sync completion for MGMT_OP_STOP_DISCOVERY: sends the mgmt
 * response and, on success, marks discovery as stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Only respond if cmd is still the pending STOP_DISCOVERY command. */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* The response payload is the single type byte of the request. */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
6068 
/* hci_cmd_sync work function: stops any ongoing discovery. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
6073 
6074 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6075 			  u16 len)
6076 {
6077 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
6078 	struct mgmt_pending_cmd *cmd;
6079 	int err;
6080 
6081 	bt_dev_dbg(hdev, "sock %p", sk);
6082 
6083 	hci_dev_lock(hdev);
6084 
6085 	if (!hci_discovery_active(hdev)) {
6086 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6087 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
6088 					sizeof(mgmt_cp->type));
6089 		goto unlock;
6090 	}
6091 
6092 	if (hdev->discovery.type != mgmt_cp->type) {
6093 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6094 					MGMT_STATUS_INVALID_PARAMS,
6095 					&mgmt_cp->type, sizeof(mgmt_cp->type));
6096 		goto unlock;
6097 	}
6098 
6099 	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6100 	if (!cmd) {
6101 		err = -ENOMEM;
6102 		goto unlock;
6103 	}
6104 
6105 	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6106 				 stop_discovery_complete);
6107 	if (err < 0) {
6108 		mgmt_pending_remove(cmd);
6109 		goto unlock;
6110 	}
6111 
6112 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6113 
6114 unlock:
6115 	hci_dev_unlock(hdev);
6116 	return err;
6117 }
6118 
6119 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6120 			u16 len)
6121 {
6122 	struct mgmt_cp_confirm_name *cp = data;
6123 	struct inquiry_entry *e;
6124 	int err;
6125 
6126 	bt_dev_dbg(hdev, "sock %p", sk);
6127 
6128 	hci_dev_lock(hdev);
6129 
6130 	if (!hci_discovery_active(hdev)) {
6131 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6132 					MGMT_STATUS_FAILED, &cp->addr,
6133 					sizeof(cp->addr));
6134 		goto failed;
6135 	}
6136 
6137 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6138 	if (!e) {
6139 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6140 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6141 					sizeof(cp->addr));
6142 		goto failed;
6143 	}
6144 
6145 	if (cp->name_known) {
6146 		e->name_state = NAME_KNOWN;
6147 		list_del(&e->list);
6148 	} else {
6149 		e->name_state = NAME_NEEDED;
6150 		hci_inquiry_cache_update_resolve(hdev, e);
6151 	}
6152 
6153 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6154 				&cp->addr, sizeof(cp->addr));
6155 
6156 failed:
6157 	hci_dev_unlock(hdev);
6158 	return err;
6159 }
6160 
6161 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6162 			u16 len)
6163 {
6164 	struct mgmt_cp_block_device *cp = data;
6165 	u8 status;
6166 	int err;
6167 
6168 	bt_dev_dbg(hdev, "sock %p", sk);
6169 
6170 	if (!bdaddr_type_is_valid(cp->addr.type))
6171 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6172 					 MGMT_STATUS_INVALID_PARAMS,
6173 					 &cp->addr, sizeof(cp->addr));
6174 
6175 	hci_dev_lock(hdev);
6176 
6177 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6178 				  cp->addr.type);
6179 	if (err < 0) {
6180 		status = MGMT_STATUS_FAILED;
6181 		goto done;
6182 	}
6183 
6184 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6185 		   sk);
6186 	status = MGMT_STATUS_SUCCESS;
6187 
6188 done:
6189 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6190 				&cp->addr, sizeof(cp->addr));
6191 
6192 	hci_dev_unlock(hdev);
6193 
6194 	return err;
6195 }
6196 
6197 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6198 			  u16 len)
6199 {
6200 	struct mgmt_cp_unblock_device *cp = data;
6201 	u8 status;
6202 	int err;
6203 
6204 	bt_dev_dbg(hdev, "sock %p", sk);
6205 
6206 	if (!bdaddr_type_is_valid(cp->addr.type))
6207 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6208 					 MGMT_STATUS_INVALID_PARAMS,
6209 					 &cp->addr, sizeof(cp->addr));
6210 
6211 	hci_dev_lock(hdev);
6212 
6213 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6214 				  cp->addr.type);
6215 	if (err < 0) {
6216 		status = MGMT_STATUS_INVALID_PARAMS;
6217 		goto done;
6218 	}
6219 
6220 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6221 		   sk);
6222 	status = MGMT_STATUS_SUCCESS;
6223 
6224 done:
6225 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6226 				&cp->addr, sizeof(cp->addr));
6227 
6228 	hci_dev_unlock(hdev);
6229 
6230 	return err;
6231 }
6232 
/* hci_cmd_sync work function: refreshes the EIR data so it reflects the
 * newly configured Device ID.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
6237 
6238 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6239 			 u16 len)
6240 {
6241 	struct mgmt_cp_set_device_id *cp = data;
6242 	int err;
6243 	__u16 source;
6244 
6245 	bt_dev_dbg(hdev, "sock %p", sk);
6246 
6247 	source = __le16_to_cpu(cp->source);
6248 
6249 	if (source > 0x0002)
6250 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6251 				       MGMT_STATUS_INVALID_PARAMS);
6252 
6253 	hci_dev_lock(hdev);
6254 
6255 	hdev->devid_source = source;
6256 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6257 	hdev->devid_product = __le16_to_cpu(cp->product);
6258 	hdev->devid_version = __le16_to_cpu(cp->version);
6259 
6260 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6261 				NULL, 0);
6262 
6263 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6264 
6265 	hci_dev_unlock(hdev);
6266 
6267 	return err;
6268 }
6269 
/* Log the outcome of re-enabling an advertising instance; nothing more
 * can be done about a failure at this point.
 */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err) {
		bt_dev_dbg(hdev, "status %d", err);
		return;
	}

	bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
6277 
/* hci_cmd_sync completion for MGMT_OP_SET_ADVERTISING: responds to all
 * pending SET_ADVERTISING commands, emits New Settings, and re-enables
 * instance advertising when the global setting was just turned off.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* Fail every pending SET_ADVERTISING command. */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's LE advertising state in the mgmt flag. */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	/* With no current instance, fall back to the first configured one. */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
6325 
/* hci_cmd_sync work function for MGMT_OP_SET_ADVERTISING: applies the
 * requested advertising mode (0x00 off, 0x01 on, 0x02 on + connectable)
 * to the controller.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* 0x02 requests connectable advertising. */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
6359 
/* MGMT_OP_SET_ADVERTISING handler: toggles global LE advertising
 * (0x00 off, 0x01 on, 0x02 on + connectable). Either answers directly by
 * flipping flags, or queues the HCI work on the hci_sync queue.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		/* Only toggle the flags; the controller is not touched. */
		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Reject while another advertising/LE state change is in flight. */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6444 
/* Handler for MGMT_OP_SET_STATIC_ADDRESS.
 *
 * Stores a new static random address in hdev->static_addr.  Only
 * allowed on LE-capable controllers and only while the controller is
 * powered off.  BDADDR_ANY is accepted (clears the address); any other
 * address must not be BDADDR_NONE and must have its two most
 * significant bits set, as required for static random addresses.
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* The address can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6488 
/* Handler for MGMT_OP_SET_SCAN_PARAMS.
 *
 * Validates and stores the LE scan interval and window.  Both values
 * must be within 0x0004-0x4000 and the window must not exceed the
 * interval.  If passive background scanning is currently active (and no
 * discovery is in progress) it is restarted so the new parameters take
 * effect immediately.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}
6537 
/* Completion callback for the MGMT_OP_SET_FAST_CONNECTABLE command.
 *
 * On error, sends a command status to the originating socket.  On
 * success, commits the HCI_FAST_CONNECTABLE flag to match the requested
 * value, sends the settings response and broadcasts New Settings.
 * Frees the pending command in both cases.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		/* Only update the flag after the HCI write succeeded */
		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6561 
/* hci_cmd_sync work for MGMT_OP_SET_FAST_CONNECTABLE: forwards the
 * requested mode to the synchronous HCI write helper.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
6569 
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE.
 *
 * Requires BR/EDR to be enabled and a controller of at least Bluetooth
 * 1.2; val must be 0x00 or 0x01.  If the requested state already
 * matches HCI_FAST_CONNECTABLE only a settings response is sent.  When
 * powered off the flag is toggled directly; otherwise the change is
 * queued through write_fast_connectable_sync() and committed in
 * fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just echo the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* Powered off: flip the flag without any HCI traffic */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6625 
/* Completion callback for the MGMT_OP_SET_BREDR command.
 *
 * On failure, rolls back the HCI_BREDR_ENABLED flag (which set_bredr()
 * set optimistically before queueing) and reports command status.  On
 * success, sends the settings response and broadcasts New Settings.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}
6648 
6649 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6650 {
6651 	int status;
6652 
6653 	status = hci_write_fast_connectable_sync(hdev, false);
6654 
6655 	if (!status)
6656 		status = hci_update_scan_sync(hdev);
6657 
6658 	/* Since only the advertising data flags will change, there
6659 	 * is no need to update the scan response data.
6660 	 */
6661 	if (!status)
6662 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6663 
6664 	return status;
6665 }
6666 
/* Handler for MGMT_OP_SET_BREDR.
 *
 * Enables or disables BR/EDR support on a dual-mode controller.
 * Requires both BR/EDR and LE capability, and LE must currently be
 * enabled.  When powered off the flag is toggled directly (disabling
 * also clears the BR/EDR-dependent flags).  When powered on, disabling
 * is rejected, and re-enabling is rejected if the controller was
 * configured LE-only with a static address or with secure connections
 * enabled.  Otherwise set_bredr_sync() is queued and the flag is set
 * optimistically (rolled back in set_bredr_complete() on failure).
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No change requested: just echo the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also invalidates the flags that only
		 * make sense with BR/EDR enabled.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6766 
/* Completion callback for the MGMT_OP_SET_SECURE_CONN command.
 *
 * On error, reports command status.  On success, commits the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags according to the requested value
 * (0x00 = off, 0x01 = SC enabled, 0x02 = SC-only mode), then sends the
 * settings response and broadcasts New Settings.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}
6804 
/* hci_cmd_sync work for MGMT_OP_SET_SECURE_CONN: writes the Secure
 * Connections host support setting to the controller.  The flag is set
 * up front so the write is not skipped as a no-op; the final flag state
 * is decided in set_secure_conn_complete().
 */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6816 
/* Handler for MGMT_OP_SET_SECURE_CONN.
 *
 * val: 0x00 = disable, 0x01 = enable, 0x02 = SC-only mode.  On
 * controllers without SC support (or powered off, or with BR/EDR
 * disabled) only the host-side flags are toggled and the reply is sent
 * directly; otherwise the write is queued via set_secure_conn_sync()
 * and the flags are committed in set_secure_conn_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR enabled on an SC-capable controller, SSP must be
	 * enabled before Secure Connections can be turned on.
	 */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No HCI traffic needed: just toggle the host-side flags */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Already in the requested state: just echo the settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6897 
/* Handler for MGMT_OP_SET_DEBUG_KEYS.
 *
 * val: 0x00 = discard debug keys, 0x01 = keep debug keys, 0x02 = keep
 * and also actively use debug keys (HCI_USE_DEBUG_KEYS).  When the
 * use-flag changes on a powered controller with SSP enabled, the SSP
 * debug mode is updated on the controller as well.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Sync the controller's SSP debug mode with the use-flag */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6944 
/* Handler for MGMT_OP_SET_PRIVACY.
 *
 * privacy: 0x00 = off, 0x01 = on, 0x02 = limited privacy.  Only allowed
 * while powered off.  When enabling, stores the supplied IRK and marks
 * the RPA as expired so a fresh one gets generated; when disabling, the
 * IRK is cleared.  HCI_RPA_RESOLVING is always set since user space
 * supporting this command is expected to handle IRKs.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a new RPA on next use */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7001 
7002 static bool irk_is_valid(struct mgmt_irk_info *irk)
7003 {
7004 	switch (irk->addr.type) {
7005 	case BDADDR_LE_PUBLIC:
7006 		return true;
7007 
7008 	case BDADDR_LE_RANDOM:
7009 		/* Two most significant bits shall be set */
7010 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7011 			return false;
7012 		return true;
7013 	}
7014 
7015 	return false;
7016 }
7017 
/* Handler for MGMT_OP_LOAD_IRKS.
 *
 * Replaces the whole SMP IRK store with the list supplied by user
 * space.  Validates the count against the maximum that fits in a u16
 * length, checks the exact payload length, and validates every entry
 * before touching the store.  Entries whose IRK value is on the blocked
 * key list are skipped with a warning.  Sets HCI_RPA_RESOLVING since
 * user space that loads IRKs is expected to resolve RPAs.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must be exactly header + irk_count entries */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries up front so the store is only cleared
	 * for a fully valid request.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type = le_addr_type(irk->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (irk->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    addr_type, irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7093 
7094 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7095 {
7096 	if (key->initiator != 0x00 && key->initiator != 0x01)
7097 		return false;
7098 
7099 	switch (key->addr.type) {
7100 	case BDADDR_LE_PUBLIC:
7101 		return true;
7102 
7103 	case BDADDR_LE_RANDOM:
7104 		/* Two most significant bits shall be set */
7105 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7106 			return false;
7107 		return true;
7108 	}
7109 
7110 	return false;
7111 }
7112 
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS.
 *
 * Replaces the whole SMP LTK store with the list supplied by user
 * space.  Count and exact payload length are validated, then every
 * entry is validated, before the store is cleared and repopulated.
 * Keys on the blocked key list and P-256 debug keys are skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must be exactly header + key_count entries */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate all entries up front so the store is only cleared
	 * for a fully valid request.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;
		u8 addr_type = le_addr_type(key->addr.type);

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys fall through to default and are
			 * intentionally not loaded.
			 */
			fallthrough;
		default:
			continue;
		}

		/* When using SMP over BR/EDR, the addr type should be set to BREDR */
		if (key->addr.type == BDADDR_BREDR)
			addr_type = BDADDR_BREDR;

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    addr_type, type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
7213 
/* Completion callback for the MGMT_OP_GET_CONN_INFO command.
 *
 * On success, replies with the RSSI and TX power values that
 * get_conn_info_sync() refreshed on the connection; on failure, the
 * fields are reported as invalid.  The reply always echoes the address
 * from the request, and the pending command is freed.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* On failure report explicitly invalid values */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}
7242 
/* hci_cmd_sync work for MGMT_OP_GET_CONN_INFO.
 *
 * Re-resolves the connection (it may have dropped since the request was
 * queued), then refreshes the cached RSSI and, where still unknown or
 * applicable, the current and maximum TX power via the synchronous HCI
 * read helpers.  Returns MGMT_STATUS_NOT_CONNECTED if the connection is
 * gone, otherwise the status of the last HCI read.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_conn_info_complete() */
	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
7280 
/* Handler for MGMT_OP_GET_CONN_INFO.
 *
 * Returns RSSI and TX power information for an active connection.  If
 * the cached values are fresh enough (a randomized age between the
 * hdev min/max bounds, to stop clients from guessing the refresh
 * moment), they are returned directly; otherwise get_conn_info_sync()
 * is queued to re-read them from the controller and the reply is sent
 * from get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
						 hdev->conn_info_max_age - 1);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7371 
/* Completion callback for the MGMT_OP_GET_CLOCK_INFO command.
 *
 * Builds the reply from the cached local clock and, if a connection was
 * resolved by get_clock_info_sync(), the piconet clock and accuracy of
 * that connection.  On error only the echoed address is returned.
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	/* conn is only set when a piconet clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
7402 
/* hci_cmd_sync work for MGMT_OP_GET_CLOCK_INFO.
 *
 * First reads the local clock (hci_cp zeroed: handle 0, which 0x00),
 * then re-resolves the connection and reads its piconet clock
 * (which 0x01).  Returns MGMT_STATUS_NOT_CONNECTED if the connection no
 * longer exists.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	/* Zeroed parameters request the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	/* Stash the connection for get_clock_info_complete() */
	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}
7424 
/* Handler for MGMT_OP_GET_CLOCK_INFO.
 *
 * Only BR/EDR addresses are accepted.  With BDADDR_ANY only the local
 * clock is requested; with a specific address the corresponding ACL
 * connection must be up so its piconet clock can be read too.  The
 * actual reads happen in get_clock_info_sync() and the reply is sent
 * from get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		/* BDADDR_ANY: only the local clock is of interest */
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
7488 
7489 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7490 {
7491 	struct hci_conn *conn;
7492 
7493 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7494 	if (!conn)
7495 		return false;
7496 
7497 	if (conn->dst_type != type)
7498 		return false;
7499 
7500 	if (conn->state != BT_CONNECTED)
7501 		return false;
7502 
7503 	return true;
7504 }
7505 
/* Set the auto-connect policy for a connection parameter entry (creating
 * the entry if needed) and move it onto the matching pending-action list.
 *
 * This function requires the caller holds hdev->lock
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Look up or allocate the params entry for this address */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever pending list the entry is currently on */
	hci_pend_le_list_del_init(params);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		else
			hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connection attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7550 
7551 static void device_added(struct sock *sk, struct hci_dev *hdev,
7552 			 bdaddr_t *bdaddr, u8 type, u8 action)
7553 {
7554 	struct mgmt_ev_device_added ev;
7555 
7556 	bacpy(&ev.addr.bdaddr, bdaddr);
7557 	ev.addr.type = type;
7558 	ev.action = action;
7559 
7560 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7561 }
7562 
/* hci_cmd_sync work: re-evaluate passive scanning after Add Device
 * changed the pending connection/report lists.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7567 
/* Handle MGMT_OP_ADD_DEVICE: whitelist a BR/EDR peer for incoming
 * connections, or set an LE auto-connect policy (action 0x00 = report,
 * 0x01 = direct connect, 0x02 = always connect) for an identity address.
 * Emits Device Added and Device Flags Changed events on success.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Address must be a valid type and not the wildcard address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Page scan may need enabling now that the list is non-empty */
		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->flags;
	}

	/* Kick passive scanning so the new policy takes effect */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7669 
7670 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7671 			   bdaddr_t *bdaddr, u8 type)
7672 {
7673 	struct mgmt_ev_device_removed ev;
7674 
7675 	bacpy(&ev.addr.bdaddr, bdaddr);
7676 	ev.addr.type = type;
7677 
7678 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7679 }
7680 
/* hci_cmd_sync work: re-evaluate passive scanning after Remove Device
 * changed the accept/conn-params lists.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7685 
/* Handle MGMT_OP_REMOVE_DEVICE: drop one whitelisted/auto-connect device,
 * or — when the address is BDADDR_ANY with type 0 — clear the BR/EDR
 * accept list and all non-disabled LE connection parameters at once.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Single-device removal path */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				/* Not on the accept list */
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			/* BR/EDR removal needs no passive-scan update */
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device can't be removed here */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		hci_conn_params_free(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* Wildcard path: remove everything */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with a connect attempt in flight */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			hci_conn_params_free(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Best effort: the command succeeds even if the queueing fails */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7809 
/* Handle MGMT_OP_LOAD_CONN_PARAM: bulk-load LE connection parameters.
 * Existing disabled entries are cleared first; individual entries with an
 * invalid address type or out-of-range values are skipped (logged), not
 * treated as a command failure.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Looks up an existing entry or creates a new one */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7894 
/* Handle MGMT_OP_SET_EXTERNAL_CONFIG: mark whether the controller's
 * configuration is provided externally. Only valid while powered off and
 * when the controller advertises HCI_QUIRK_EXTERNAL_CONFIG. A change that
 * flips the configured/unconfigured state re-registers the index.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state no longer matches the UNCONFIGURED flag,
	 * re-register the index under its new identity.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7950 
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS: store a public address for a
 * controller whose driver can program one (hdev->set_bdaddr). Only valid
 * while powered off; if the address completes the configuration, the
 * index is re-registered as a configured controller and powered on.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* The wildcard address cannot be a public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
8002 
/* Completion handler for the HCI Read Local OOB (Extended) Data request
 * issued by read_local_ssp_oob_req(): build the extended OOB reply EIR
 * (class of device plus P-192/P-256 hash and randomizer pairs, depending
 * on Secure Connections support) and notify OOB data listeners.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the pending command was already taken over/cancelled */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* Derive a status from the skb when the sync request itself passed */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		/* NOTE(review): status has already been through mgmt_status()
		 * above; this second conversion looks redundant — verify.
		 */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy SSP: only the P-192 hash/randomizer are available */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* class (5) + two 18-byte EIR fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Secure Connections: P-256 always, P-192 unless SC-only */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly subscribes to OOB data updates */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
8125 
8126 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8127 				  struct mgmt_cp_read_local_oob_ext_data *cp)
8128 {
8129 	struct mgmt_pending_cmd *cmd;
8130 	int err;
8131 
8132 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8133 			       cp, sizeof(*cp));
8134 	if (!cmd)
8135 		return -ENOMEM;
8136 
8137 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
8138 				 read_local_oob_ext_data_complete);
8139 
8140 	if (err < 0) {
8141 		mgmt_pending_remove(cmd);
8142 		return err;
8143 	}
8144 
8145 	return 0;
8146 }
8147 
/* Handle MGMT_OP_READ_LOCAL_OOB_EXT_DATA: return local out-of-band
 * pairing data as EIR for either the BR/EDR transport or the LE
 * transports. BR/EDR with SSP enabled defers to the controller via
 * read_local_ssp_oob_req(); everything else is answered synchronously.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: compute an upper bound for the EIR so the reply
	 * buffer can be sized before taking hdev->lock.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: actually build the EIR into the reply buffer */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP data must be read from the controller; the
			 * reply is completed asynchronously on success.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick static vs public address; addr[6] encodes the LE
		 * address type (0x01 = random, 0x00 = public).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral (advertising), 0x01 = central */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester implicitly subscribes to OOB data updates */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
8308 
8309 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8310 {
8311 	u32 flags = 0;
8312 
8313 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
8314 	flags |= MGMT_ADV_FLAG_DISCOV;
8315 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8316 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8317 	flags |= MGMT_ADV_FLAG_APPEARANCE;
8318 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8319 	flags |= MGMT_ADV_PARAM_DURATION;
8320 	flags |= MGMT_ADV_PARAM_TIMEOUT;
8321 	flags |= MGMT_ADV_PARAM_INTERVALS;
8322 	flags |= MGMT_ADV_PARAM_TX_POWER;
8323 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
8324 
8325 	/* In extended adv TX_POWER returned from Set Adv Param
8326 	 * will be always valid.
8327 	 */
8328 	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
8329 		flags |= MGMT_ADV_FLAG_TX_POWER;
8330 
8331 	if (ext_adv_capable(hdev)) {
8332 		flags |= MGMT_ADV_FLAG_SEC_1M;
8333 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
8334 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
8335 
8336 		if (le_2m_capable(hdev))
8337 			flags |= MGMT_ADV_FLAG_SEC_2M;
8338 
8339 		if (le_coded_capable(hdev))
8340 			flags |= MGMT_ADV_FLAG_SEC_CODED;
8341 	}
8342 
8343 	return flags;
8344 }
8345 
/* Handle MGMT_OP_READ_ADV_FEATURES: report supported advertising flags,
 * data-size limits, and the list of currently registered instances.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = max_adv_len(hdev);
	rp->max_scan_rsp_len = max_adv_len(hdev);
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		/* Only instances 1-le_num_of_adv_sets are externally visible */
		/* NOTE(review): the code compares against adv_instance_cnt,
		 * not le_num_of_adv_sets as the comment above says — verify
		 * which bound is intended.
		 */
		if (adv_instance->instance <= hdev->adv_instance_cnt) {
			*instance = adv_instance->instance;
			instance++;
		} else {
			/* Hidden instance: shrink the reply accordingly */
			rp->num_instances--;
			rp_len--;
		}
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
8400 
/* Number of bytes the local name occupies when encoded as an EIR field,
 * determined by encoding it into a scratch buffer.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 2]; /* len + type + name */

	return eir_append_local_name(hdev, buf, 0);
}
8407 
8408 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8409 			   bool is_adv_data)
8410 {
8411 	u8 max_len = max_adv_len(hdev);
8412 
8413 	if (is_adv_data) {
8414 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8415 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
8416 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
8417 			max_len -= 3;
8418 
8419 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8420 			max_len -= 3;
8421 	} else {
8422 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8423 			max_len -= calculate_name_len(hdev);
8424 
8425 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8426 			max_len -= 4;
8427 	}
8428 
8429 	return max_len;
8430 }
8431 
8432 static bool flags_managed(u32 adv_flags)
8433 {
8434 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8435 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
8436 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
8437 }
8438 
8439 static bool tx_power_managed(u32 adv_flags)
8440 {
8441 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8442 }
8443 
8444 static bool name_managed(u32 adv_flags)
8445 {
8446 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8447 }
8448 
8449 static bool appearance_managed(u32 adv_flags)
8450 {
8451 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8452 }
8453 
8454 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8455 			      u8 len, bool is_adv_data)
8456 {
8457 	int i, cur_len;
8458 	u8 max_len;
8459 
8460 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8461 
8462 	if (len > max_len)
8463 		return false;
8464 
8465 	/* Make sure that the data is correctly formatted. */
8466 	for (i = 0; i < len; i += (cur_len + 1)) {
8467 		cur_len = data[i];
8468 
8469 		if (!cur_len)
8470 			continue;
8471 
8472 		if (data[i + 1] == EIR_FLAGS &&
8473 		    (!is_adv_data || flags_managed(adv_flags)))
8474 			return false;
8475 
8476 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8477 			return false;
8478 
8479 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8480 			return false;
8481 
8482 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8483 			return false;
8484 
8485 		if (data[i + 1] == EIR_APPEARANCE &&
8486 		    appearance_managed(adv_flags))
8487 			return false;
8488 
8489 		/* If the current field length would exceed the total data
8490 		 * length, then it's invalid.
8491 		 */
8492 		if (i + cur_len >= len)
8493 			return false;
8494 	}
8495 
8496 	return true;
8497 }
8498 
8499 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8500 {
8501 	u32 supported_flags, phy_flags;
8502 
8503 	/* The current implementation only supports a subset of the specified
8504 	 * flags. Also need to check mutual exclusiveness of sec flags.
8505 	 */
8506 	supported_flags = get_supported_adv_flags(hdev);
8507 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8508 	if (adv_flags & ~supported_flags ||
8509 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8510 		return false;
8511 
8512 	return true;
8513 }
8514 
/* Advertising changes are considered busy while a Set LE command is
 * still pending on this controller.
 */
static bool adv_busy(struct hci_dev *hdev)
{
	return pending_find(MGMT_OP_SET_LE, hdev);
}
8519 
8520 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8521 			     int err)
8522 {
8523 	struct adv_info *adv, *n;
8524 
8525 	bt_dev_dbg(hdev, "err %d", err);
8526 
8527 	hci_dev_lock(hdev);
8528 
8529 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8530 		u8 instance;
8531 
8532 		if (!adv->pending)
8533 			continue;
8534 
8535 		if (!err) {
8536 			adv->pending = false;
8537 			continue;
8538 		}
8539 
8540 		instance = adv->instance;
8541 
8542 		if (hdev->cur_adv_instance == instance)
8543 			cancel_adv_timeout(hdev);
8544 
8545 		hci_remove_adv_instance(hdev, instance);
8546 		mgmt_advertising_removed(sk, hdev, instance);
8547 	}
8548 
8549 	hci_dev_unlock(hdev);
8550 }
8551 
8552 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8553 {
8554 	struct mgmt_pending_cmd *cmd = data;
8555 	struct mgmt_cp_add_advertising *cp = cmd->param;
8556 	struct mgmt_rp_add_advertising rp;
8557 
8558 	memset(&rp, 0, sizeof(rp));
8559 
8560 	rp.instance = cp->instance;
8561 
8562 	if (err)
8563 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8564 				mgmt_status(err));
8565 	else
8566 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8567 				  mgmt_status(err), &rp, sizeof(rp));
8568 
8569 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8570 
8571 	mgmt_pending_free(cmd);
8572 }
8573 
8574 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8575 {
8576 	struct mgmt_pending_cmd *cmd = data;
8577 	struct mgmt_cp_add_advertising *cp = cmd->param;
8578 
8579 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8580 }
8581 
/* MGMT_OP_ADD_ADVERTISING handler: register (or overwrite) advertising
 * instance cp->instance with the supplied TLV advertising and scan-response
 * data, then schedule it within the software rotation of instances.
 * Replies to the client either immediately (when no HCI traffic is needed)
 * or from add_advertising_complete() once the queued work finishes.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *adv, *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* LE must be supported and enabled for any advertising change. */
	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance numbers are 1-based and bounded by the number of
	 * advertising sets the controller exposes.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must hold exactly adv data followed
	 * by scan response data.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense on a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Both TLV blobs must be well formed and free of kernel-managed
	 * field types.
	 */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	adv = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval, 0);
	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Note: the command parameter buffer is rewritten so the sync work
	 * item picks up the instance chosen by the rotation logic above.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8716 
8717 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8718 					int err)
8719 {
8720 	struct mgmt_pending_cmd *cmd = data;
8721 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8722 	struct mgmt_rp_add_ext_adv_params rp;
8723 	struct adv_info *adv;
8724 	u32 flags;
8725 
8726 	BT_DBG("%s", hdev->name);
8727 
8728 	hci_dev_lock(hdev);
8729 
8730 	adv = hci_find_adv_instance(hdev, cp->instance);
8731 	if (!adv)
8732 		goto unlock;
8733 
8734 	rp.instance = cp->instance;
8735 	rp.tx_power = adv->tx_power;
8736 
8737 	/* While we're at it, inform userspace of the available space for this
8738 	 * advertisement, given the flags that will be used.
8739 	 */
8740 	flags = __le32_to_cpu(cp->flags);
8741 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8742 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8743 
8744 	if (err) {
8745 		/* If this advertisement was previously advertising and we
8746 		 * failed to update it, we signal that it has been removed and
8747 		 * delete its structure
8748 		 */
8749 		if (!adv->pending)
8750 			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8751 
8752 		hci_remove_adv_instance(hdev, cp->instance);
8753 
8754 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8755 				mgmt_status(err));
8756 	} else {
8757 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8758 				  mgmt_status(err), &rp, sizeof(rp));
8759 	}
8760 
8761 unlock:
8762 	if (cmd)
8763 		mgmt_pending_free(cmd);
8764 
8765 	hci_dev_unlock(hdev);
8766 }
8767 
8768 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8769 {
8770 	struct mgmt_pending_cmd *cmd = data;
8771 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8772 
8773 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8774 }
8775 
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: first half of the two-step extended
 * advertising registration. Creates an advertising instance with the
 * requested parameters but no data; the data arrives via the companion
 * Add Ext Adv Data command. Replies immediately when the controller lacks
 * extended advertising, otherwise from add_ext_adv_params_complete().
 */
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_add_ext_adv_params *cp = data;
	struct mgmt_rp_add_ext_adv_params rp;
	struct mgmt_pending_cmd *cmd = NULL;
	struct adv_info *adv;
	u32 flags, min_interval, max_interval;
	u16 timeout, duration;
	u8 status;
	s8 tx_power;
	int err;

	BT_DBG("%s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       status);

	/* Instance numbers are 1-based and bounded by the controller's
	 * advertising set count.
	 */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The purpose of breaking add_advertising into two separate MGMT calls
	 * for params and data is to allow more parameters to be added to this
	 * structure in the future. For this reason, we verify that we have the
	 * bare minimum structure we know of when the interface was defined. Any
	 * extra parameters we don't know about will be ignored in this request.
	 */
	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Parse defined parameters from request, use defaults otherwise */
	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
		  __le16_to_cpu(cp->timeout) : 0;

	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
		   __le16_to_cpu(cp->duration) :
		   hdev->def_multi_adv_rotation_duration;

	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->min_interval) :
		       hdev->le_adv_min_interval;

	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
		       __le32_to_cpu(cp->max_interval) :
		       hdev->le_adv_max_interval;

	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
		   cp->tx_power :
		   HCI_ADV_TX_POWER_NO_PREFERENCE;

	/* Create advertising instance with no advertising or response data */
	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
				   timeout, duration, tx_power, min_interval,
				   max_interval, 0);

	if (IS_ERR(adv)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Submit request for advertising params if ext adv available */
	if (ext_adv_capable(hdev)) {
		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
				       data, data_len);
		if (!cmd) {
			err = -ENOMEM;
			/* Roll back the instance created above. */
			hci_remove_adv_instance(hdev, cp->instance);
			goto unlock;
		}

		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
					 add_ext_adv_params_complete);
		if (err < 0)
			mgmt_pending_free(cmd);
	} else {
		/* Legacy advertising: no parameter command to send, so reply
		 * right away with the defaults.
		 */
		rp.instance = cp->instance;
		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_EXT_ADV_PARAMS,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8891 
8892 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8893 {
8894 	struct mgmt_pending_cmd *cmd = data;
8895 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8896 	struct mgmt_rp_add_advertising rp;
8897 
8898 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8899 
8900 	memset(&rp, 0, sizeof(rp));
8901 
8902 	rp.instance = cp->instance;
8903 
8904 	if (err)
8905 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8906 				mgmt_status(err));
8907 	else
8908 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8909 				  mgmt_status(err), &rp, sizeof(rp));
8910 
8911 	mgmt_pending_free(cmd);
8912 }
8913 
8914 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8915 {
8916 	struct mgmt_pending_cmd *cmd = data;
8917 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8918 	int err;
8919 
8920 	if (ext_adv_capable(hdev)) {
8921 		err = hci_update_adv_data_sync(hdev, cp->instance);
8922 		if (err)
8923 			return err;
8924 
8925 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8926 		if (err)
8927 			return err;
8928 
8929 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8930 	}
8931 
8932 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8933 }
8934 
/* MGMT_OP_ADD_EXT_ADV_DATA handler: second half of the two-step extended
 * advertising registration. Attaches TLV advertising and scan-response
 * data to the instance created by Add Ext Adv Params and schedules it.
 * On any validation failure after the instance lookup, the half-created
 * instance is removed again (clear_new_instance).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by Add Ext Adv Params. */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		/* A pending instance is committed here, so announce it now. */
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9053 
9054 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
9055 					int err)
9056 {
9057 	struct mgmt_pending_cmd *cmd = data;
9058 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9059 	struct mgmt_rp_remove_advertising rp;
9060 
9061 	bt_dev_dbg(hdev, "err %d", err);
9062 
9063 	memset(&rp, 0, sizeof(rp));
9064 	rp.instance = cp->instance;
9065 
9066 	if (err)
9067 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
9068 				mgmt_status(err));
9069 	else
9070 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
9071 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9072 
9073 	mgmt_pending_free(cmd);
9074 }
9075 
9076 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
9077 {
9078 	struct mgmt_pending_cmd *cmd = data;
9079 	struct mgmt_cp_remove_advertising *cp = cmd->param;
9080 	int err;
9081 
9082 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
9083 	if (err)
9084 		return err;
9085 
9086 	if (list_empty(&hdev->adv_instances))
9087 		err = hci_disable_advertising_sync(hdev);
9088 
9089 	return err;
9090 }
9091 
/* MGMT_OP_REMOVE_ADVERTISING handler: queue removal of one advertising
 * instance (cp->instance) or all instances (cp->instance == 0). The reply
 * is sent from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing instance; zero
	 * means "remove all" and is checked against the list below.
	 */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while a Set LE command is still in flight. */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered, so there is nothing to remove. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
9139 
9140 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
9141 			     void *data, u16 data_len)
9142 {
9143 	struct mgmt_cp_get_adv_size_info *cp = data;
9144 	struct mgmt_rp_get_adv_size_info rp;
9145 	u32 flags, supported_flags;
9146 
9147 	bt_dev_dbg(hdev, "sock %p", sk);
9148 
9149 	if (!lmp_le_capable(hdev))
9150 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9151 				       MGMT_STATUS_REJECTED);
9152 
9153 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
9154 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9155 				       MGMT_STATUS_INVALID_PARAMS);
9156 
9157 	flags = __le32_to_cpu(cp->flags);
9158 
9159 	/* The current implementation only supports a subset of the specified
9160 	 * flags.
9161 	 */
9162 	supported_flags = get_supported_adv_flags(hdev);
9163 	if (flags & ~supported_flags)
9164 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9165 				       MGMT_STATUS_INVALID_PARAMS);
9166 
9167 	rp.instance = cp->instance;
9168 	rp.flags = cp->flags;
9169 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
9170 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
9171 
9172 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
9173 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9174 }
9175 
/* MGMT command dispatch table, indexed by opcode: entry N handles opcode N,
 * so the ordering here must match the opcode numbering in mgmt.h - never
 * reorder or insert in the middle. Each entry gives the handler, the
 * (minimum) parameter size, and optional flags: HCI_MGMT_VAR_LEN for
 * variable-length parameters, HCI_MGMT_NO_HDEV / HCI_MGMT_HDEV_OPTIONAL
 * for commands not tied to a controller, HCI_MGMT_UNTRUSTED for commands
 * allowed on untrusted sockets, and HCI_MGMT_UNCONFIGURED for commands
 * valid on unconfigured controllers.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
	{ mesh_send,               MGMT_MESH_SEND_SIZE,
						HCI_MGMT_VAR_LEN },
	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
9309 
9310 void mgmt_index_added(struct hci_dev *hdev)
9311 {
9312 	struct mgmt_ev_ext_index ev;
9313 
9314 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
9315 		return;
9316 
9317 	switch (hdev->dev_type) {
9318 	case HCI_PRIMARY:
9319 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
9320 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
9321 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
9322 			ev.type = 0x01;
9323 		} else {
9324 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
9325 					 HCI_MGMT_INDEX_EVENTS);
9326 			ev.type = 0x00;
9327 		}
9328 		break;
9329 	case HCI_AMP:
9330 		ev.type = 0x02;
9331 		break;
9332 	default:
9333 		return;
9334 	}
9335 
9336 	ev.bus = hdev->bus;
9337 
9338 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
9339 			 HCI_MGMT_EXT_INDEX_EVENTS);
9340 }
9341 
/* Announce controller removal: fail all pending MGMT commands with
 * INVALID_INDEX, emit the legacy and extended index-removed events, and
 * cancel the controller's delayed MGMT work items.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 matches every pending command. */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		/* Unknown device types get no index events at all. */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);

	/* Cancel any remaining timed work */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->service_cache);
	cancel_delayed_work_sync(&hdev->rpa_expired);
}
9383 
/* Called once a power-on attempt has finished. On success, re-arm LE
 * auto-connect actions and passive scanning; in all cases, answer any
 * pending Set Powered commands and broadcast the new settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	/* settings_rsp stashes the requesting socket in match.sk (with a
	 * reference) so it can be skipped when broadcasting below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
9406 
/* Called when the controller goes down: answer pending Set Powered
 * commands, fail all other pending commands with an appropriate status,
 * announce a zeroed class of device, and broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	/* settings_rsp stashes the requesting socket in match.sk (with a
	 * reference) so it can be skipped when broadcasting below.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9440 
9441 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9442 {
9443 	struct mgmt_pending_cmd *cmd;
9444 	u8 status;
9445 
9446 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9447 	if (!cmd)
9448 		return;
9449 
9450 	if (err == -ERFKILL)
9451 		status = MGMT_STATUS_RFKILLED;
9452 	else
9453 		status = MGMT_STATUS_FAILED;
9454 
9455 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9456 
9457 	mgmt_pending_remove(cmd);
9458 }
9459 
/* Send MGMT_EV_NEW_LINK_KEY so userspace can decide whether to store
 * a newly created BR/EDR link key.
 *
 * @persistent: store hint forwarded to userspace (non-zero means the
 * key should be kept).
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
9476 
9477 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9478 {
9479 	switch (ltk->type) {
9480 	case SMP_LTK:
9481 	case SMP_LTK_RESPONDER:
9482 		if (ltk->authenticated)
9483 			return MGMT_LTK_AUTHENTICATED;
9484 		return MGMT_LTK_UNAUTHENTICATED;
9485 	case SMP_LTK_P256:
9486 		if (ltk->authenticated)
9487 			return MGMT_LTK_P256_AUTH;
9488 		return MGMT_LTK_P256_UNAUTH;
9489 	case SMP_LTK_P256_DEBUG:
9490 		return MGMT_LTK_P256_DEBUG;
9491 	}
9492 
9493 	return MGMT_LTK_UNAUTHENTICATED;
9494 }
9495 
/* Send MGMT_EV_NEW_LONG_TERM_KEY for a newly distributed LTK. The
 * store hint is cleared for non-identity random addresses since such
 * keys cannot be matched to the device on a later connection.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key distributed by the initiator */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9538 
/* Send MGMT_EV_NEW_IRK so userspace can persist a newly distributed
 * Identity Resolving Key together with the RPA it was seen under.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
9554 
9555 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9556 		   bool persistent)
9557 {
9558 	struct mgmt_ev_new_csrk ev;
9559 
9560 	memset(&ev, 0, sizeof(ev));
9561 
9562 	/* Devices using resolvable or non-resolvable random addresses
9563 	 * without providing an identity resolving key don't require
9564 	 * to store signature resolving keys. Their addresses will change
9565 	 * the next time around.
9566 	 *
9567 	 * Only when a remote device provides an identity address
9568 	 * make sure the signature resolving key is stored. So allow
9569 	 * static random and public addresses here.
9570 	 */
9571 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9572 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9573 		ev.store_hint = 0x00;
9574 	else
9575 		ev.store_hint = persistent;
9576 
9577 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9578 	ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
9579 	ev.key.type = csrk->type;
9580 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9581 
9582 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9583 }
9584 
/* Send MGMT_EV_NEW_CONN_PARAM so userspace can store updated LE
 * connection parameters. Non-identity addresses are skipped since
 * stored parameters could not be matched to the device later.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	/* Event fields are little-endian on the wire */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9605 
9606 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9607 			   u8 *name, u8 name_len)
9608 {
9609 	struct sk_buff *skb;
9610 	struct mgmt_ev_device_connected *ev;
9611 	u16 eir_len = 0;
9612 	u32 flags = 0;
9613 
9614 	if (test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
9615 		return;
9616 
9617 	/* allocate buff for LE or BR/EDR adv */
9618 	if (conn->le_adv_data_len > 0)
9619 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9620 				     sizeof(*ev) + conn->le_adv_data_len);
9621 	else
9622 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9623 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9624 				     eir_precalc_len(sizeof(conn->dev_class)));
9625 
9626 	ev = skb_put(skb, sizeof(*ev));
9627 	bacpy(&ev->addr.bdaddr, &conn->dst);
9628 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9629 
9630 	if (conn->out)
9631 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9632 
9633 	ev->flags = __cpu_to_le32(flags);
9634 
9635 	/* We must ensure that the EIR Data fields are ordered and
9636 	 * unique. Keep it simple for now and avoid the problem by not
9637 	 * adding any BR/EDR data to the LE adv.
9638 	 */
9639 	if (conn->le_adv_data_len > 0) {
9640 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9641 		eir_len = conn->le_adv_data_len;
9642 	} else {
9643 		if (name)
9644 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9645 
9646 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9647 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9648 						    conn->dev_class, sizeof(conn->dev_class));
9649 	}
9650 
9651 	ev->eir_len = cpu_to_le16(eir_len);
9652 
9653 	mgmt_event_skb(skb, NULL);
9654 }
9655 
9656 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9657 {
9658 	struct sock **sk = data;
9659 
9660 	cmd->cmd_complete(cmd, 0);
9661 
9662 	*sk = cmd->sk;
9663 	sock_hold(*sk);
9664 
9665 	mgmt_pending_remove(cmd);
9666 }
9667 
/* mgmt_pending_foreach() callback: send DEVICE_UNPAIRED for the
 * address in the pending UNPAIR_DEVICE command and complete it.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9678 
9679 bool mgmt_powering_down(struct hci_dev *hdev)
9680 {
9681 	struct mgmt_pending_cmd *cmd;
9682 	struct mgmt_mode *cp;
9683 
9684 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9685 	if (!cmd)
9686 		return false;
9687 
9688 	cp = cmd->param;
9689 	if (!cp->val)
9690 		return true;
9691 
9692 	return false;
9693 }
9694 
/* Send MGMT_EV_DEVICE_DISCONNECTED and complete any pending DISCONNECT
 * and UNPAIR_DEVICE commands for this controller.
 *
 * @mgmt_connected: whether the connection was previously announced to
 * userspace; if not, no event is sent.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	if (!mgmt_connected)
		return;

	/* Only ACL and LE links are reported to userspace */
	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete pending DISCONNECT commands; disconnect_rsp stores
	 * the first responder's socket (referenced) in sk so the event
	 * below can skip it.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9726 
9727 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9728 			    u8 link_type, u8 addr_type, u8 status)
9729 {
9730 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9731 	struct mgmt_cp_disconnect *cp;
9732 	struct mgmt_pending_cmd *cmd;
9733 
9734 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9735 			     hdev);
9736 
9737 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9738 	if (!cmd)
9739 		return;
9740 
9741 	cp = cmd->param;
9742 
9743 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9744 		return;
9745 
9746 	if (cp->addr.type != bdaddr_type)
9747 		return;
9748 
9749 	cmd->cmd_complete(cmd, mgmt_status(status));
9750 	mgmt_pending_remove(cmd);
9751 }
9752 
/* Send MGMT_EV_CONNECT_FAILED with the HCI status translated to a
 * mgmt status code.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9764 
/* Forward a PIN code request to userspace. PIN pairing is BR/EDR
 * only, hence the fixed BDADDR_BREDR address type.
 *
 * @secure: whether a 16 digit (secure) PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9775 
9776 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9777 				  u8 status)
9778 {
9779 	struct mgmt_pending_cmd *cmd;
9780 
9781 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9782 	if (!cmd)
9783 		return;
9784 
9785 	cmd->cmd_complete(cmd, mgmt_status(status));
9786 	mgmt_pending_remove(cmd);
9787 }
9788 
9789 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9790 				      u8 status)
9791 {
9792 	struct mgmt_pending_cmd *cmd;
9793 
9794 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9795 	if (!cmd)
9796 		return;
9797 
9798 	cmd->cmd_complete(cmd, mgmt_status(status));
9799 	mgmt_pending_remove(cmd);
9800 }
9801 
/* Ask userspace to confirm a pairing numeric comparison value.
 *
 * @confirm_hint: tells userspace whether a simple yes/no confirmation
 * is sufficient instead of displaying @value.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9818 
/* Ask userspace to provide a passkey for pairing. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9832 
9833 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9834 				      u8 link_type, u8 addr_type, u8 status,
9835 				      u8 opcode)
9836 {
9837 	struct mgmt_pending_cmd *cmd;
9838 
9839 	cmd = pending_find(opcode, hdev);
9840 	if (!cmd)
9841 		return -ENOENT;
9842 
9843 	cmd->cmd_complete(cmd, mgmt_status(status));
9844 	mgmt_pending_remove(cmd);
9845 
9846 	return 0;
9847 }
9848 
/* Complete a pending USER_CONFIRM_REPLY command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9855 
/* Complete a pending USER_CONFIRM_NEG_REPLY command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9863 
/* Complete a pending USER_PASSKEY_REPLY command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9870 
/* Complete a pending USER_PASSKEY_NEG_REPLY command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9878 
/* Notify userspace of the passkey to display during pairing.
 *
 * @entered: number of digits the remote side has entered so far.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
9894 
9895 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9896 {
9897 	struct mgmt_ev_auth_failed ev;
9898 	struct mgmt_pending_cmd *cmd;
9899 	u8 status = mgmt_status(hci_status);
9900 
9901 	bacpy(&ev.addr.bdaddr, &conn->dst);
9902 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9903 	ev.status = status;
9904 
9905 	cmd = find_pairing(conn);
9906 
9907 	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9908 		    cmd ? cmd->sk : NULL);
9909 
9910 	if (cmd) {
9911 		cmd->cmd_complete(cmd, status);
9912 		mgmt_pending_remove(cmd);
9913 	}
9914 }
9915 
/* Handle completion of enabling/disabling authentication: fail pending
 * SET_LINK_SECURITY commands on error, otherwise sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state, answer
 * the pending commands and emit new settings if the flag changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the mgmt flag and
	 * remember whether it actually changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first responder's socket */
	if (match.sk)
		sock_put(match.sk);
}
9942 
9943 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9944 {
9945 	struct cmd_lookup *match = data;
9946 
9947 	if (match->sk == NULL) {
9948 		match->sk = cmd->sk;
9949 		sock_hold(match->sk);
9950 	}
9951 }
9952 
/* Handle completion of a class of device update: on success, announce
 * the new class to interested sockets, skipping the socket that
 * triggered the change (found via the pending SET_DEV_CLASS /
 * ADD_UUID / REMOVE_UUID commands).
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* Class of device is always 3 bytes on the wire */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9971 
/* Handle completion of a local name update: broadcast the new name
 * (and extended info) to mgmt sockets, skipping the socket that set
 * it. If the change was not requested via mgmt, also update the
 * cached device name.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Name change was not mgmt-initiated; keep the cached
		 * copy in sync with the controller.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9999 
10000 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
10001 {
10002 	int i;
10003 
10004 	for (i = 0; i < uuid_count; i++) {
10005 		if (!memcmp(uuid, uuids[i], 16))
10006 			return true;
10007 	}
10008 
10009 	return false;
10010 }
10011 
/* Scan EIR/advertising data for service UUID fields and return true
 * if any advertised UUID matches an entry in @uuids. 16-bit and
 * 32-bit UUIDs are expanded to 128 bits using the Bluetooth base UUID
 * before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	/* EIR is a sequence of length-prefixed fields:
	 * eir[0] = length of (type + data), eir[1] = type.
	 */
	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* 16-bit UUIDs, little-endian, 2 bytes each */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* 32-bit UUIDs, little-endian, 4 bytes each */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* Full 128-bit UUIDs, 16 bytes each */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past this field (length byte + contents) */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
10066 
/* Schedule a delayed LE scan restart so duplicate filtering picks up
 * fresh RSSI values, but only if the scan window will still be open
 * when the restart would fire.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Skip the restart if it would land after the scheduled end of
	 * the current scan (scan_start + scan_duration).
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
10081 
/* Apply the service discovery filter (RSSI threshold and UUID list)
 * to a found device. Returns true if the result should be reported
 * to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are checked.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
10126 
/* Send MGMT_EV_ADV_MONITOR_DEVICE_LOST when a device previously
 * matched by the advertisement monitor with @handle is no longer seen.
 */
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
10139 
10140 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
10141 					       struct sk_buff *skb,
10142 					       struct sock *skip_sk,
10143 					       u16 handle)
10144 {
10145 	struct sk_buff *advmon_skb;
10146 	size_t advmon_skb_len;
10147 	__le16 *monitor_handle;
10148 
10149 	if (!skb)
10150 		return;
10151 
10152 	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
10153 			  sizeof(struct mgmt_ev_device_found)) + skb->len;
10154 	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
10155 				    advmon_skb_len);
10156 	if (!advmon_skb)
10157 		return;
10158 
10159 	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
10160 	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
10161 	 * store monitor_handle of the matched monitor.
10162 	 */
10163 	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
10164 	*monitor_handle = cpu_to_le16(handle);
10165 	skb_put_data(advmon_skb, skb->data, skb->len);
10166 
10167 	mgmt_event_skb(advmon_skb, skip_sk);
10168 }
10169 
/* Dispatch a DEVICE_FOUND skb to the right mgmt event(s), taking
 * active advertisement monitors into account. Consumes @skb: it is
 * either forwarded via mgmt_event_skb() or freed.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Recomputed below: set again if any monitored device is still
	 * awaiting its one-time notification.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Notify at most once per monitored device */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* Either forward the original skb or drop it - we own it */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
10233 
/* Send MGMT_EV_MESH_DEVICE_FOUND for an LE advertisement, but only if
 * it contains at least one of the AD types requested in
 * hdev->mesh_ad_types (an empty filter accepts everything).
 */
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	/* An unset first entry means no AD type filter is configured */
	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		/* Walk the length-prefixed AD fields; eir[i + 1] is the
		 * AD type of the field starting at offset i.
		 */
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		/* Same AD type scan over the scan response data */
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
10299 
/* Report a found device to userspace as MGMT_EV_DEVICE_FOUND (and/or
 * the mesh and advertisement monitor variants), applying the active
 * discovery filters first.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Mesh mode gets its own event for LE advertisements */
	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Append the class of device as an EIR field unless one is
	 * already present in the advertising data.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	/* Ownership of skb is passed on; it is either forwarded or freed */
	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
10391 
10392 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10393 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
10394 {
10395 	struct sk_buff *skb;
10396 	struct mgmt_ev_device_found *ev;
10397 	u16 eir_len = 0;
10398 	u32 flags = 0;
10399 
10400 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
10401 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
10402 
10403 	ev = skb_put(skb, sizeof(*ev));
10404 	bacpy(&ev->addr.bdaddr, bdaddr);
10405 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
10406 	ev->rssi = rssi;
10407 
10408 	if (name)
10409 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
10410 	else
10411 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
10412 
10413 	ev->eir_len = cpu_to_le16(eir_len);
10414 	ev->flags = cpu_to_le32(flags);
10415 
10416 	mgmt_event_skb(skb, NULL);
10417 }
10418 
/* Notify userspace that discovery has started or stopped.
 *
 * @discovering: non-zero when discovery is active.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
10431 
/* Notify userspace that the controller is suspending, including the
 * suspend state it entered.
 */
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
10439 
10440 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
10441 		   u8 addr_type)
10442 {
10443 	struct mgmt_ev_controller_resume ev;
10444 
10445 	ev.wake_reason = reason;
10446 	if (bdaddr) {
10447 		bacpy(&ev.addr.bdaddr, bdaddr);
10448 		ev.addr.type = addr_type;
10449 	} else {
10450 		memset(&ev.addr, 0, sizeof(ev.addr));
10451 	}
10452 
10453 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
10454 }
10455 
/* The mgmt control channel: routes HCI_CHANNEL_CONTROL messages to
 * the mgmt_handlers command table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
10462 
/* Register the mgmt control channel. Returns 0 or a negative errno. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
10467 
/* Unregister the mgmt control channel. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
10472 
10473 void mgmt_cleanup(struct sock *sk)
10474 {
10475 	struct mgmt_mesh_tx *mesh_tx;
10476 	struct hci_dev *hdev;
10477 
10478 	read_lock(&hci_dev_list_lock);
10479 
10480 	list_for_each_entry(hdev, &hci_dev_list, list) {
10481 		do {
10482 			mesh_tx = mgmt_mesh_next(hdev, sk);
10483 
10484 			if (mesh_tx)
10485 				mesh_send_complete(hdev, mesh_tx, true);
10486 		} while (mesh_tx);
10487 	}
10488 
10489 	read_unlock(&hci_dev_list_lock);
10490 }
10491