xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 47db6b42991e6d5645d0938e43085aaf88cdfba4)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 
43 #define MGMT_VERSION	1
44 #define MGMT_REVISION	21
45 
/* Opcodes reported to trusted sockets by read_commands(). The array
 * order is the order emitted on the wire, so new opcodes are appended
 * at the end rather than inserted.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
132 
/* Events reported to trusted sockets by read_commands(); emitted in
 * array order, so entries are only ever appended.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};
177 
/* Subset of opcodes reported to sockets without HCI_SOCK_TRUSTED;
 * read-only commands that expose no security-sensitive operations.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
190 
/* Subset of events reported to sockets without HCI_SOCK_TRUSTED. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
205 
206 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
207 
208 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
209 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
210 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code (see mgmt_status()); the
 * position of each entry IS its meaning, so entries must never be
 * reordered or removed. The trailing comments name the HCI error each
 * slot corresponds to.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
278 
279 static u8 mgmt_errno_status(int err)
280 {
281 	switch (err) {
282 	case 0:
283 		return MGMT_STATUS_SUCCESS;
284 	case -EPERM:
285 		return MGMT_STATUS_REJECTED;
286 	case -EINVAL:
287 		return MGMT_STATUS_INVALID_PARAMS;
288 	case -EOPNOTSUPP:
289 		return MGMT_STATUS_NOT_SUPPORTED;
290 	case -EBUSY:
291 		return MGMT_STATUS_BUSY;
292 	case -ETIMEDOUT:
293 		return MGMT_STATUS_AUTH_FAILED;
294 	case -ENOMEM:
295 		return MGMT_STATUS_NO_RESOURCES;
296 	case -EISCONN:
297 		return MGMT_STATUS_ALREADY_CONNECTED;
298 	case -ENOTCONN:
299 		return MGMT_STATUS_DISCONNECTED;
300 	}
301 
302 	return MGMT_STATUS_FAILED;
303 }
304 
305 static u8 mgmt_status(int err)
306 {
307 	if (err < 0)
308 		return mgmt_errno_status(err);
309 
310 	if (err < ARRAY_SIZE(mgmt_status_table))
311 		return mgmt_status_table[err];
312 
313 	return MGMT_STATUS_FAILED;
314 }
315 
/* Broadcast an index-related event on the control channel to every
 * socket matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
322 
/* Broadcast an event on the control channel to sockets matching @flag,
 * optionally skipping @skip_sk (typically the command's originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
329 
/* Broadcast an event on the control channel to trusted sockets only,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
336 
337 static u8 le_addr_type(u8 mgmt_addr_type)
338 {
339 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
340 		return ADDR_LE_DEV_PUBLIC;
341 	else
342 		return ADDR_LE_DEV_RANDOM;
343 }
344 
/* Fill a Read Version response buffer with the interface version and
 * revision; the revision is sent little-endian per the mgmt protocol.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
352 
/* Handler for MGMT_OP_READ_VERSION: reply with the management
 * interface version/revision. Controller-independent, so it answers
 * with MGMT_INDEX_NONE.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
365 
/* Handler for MGMT_OP_READ_COMMANDS: report the supported command and
 * event opcodes. Trusted sockets get the full tables; untrusted
 * sockets only the read-only subsets. The reply packs all command
 * opcodes followed by all event opcodes into one little-endian array.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	/* Fixed header plus one u16 per reported command and event */
	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		/* Commands first, events immediately after */
		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
417 
/* Handler for MGMT_OP_READ_INDEX_LIST: report the indexes of all
 * configured primary controllers. Two passes are made under
 * hci_dev_list_lock: the first sizes the reply, the second fills it
 * while additionally skipping devices in setup/config/user-channel
 * state or marked raw-only — so the final count may be smaller than
 * the allocation, which is harmless.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reported indexes */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocating while holding the dev list read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes actually visible to mgmt */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the length from the final (possibly smaller) count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
477 
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list()
 * but reports primary controllers that still have HCI_UNCONFIGURED
 * set. Same two-pass-under-lock structure and the same extra
 * setup/config/user-channel/raw-only filtering on the second pass.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: still holding the dev list read lock */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: record the qualifying indexes */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
537 
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: report all primary and AMP
 * controllers with a per-entry type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus. As a side effect, switches
 * the calling socket over to extended index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound for the allocation */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: still holding the dev list read lock */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill the entries that mgmt actually exposes */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
611 
/* Return true if the controller needs no further configuration: an
 * external config (if required by quirk) has been performed, and a
 * public address is set when the quirks demand one.
 */
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}
625 
/* Build the little-endian bitmask of configuration options that are
 * still outstanding for this controller; mirrors the checks done by
 * is_configured().
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
641 
642 static int new_options(struct hci_dev *hdev, struct sock *skip)
643 {
644 	__le32 options = get_missing_options(hdev);
645 
646 	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
647 				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
648 }
649 
650 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
651 {
652 	__le32 options = get_missing_options(hdev);
653 
654 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
655 				 sizeof(options));
656 }
657 
/* Handler for MGMT_OP_READ_CONFIG_INFO: report the manufacturer, which
 * configuration options the controller supports (external config via
 * quirk, public address via a set_bdaddr driver hook) and which of
 * them are still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Public address can only be set if the driver provides a hook */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
685 
/* Build the bitmask of PHYs this controller can use, derived from its
 * LMP/LE feature bits. BR/EDR PHYs nest: EDR 2M/3M and multi-slot
 * variants are only reported when the corresponding capability bits
 * are present.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* Basic rate 1-slot is mandatory for any BR/EDR controller */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			/* 3M EDR implies 2M capability, hence the nesting */
			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for any LE controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
737 
/* Build the bitmask of PHYs currently selected. Note the asymmetry in
 * how hdev->pkt_type is interpreted: basic-rate multi-slot bits
 * (HCI_DM*/HCI_DH*) mean "enabled" when set, while EDR bits
 * (HCI_2DH*/HCI_3DH*) are "shall not be used" bits, so an EDR PHY is
 * selected when its bit is CLEAR.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR pkt_type bits are exclusion bits: clear = in use */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE selection comes from the default TX/RX PHY preferences */
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
800 
801 static u32 get_configurable_phys(struct hci_dev *hdev)
802 {
803 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
804 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
805 }
806 
/* Build the bitmask of settings this controller can support, based on
 * its BR/EDR and LE capabilities and quirks; advertised via
 * MGMT_OP_READ_INFO.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Settings available on every controller */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (page scan interleaving) needs >= 1.2 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed is additionally gated by a build option */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		/* LE Secure Connections (SMP-based) is always available */
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration only makes sense with external config or a
	 * driver-provided way to set the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
853 
/* Build the bitmask of settings currently active on the controller,
 * derived from the hdev flag bits; reported in MGMT_OP_READ_INFO and
 * in New Settings events.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
924 
/* Look up a pending mgmt command for @opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
929 
/* Like pending_find(), but additionally matches the command's user
 * data pointer.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
936 
/* Return the advertising-data discoverability flag (LE_AD_GENERAL,
 * LE_AD_LIMITED or 0) to place in the AD Flags field. A pending
 * Set Discoverable command takes precedence over the current hdev
 * flags, since the flags have not settled yet.
 */
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* 0x01 = general discoverable, 0x02 = limited discoverable */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}
960 
/* Return whether the controller is (or is about to become)
 * connectable. A pending Set Connectable command's requested value
 * wins over the current HCI_CONNECTABLE flag.
 */
bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
977 
/* hci_cmd_sync callback: flush the service cache by refreshing the EIR
 * data and then the class of device. Always reports success.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
985 
/* Delayed-work handler that expires the service cache. If the
 * HCI_SERVICE_CACHE flag was still set, clear it atomically and queue
 * the synchronous EIR/class refresh.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	/* test-and-clear: only one expiry does the refresh */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
996 
/* hci_cmd_sync callback run when the RPA has expired: restart
 * advertising so a fresh RPA gets generated and programmed.
 */
static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}
1008 
/* Delayed-work handler for RPA expiry: mark the RPA expired and, if
 * advertising is currently enabled, queue the synchronous restart that
 * rotates to a new RPA.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	/* Nothing to rotate if we are not advertising */
	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1023 
/* Put hdev under mgmt control on first mgmt access and perform the
 * one-time mgmt-specific initialization.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Only initialize once; subsequent callers see the flag set */
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1039 
/* MGMT_OP_READ_INFO: report the controller's address, HCI version,
 * manufacturer, class of device, names and settings bitmasks.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Hold hdev->lock so the snapshot of fields below is consistent */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1069 
1070 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1071 {
1072 	u16 eir_len = 0;
1073 	size_t name_len;
1074 
1075 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1076 		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1077 					  hdev->dev_class, 3);
1078 
1079 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1080 		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1081 					  hdev->appearance);
1082 
1083 	name_len = strlen(hdev->dev_name);
1084 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1085 				  hdev->dev_name, name_len);
1086 
1087 	name_len = strlen(hdev->short_name);
1088 	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1089 				  hdev->short_name, name_len);
1090 
1091 	return eir_len;
1092 }
1093 
1094 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1095 				    void *data, u16 data_len)
1096 {
1097 	char buf[512];
1098 	struct mgmt_rp_read_ext_info *rp = (void *)buf;
1099 	u16 eir_len;
1100 
1101 	bt_dev_dbg(hdev, "sock %p", sk);
1102 
1103 	memset(&buf, 0, sizeof(buf));
1104 
1105 	hci_dev_lock(hdev);
1106 
1107 	bacpy(&rp->bdaddr, &hdev->bdaddr);
1108 
1109 	rp->version = hdev->hci_ver;
1110 	rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1111 
1112 	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1113 	rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1114 
1115 
1116 	eir_len = append_eir_data_to_buf(hdev, rp->eir);
1117 	rp->eir_len = cpu_to_le16(eir_len);
1118 
1119 	hci_dev_unlock(hdev);
1120 
1121 	/* If this command is called at least once, then the events
1122 	 * for class of device and local name changes are disabled
1123 	 * and only the new extended controller information event
1124 	 * is used.
1125 	 */
1126 	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1127 	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1128 	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1129 
1130 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1131 				 sizeof(*rp) + eir_len);
1132 }
1133 
/* Broadcast an Extended Controller Information Changed event to every
 * mgmt socket that opted into extended-info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1149 
/* Complete @opcode with the current settings bitmask as the payload */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1157 
1158 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1159 {
1160 	struct mgmt_ev_advertising_added ev;
1161 
1162 	ev.instance = instance;
1163 
1164 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1165 }
1166 
1167 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1168 			      u8 instance)
1169 {
1170 	struct mgmt_ev_advertising_removed ev;
1171 
1172 	ev.instance = instance;
1173 
1174 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1175 }
1176 
1177 static void cancel_adv_timeout(struct hci_dev *hdev)
1178 {
1179 	if (hdev->adv_instance_timeout) {
1180 		hdev->adv_instance_timeout = 0;
1181 		cancel_delayed_work(&hdev->adv_instance_expire);
1182 	}
1183 }
1184 
/* This function requires the caller holds hdev->lock.
 *
 * Re-sort every stored LE connection parameter entry back onto the
 * pending-connection or pending-report action list according to its
 * auto_connect policy.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}
1209 
/* Broadcast a New Settings event with the current settings bitmask to
 * all sockets registered for setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1217 
/* hci_cmd_sync completion for MGMT_OP_SET_POWERED: respond to the
 * requester and, on a successful power-on, restore LE auto-connect
 * actions and notify listeners of the settings change.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power on: re-arm LE actions under hdev->lock */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_free(cmd);
}
1247 
1248 static int set_powered_sync(struct hci_dev *hdev, void *data)
1249 {
1250 	struct mgmt_pending_cmd *cmd = data;
1251 	struct mgmt_mode *cp = cmd->param;
1252 
1253 	BT_DBG("%s", hdev->name);
1254 
1255 	return hci_set_powered_sync(hdev, cp->val);
1256 }
1257 
/* MGMT_OP_SET_POWERED: power the controller on (0x01) or off (0x00).
 * The actual state change runs asynchronously via hci_cmd_sync_queue
 * and completes in mgmt_set_powered_complete().
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one SET_POWERED may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: just echo current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1297 
/* Notify all interested mgmt sockets that the settings have changed */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1302 
/* Iteration context for mgmt_pending_foreach callbacks: records the
 * socket of the first handled command (see settings_rsp) and carries
 * the status to report.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1308 
/* mgmt_pending_foreach callback: answer the pending command with the
 * current settings and free it. The first responder's socket is saved
 * in the cmd_lookup with an extra reference (sock_hold) so the caller
 * can skip it when broadcasting New Settings; the caller must sock_put.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1324 
/* mgmt_pending_foreach callback: fail the pending command with the
 * status pointed to by @data and release the command.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1332 
1333 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1334 {
1335 	if (cmd->cmd_complete) {
1336 		u8 *status = data;
1337 
1338 		cmd->cmd_complete(cmd, *status);
1339 		mgmt_pending_remove(cmd);
1340 
1341 		return;
1342 	}
1343 
1344 	cmd_status_rsp(cmd, data);
1345 }
1346 
/* Generic cmd_complete handler: echo the original command parameters
 * back as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1352 
/* cmd_complete handler for address-based commands: respond with just
 * the leading mgmt_addr_info portion of the original parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1358 
1359 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1360 {
1361 	if (!lmp_bredr_capable(hdev))
1362 		return MGMT_STATUS_NOT_SUPPORTED;
1363 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1364 		return MGMT_STATUS_REJECTED;
1365 	else
1366 		return MGMT_STATUS_SUCCESS;
1367 }
1368 
1369 static u8 mgmt_le_support(struct hci_dev *hdev)
1370 {
1371 	if (!lmp_le_capable(hdev))
1372 		return MGMT_STATUS_NOT_SUPPORTED;
1373 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1374 		return MGMT_STATUS_REJECTED;
1375 	else
1376 		return MGMT_STATUS_SUCCESS;
1377 }
1378 
/* Completion of the SET_DISCOVERABLE HCI work: respond to the pending
 * mgmt command, arm the discoverable timeout (if any) on success, and
 * notify listeners of the settings change.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the flag set optimistically in set_discoverable */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the delayed work that turns discoverable back off */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1413 
/* MGMT_OP_SET_DISCOVERABLE: turn discoverable mode off (0x00), general
 * discoverable on (0x01) or limited discoverable on (0x02), with an
 * optional timeout after which discoverable mode is disabled again.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* At least one transport must be enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be armed while powered on */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Powered off: only toggle the flag, no HCI traffic needed */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1543 
/* Completion of the SET_CONNECTABLE HCI work: respond to the pending
 * mgmt command and, on success, notify listeners of the settings
 * change.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1571 
1572 static int set_connectable_update_settings(struct hci_dev *hdev,
1573 					   struct sock *sk, u8 val)
1574 {
1575 	bool changed = false;
1576 	int err;
1577 
1578 	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1579 		changed = true;
1580 
1581 	if (val) {
1582 		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1583 	} else {
1584 		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1585 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1586 	}
1587 
1588 	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1589 	if (err < 0)
1590 		return err;
1591 
1592 	if (changed) {
1593 		hci_req_update_scan(hdev);
1594 		hci_update_passive_scan(hdev);
1595 		return new_settings(hdev, sk);
1596 	}
1597 
1598 	return 0;
1599 }
1600 
/* MGMT_OP_SET_CONNECTABLE: enable (0x01) or disable (0x00) connectable
 * mode. When powered off only the flags are touched; otherwise the
 * HCI-level update is queued on the request workqueue.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* At least one transport must be enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update, no HCI traffic */
	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also disables discoverable and
		 * any pending discoverable timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1657 
/* MGMT_OP_SET_BONDABLE: enable (0x01) or disable (0x00) bondable mode.
 * This is a pure flag change; no HCI command is needed, but in limited
 * privacy mode the advertising state may need refreshing.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1700 
/* MGMT_OP_SET_LINK_SECURITY: enable or disable BR/EDR link level
 * security (authentication). When powered, this sends the HCI Write
 * Authentication Enable command; the pending command is completed from
 * the corresponding command-complete event handler.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: only the flag needs updating */
	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1769 
/* MGMT_OP_SET_SSP: enable or disable Secure Simple Pairing. When
 * powered, the HCI Write Simple Pairing Mode command is sent and the
 * pending command completes from its command-complete handler.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Powered off: flag-only update; disabling SSP also takes down
	 * High Speed since HS depends on SSP.
	 */
	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off SSP debug mode if it was in use */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1850 
/* MGMT_OP_SET_HS: enable or disable High Speed (AMP) support. This is
 * a host-side flag only; no HCI command is sent. HS requires SSP to be
 * enabled, and disabling it while powered is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending SET_SSP could invalidate the SSP precondition above */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS is only allowed while powered off */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1911 
/* HCI request completion for SET_LE: respond to all pending SET_LE
 * commands, broadcast New Settings and, when LE was just enabled,
 * refresh the default advertising and scan response data.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first responder's socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_passive_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1958 
/* MGMT_OP_SET_LE: enable or disable Low Energy support via the HCI
 * Write LE Host Supported command. Completion is handled by
 * le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	/* Powered off, or already in the requested state: flags only */
	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before turning LE host support off */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2065 
2066 /* This is a helper function to test for pending mgmt commands that can
2067  * cause CoD or EIR HCI commands. We can only allow one such pending
2068  * mgmt command at a time since otherwise we cannot easily track what
2069  * the current values are, will be, and based on that calculate if a new
2070  * HCI command needs to be sent and if yes with what value.
2071  */
2072 static bool pending_eir_or_class(struct hci_dev *hdev)
2073 {
2074 	struct mgmt_pending_cmd *cmd;
2075 
2076 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2077 		switch (cmd->opcode) {
2078 		case MGMT_OP_ADD_UUID:
2079 		case MGMT_OP_REMOVE_UUID:
2080 		case MGMT_OP_SET_DEV_CLASS:
2081 		case MGMT_OP_SET_POWERED:
2082 			return true;
2083 		}
2084 	}
2085 
2086 	return false;
2087 }
2088 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16-bit and 32-bit UUIDs are shortened
 * aliases of it (see get_uuid_size).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2093 
2094 static u8 get_uuid_size(const u8 *uuid)
2095 {
2096 	u32 val;
2097 
2098 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2099 		return 128;
2100 
2101 	val = get_unaligned_le32(&uuid[12]);
2102 	if (val > 0xffff)
2103 		return 32;
2104 
2105 	return 16;
2106 }
2107 
/* Common hci_cmd_sync completion for the UUID and device-class
 * commands: complete the pending mgmt command with the (possibly
 * updated) class of device as payload and free it.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2119 
/* Sync worker for MGMT_OP_ADD_UUID: refresh the Class of Device first and
 * then the EIR data.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2130 
2131 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2132 {
2133 	struct mgmt_cp_add_uuid *cp = data;
2134 	struct mgmt_pending_cmd *cmd;
2135 	struct bt_uuid *uuid;
2136 	int err;
2137 
2138 	bt_dev_dbg(hdev, "sock %p", sk);
2139 
2140 	hci_dev_lock(hdev);
2141 
2142 	if (pending_eir_or_class(hdev)) {
2143 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2144 				      MGMT_STATUS_BUSY);
2145 		goto failed;
2146 	}
2147 
2148 	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2149 	if (!uuid) {
2150 		err = -ENOMEM;
2151 		goto failed;
2152 	}
2153 
2154 	memcpy(uuid->uuid, cp->uuid, 16);
2155 	uuid->svc_hint = cp->svc_hint;
2156 	uuid->size = get_uuid_size(cp->uuid);
2157 
2158 	list_add_tail(&uuid->list, &hdev->uuids);
2159 
2160 	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2161 	if (!cmd) {
2162 		err = -ENOMEM;
2163 		goto failed;
2164 	}
2165 
2166 	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2167 	if (err < 0) {
2168 		mgmt_pending_free(cmd);
2169 		goto failed;
2170 	}
2171 
2172 failed:
2173 	hci_dev_unlock(hdev);
2174 	return err;
2175 }
2176 
2177 static bool enable_service_cache(struct hci_dev *hdev)
2178 {
2179 	if (!hdev_is_powered(hdev))
2180 		return false;
2181 
2182 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2183 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2184 				   CACHE_TIMEOUT);
2185 		return true;
2186 	}
2187 
2188 	return false;
2189 }
2190 
/* Sync worker for MGMT_OP_REMOVE_UUID: refresh the Class of Device first
 * and then the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2201 
2202 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2203 		       u16 len)
2204 {
2205 	struct mgmt_cp_remove_uuid *cp = data;
2206 	struct mgmt_pending_cmd *cmd;
2207 	struct bt_uuid *match, *tmp;
2208 	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2209 	int err, found;
2210 
2211 	bt_dev_dbg(hdev, "sock %p", sk);
2212 
2213 	hci_dev_lock(hdev);
2214 
2215 	if (pending_eir_or_class(hdev)) {
2216 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2217 				      MGMT_STATUS_BUSY);
2218 		goto unlock;
2219 	}
2220 
2221 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2222 		hci_uuids_clear(hdev);
2223 
2224 		if (enable_service_cache(hdev)) {
2225 			err = mgmt_cmd_complete(sk, hdev->id,
2226 						MGMT_OP_REMOVE_UUID,
2227 						0, hdev->dev_class, 3);
2228 			goto unlock;
2229 		}
2230 
2231 		goto update_class;
2232 	}
2233 
2234 	found = 0;
2235 
2236 	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2237 		if (memcmp(match->uuid, cp->uuid, 16) != 0)
2238 			continue;
2239 
2240 		list_del(&match->list);
2241 		kfree(match);
2242 		found++;
2243 	}
2244 
2245 	if (found == 0) {
2246 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2247 				      MGMT_STATUS_INVALID_PARAMS);
2248 		goto unlock;
2249 	}
2250 
2251 update_class:
2252 	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2253 	if (!cmd) {
2254 		err = -ENOMEM;
2255 		goto unlock;
2256 	}
2257 
2258 	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2259 				 mgmt_class_complete);
2260 	if (err < 0)
2261 		mgmt_pending_free(cmd);
2262 
2263 unlock:
2264 	hci_dev_unlock(hdev);
2265 	return err;
2266 }
2267 
2268 static int set_class_sync(struct hci_dev *hdev, void *data)
2269 {
2270 	int err = 0;
2271 
2272 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2273 		cancel_delayed_work_sync(&hdev->service_cache);
2274 		err = hci_update_eir_sync(hdev);
2275 	}
2276 
2277 	if (err)
2278 		return err;
2279 
2280 	return hci_update_class_sync(hdev);
2281 }
2282 
2283 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2284 			 u16 len)
2285 {
2286 	struct mgmt_cp_set_dev_class *cp = data;
2287 	struct mgmt_pending_cmd *cmd;
2288 	int err;
2289 
2290 	bt_dev_dbg(hdev, "sock %p", sk);
2291 
2292 	if (!lmp_bredr_capable(hdev))
2293 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2294 				       MGMT_STATUS_NOT_SUPPORTED);
2295 
2296 	hci_dev_lock(hdev);
2297 
2298 	if (pending_eir_or_class(hdev)) {
2299 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2300 				      MGMT_STATUS_BUSY);
2301 		goto unlock;
2302 	}
2303 
2304 	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2305 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2306 				      MGMT_STATUS_INVALID_PARAMS);
2307 		goto unlock;
2308 	}
2309 
2310 	hdev->major_class = cp->major;
2311 	hdev->minor_class = cp->minor;
2312 
2313 	if (!hdev_is_powered(hdev)) {
2314 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2315 					hdev->dev_class, 3);
2316 		goto unlock;
2317 	}
2318 
2319 	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2320 	if (!cmd) {
2321 		err = -ENOMEM;
2322 		goto unlock;
2323 	}
2324 
2325 	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2326 				 mgmt_class_complete);
2327 	if (err < 0)
2328 		mgmt_pending_free(cmd);
2329 
2330 unlock:
2331 	hci_dev_unlock(hdev);
2332 	return err;
2333 }
2334 
/* MGMT_OP_LOAD_LINK_KEYS: replace the whole BR/EDR link-key store with the
 * list supplied by userspace and update the keep-debug-keys policy.
 *
 * The entire request is validated before any state is modified, so the key
 * store is either fully replaced or left untouched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key_count for which the parameter size still fits in u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Link keys only exist for BR/EDR capable controllers */
	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual parameter length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* debug_keys is a boolean on the wire */
	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry up front: only BR/EDR addresses and known
	 * link-key types (<= 0x08) are accepted.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Emit New Settings only if the debug-keys policy actually flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Keys on the block list are never loaded */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2423 
2424 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2425 			   u8 addr_type, struct sock *skip_sk)
2426 {
2427 	struct mgmt_ev_device_unpaired ev;
2428 
2429 	bacpy(&ev.addr.bdaddr, bdaddr);
2430 	ev.addr.type = addr_type;
2431 
2432 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2433 			  skip_sk);
2434 }
2435 
/* MGMT_OP_UNPAIR_DEVICE: remove all stored keys for a remote device and
 * optionally terminate an existing link.
 *
 * BR/EDR: only the link key is removed. LE: any ongoing SMP pairing is
 * aborted (which also removes LTK/IRK) and auto-connection is disabled.
 * When cp->disconnect is set and a connection exists, the reply is
 * deferred until hci_abort_conn() finishes (via addr_cmd_complete).
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	/* Every reply path carries the address being unpaired */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean on the wire */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			/* No stored key: the device was never paired */
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* No connection: drop the parameters right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Reply is deferred; addr_cmd_complete sends it on completion */
	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2563 
2564 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2565 		      u16 len)
2566 {
2567 	struct mgmt_cp_disconnect *cp = data;
2568 	struct mgmt_rp_disconnect rp;
2569 	struct mgmt_pending_cmd *cmd;
2570 	struct hci_conn *conn;
2571 	int err;
2572 
2573 	bt_dev_dbg(hdev, "sock %p", sk);
2574 
2575 	memset(&rp, 0, sizeof(rp));
2576 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2577 	rp.addr.type = cp->addr.type;
2578 
2579 	if (!bdaddr_type_is_valid(cp->addr.type))
2580 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2581 					 MGMT_STATUS_INVALID_PARAMS,
2582 					 &rp, sizeof(rp));
2583 
2584 	hci_dev_lock(hdev);
2585 
2586 	if (!test_bit(HCI_UP, &hdev->flags)) {
2587 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2588 					MGMT_STATUS_NOT_POWERED, &rp,
2589 					sizeof(rp));
2590 		goto failed;
2591 	}
2592 
2593 	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2594 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2595 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2596 		goto failed;
2597 	}
2598 
2599 	if (cp->addr.type == BDADDR_BREDR)
2600 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2601 					       &cp->addr.bdaddr);
2602 	else
2603 		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2604 					       le_addr_type(cp->addr.type));
2605 
2606 	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2607 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2608 					MGMT_STATUS_NOT_CONNECTED, &rp,
2609 					sizeof(rp));
2610 		goto failed;
2611 	}
2612 
2613 	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2614 	if (!cmd) {
2615 		err = -ENOMEM;
2616 		goto failed;
2617 	}
2618 
2619 	cmd->cmd_complete = generic_cmd_complete;
2620 
2621 	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2622 	if (err < 0)
2623 		mgmt_pending_remove(cmd);
2624 
2625 failed:
2626 	hci_dev_unlock(hdev);
2627 	return err;
2628 }
2629 
2630 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2631 {
2632 	switch (link_type) {
2633 	case LE_LINK:
2634 		switch (addr_type) {
2635 		case ADDR_LE_DEV_PUBLIC:
2636 			return BDADDR_LE_PUBLIC;
2637 
2638 		default:
2639 			/* Fallback to LE Random address type */
2640 			return BDADDR_LE_RANDOM;
2641 		}
2642 
2643 	default:
2644 		/* Fallback to BR/EDR type */
2645 		return BDADDR_BREDR;
2646 	}
2647 }
2648 
2649 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2650 			   u16 data_len)
2651 {
2652 	struct mgmt_rp_get_connections *rp;
2653 	struct hci_conn *c;
2654 	int err;
2655 	u16 i;
2656 
2657 	bt_dev_dbg(hdev, "sock %p", sk);
2658 
2659 	hci_dev_lock(hdev);
2660 
2661 	if (!hdev_is_powered(hdev)) {
2662 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2663 				      MGMT_STATUS_NOT_POWERED);
2664 		goto unlock;
2665 	}
2666 
2667 	i = 0;
2668 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2669 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2670 			i++;
2671 	}
2672 
2673 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2674 	if (!rp) {
2675 		err = -ENOMEM;
2676 		goto unlock;
2677 	}
2678 
2679 	i = 0;
2680 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2681 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2682 			continue;
2683 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2684 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2685 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2686 			continue;
2687 		i++;
2688 	}
2689 
2690 	rp->conn_count = cpu_to_le16(i);
2691 
2692 	/* Recalculate length in case of filtered SCO connections, etc */
2693 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2694 				struct_size(rp, addr, i));
2695 
2696 	kfree(rp);
2697 
2698 unlock:
2699 	hci_dev_unlock(hdev);
2700 	return err;
2701 }
2702 
/* Create a pending PIN Code Neg Reply command and forward it to the
 * controller; completion is reported later through addr_cmd_complete.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command carries only the remote address */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2723 
2724 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2725 			  u16 len)
2726 {
2727 	struct hci_conn *conn;
2728 	struct mgmt_cp_pin_code_reply *cp = data;
2729 	struct hci_cp_pin_code_reply reply;
2730 	struct mgmt_pending_cmd *cmd;
2731 	int err;
2732 
2733 	bt_dev_dbg(hdev, "sock %p", sk);
2734 
2735 	hci_dev_lock(hdev);
2736 
2737 	if (!hdev_is_powered(hdev)) {
2738 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2739 				      MGMT_STATUS_NOT_POWERED);
2740 		goto failed;
2741 	}
2742 
2743 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2744 	if (!conn) {
2745 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2746 				      MGMT_STATUS_NOT_CONNECTED);
2747 		goto failed;
2748 	}
2749 
2750 	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2751 		struct mgmt_cp_pin_code_neg_reply ncp;
2752 
2753 		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2754 
2755 		bt_dev_err(hdev, "PIN code is not 16 bytes long");
2756 
2757 		err = send_pin_code_neg_reply(sk, hdev, &ncp);
2758 		if (err >= 0)
2759 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2760 					      MGMT_STATUS_INVALID_PARAMS);
2761 
2762 		goto failed;
2763 	}
2764 
2765 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2766 	if (!cmd) {
2767 		err = -ENOMEM;
2768 		goto failed;
2769 	}
2770 
2771 	cmd->cmd_complete = addr_cmd_complete;
2772 
2773 	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2774 	reply.pin_len = cp->pin_len;
2775 	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2776 
2777 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2778 	if (err < 0)
2779 		mgmt_pending_remove(cmd);
2780 
2781 failed:
2782 	hci_dev_unlock(hdev);
2783 	return err;
2784 }
2785 
2786 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2787 			     u16 len)
2788 {
2789 	struct mgmt_cp_set_io_capability *cp = data;
2790 
2791 	bt_dev_dbg(hdev, "sock %p", sk);
2792 
2793 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2794 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2795 				       MGMT_STATUS_INVALID_PARAMS);
2796 
2797 	hci_dev_lock(hdev);
2798 
2799 	hdev->io_capability = cp->io_capability;
2800 
2801 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2802 
2803 	hci_dev_unlock(hdev);
2804 
2805 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2806 				 NULL, 0);
2807 }
2808 
2809 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2810 {
2811 	struct hci_dev *hdev = conn->hdev;
2812 	struct mgmt_pending_cmd *cmd;
2813 
2814 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2815 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2816 			continue;
2817 
2818 		if (cmd->user_data != conn)
2819 			continue;
2820 
2821 		return cmd;
2822 	}
2823 
2824 	return NULL;
2825 }
2826 
/* Finish a Pair Device command with the given mgmt @status: reply to the
 * initiating socket and detach the command from the connection.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Release the hold taken while pairing was in progress */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Balances the hci_conn_get() stored in cmd->user_data */
	hci_conn_put(conn);

	return err;
}
2855 
2856 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2857 {
2858 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2859 	struct mgmt_pending_cmd *cmd;
2860 
2861 	cmd = find_pairing(conn);
2862 	if (cmd) {
2863 		cmd->cmd_complete(cmd, status);
2864 		mgmt_pending_remove(cmd);
2865 	}
2866 }
2867 
/* connect/security/disconnect callback used for BR/EDR pairing: any event
 * ends the pending Pair Device command with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2883 
/* LE variant of pairing_complete_cb: a successful event is ignored since
 * for LE a connection alone does not mean the pairing finished — success
 * is reported by SMP through mgmt_smp_complete(). Only failures terminate
 * the pending command here.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	/* Only failures are of interest at this point */
	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2902 
2903 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2904 		       u16 len)
2905 {
2906 	struct mgmt_cp_pair_device *cp = data;
2907 	struct mgmt_rp_pair_device rp;
2908 	struct mgmt_pending_cmd *cmd;
2909 	u8 sec_level, auth_type;
2910 	struct hci_conn *conn;
2911 	int err;
2912 
2913 	bt_dev_dbg(hdev, "sock %p", sk);
2914 
2915 	memset(&rp, 0, sizeof(rp));
2916 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2917 	rp.addr.type = cp->addr.type;
2918 
2919 	if (!bdaddr_type_is_valid(cp->addr.type))
2920 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2921 					 MGMT_STATUS_INVALID_PARAMS,
2922 					 &rp, sizeof(rp));
2923 
2924 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2925 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2926 					 MGMT_STATUS_INVALID_PARAMS,
2927 					 &rp, sizeof(rp));
2928 
2929 	hci_dev_lock(hdev);
2930 
2931 	if (!hdev_is_powered(hdev)) {
2932 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2933 					MGMT_STATUS_NOT_POWERED, &rp,
2934 					sizeof(rp));
2935 		goto unlock;
2936 	}
2937 
2938 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2939 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2940 					MGMT_STATUS_ALREADY_PAIRED, &rp,
2941 					sizeof(rp));
2942 		goto unlock;
2943 	}
2944 
2945 	sec_level = BT_SECURITY_MEDIUM;
2946 	auth_type = HCI_AT_DEDICATED_BONDING;
2947 
2948 	if (cp->addr.type == BDADDR_BREDR) {
2949 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2950 				       auth_type, CONN_REASON_PAIR_DEVICE);
2951 	} else {
2952 		u8 addr_type = le_addr_type(cp->addr.type);
2953 		struct hci_conn_params *p;
2954 
2955 		/* When pairing a new device, it is expected to remember
2956 		 * this device for future connections. Adding the connection
2957 		 * parameter information ahead of time allows tracking
2958 		 * of the peripheral preferred values and will speed up any
2959 		 * further connection establishment.
2960 		 *
2961 		 * If connection parameters already exist, then they
2962 		 * will be kept and this function does nothing.
2963 		 */
2964 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2965 
2966 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2967 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2968 
2969 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2970 					   sec_level, HCI_LE_CONN_TIMEOUT,
2971 					   CONN_REASON_PAIR_DEVICE);
2972 	}
2973 
2974 	if (IS_ERR(conn)) {
2975 		int status;
2976 
2977 		if (PTR_ERR(conn) == -EBUSY)
2978 			status = MGMT_STATUS_BUSY;
2979 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
2980 			status = MGMT_STATUS_NOT_SUPPORTED;
2981 		else if (PTR_ERR(conn) == -ECONNREFUSED)
2982 			status = MGMT_STATUS_REJECTED;
2983 		else
2984 			status = MGMT_STATUS_CONNECT_FAILED;
2985 
2986 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2987 					status, &rp, sizeof(rp));
2988 		goto unlock;
2989 	}
2990 
2991 	if (conn->connect_cfm_cb) {
2992 		hci_conn_drop(conn);
2993 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2994 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2995 		goto unlock;
2996 	}
2997 
2998 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2999 	if (!cmd) {
3000 		err = -ENOMEM;
3001 		hci_conn_drop(conn);
3002 		goto unlock;
3003 	}
3004 
3005 	cmd->cmd_complete = pairing_complete;
3006 
3007 	/* For LE, just connecting isn't a proof that the pairing finished */
3008 	if (cp->addr.type == BDADDR_BREDR) {
3009 		conn->connect_cfm_cb = pairing_complete_cb;
3010 		conn->security_cfm_cb = pairing_complete_cb;
3011 		conn->disconn_cfm_cb = pairing_complete_cb;
3012 	} else {
3013 		conn->connect_cfm_cb = le_pairing_complete_cb;
3014 		conn->security_cfm_cb = le_pairing_complete_cb;
3015 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3016 	}
3017 
3018 	conn->io_capability = cp->io_cap;
3019 	cmd->user_data = hci_conn_get(conn);
3020 
3021 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3022 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3023 		cmd->cmd_complete(cmd, 0);
3024 		mgmt_pending_remove(cmd);
3025 	}
3026 
3027 	err = 0;
3028 
3029 unlock:
3030 	hci_dev_unlock(hdev);
3031 	return err;
3032 }
3033 
3034 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3035 			      u16 len)
3036 {
3037 	struct mgmt_addr_info *addr = data;
3038 	struct mgmt_pending_cmd *cmd;
3039 	struct hci_conn *conn;
3040 	int err;
3041 
3042 	bt_dev_dbg(hdev, "sock %p", sk);
3043 
3044 	hci_dev_lock(hdev);
3045 
3046 	if (!hdev_is_powered(hdev)) {
3047 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3048 				      MGMT_STATUS_NOT_POWERED);
3049 		goto unlock;
3050 	}
3051 
3052 	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3053 	if (!cmd) {
3054 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3055 				      MGMT_STATUS_INVALID_PARAMS);
3056 		goto unlock;
3057 	}
3058 
3059 	conn = cmd->user_data;
3060 
3061 	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3062 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3063 				      MGMT_STATUS_INVALID_PARAMS);
3064 		goto unlock;
3065 	}
3066 
3067 	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3068 	mgmt_pending_remove(cmd);
3069 
3070 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3071 				addr, sizeof(*addr));
3072 
3073 	/* Since user doesn't want to proceed with the connection, abort any
3074 	 * ongoing pairing and then terminate the link if it was created
3075 	 * because of the pair device action.
3076 	 */
3077 	if (addr->type == BDADDR_BREDR)
3078 		hci_remove_link_key(hdev, &addr->bdaddr);
3079 	else
3080 		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3081 					      le_addr_type(addr->type));
3082 
3083 	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3084 		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3085 
3086 unlock:
3087 	hci_dev_unlock(hdev);
3088 	return err;
3089 }
3090 
3091 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3092 			     struct mgmt_addr_info *addr, u16 mgmt_op,
3093 			     u16 hci_op, __le32 passkey)
3094 {
3095 	struct mgmt_pending_cmd *cmd;
3096 	struct hci_conn *conn;
3097 	int err;
3098 
3099 	hci_dev_lock(hdev);
3100 
3101 	if (!hdev_is_powered(hdev)) {
3102 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3103 					MGMT_STATUS_NOT_POWERED, addr,
3104 					sizeof(*addr));
3105 		goto done;
3106 	}
3107 
3108 	if (addr->type == BDADDR_BREDR)
3109 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3110 	else
3111 		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3112 					       le_addr_type(addr->type));
3113 
3114 	if (!conn) {
3115 		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3116 					MGMT_STATUS_NOT_CONNECTED, addr,
3117 					sizeof(*addr));
3118 		goto done;
3119 	}
3120 
3121 	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3122 		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3123 		if (!err)
3124 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3125 						MGMT_STATUS_SUCCESS, addr,
3126 						sizeof(*addr));
3127 		else
3128 			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3129 						MGMT_STATUS_FAILED, addr,
3130 						sizeof(*addr));
3131 
3132 		goto done;
3133 	}
3134 
3135 	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3136 	if (!cmd) {
3137 		err = -ENOMEM;
3138 		goto done;
3139 	}
3140 
3141 	cmd->cmd_complete = addr_cmd_complete;
3142 
3143 	/* Continue with pairing via HCI */
3144 	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3145 		struct hci_cp_user_passkey_reply cp;
3146 
3147 		bacpy(&cp.bdaddr, &addr->bdaddr);
3148 		cp.passkey = passkey;
3149 		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3150 	} else
3151 		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3152 				   &addr->bdaddr);
3153 
3154 	if (err < 0)
3155 		mgmt_pending_remove(cmd);
3156 
3157 done:
3158 	hci_dev_unlock(hdev);
3159 	return err;
3160 }
3161 
/* MGMT_OP_PIN_CODE_NEG_REPLY: reject a PIN code request from the remote
 * device; thin wrapper around user_pairing_resp().
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3173 
/* MGMT_OP_USER_CONFIRM_REPLY: accept a user confirmation request; thin
 * wrapper around user_pairing_resp().
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* This command carries nothing beyond the address info */
	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3189 
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a user confirmation request;
 * thin wrapper around user_pairing_resp().
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3201 
/* MGMT_OP_USER_PASSKEY_REPLY: supply the passkey entered by the user;
 * thin wrapper around user_pairing_resp().
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3213 
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request; thin wrapper
 * around user_pairing_resp().
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3225 
/* If the currently active advertising instance includes any of the data
 * items indicated by @flags (e.g. local name or appearance), cancel its
 * timeout and schedule the next instance so the updated data takes
 * effect. No-op when no instance is active or none matches @flags.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	/* Look up the instance that is currently being advertised */
	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	/* Advance to the next instance; presumably this can cycle back to
	 * the same one when it is the only instance registered.
	 */
	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3254 
/* HCI request completion callback for MGMT_OP_SET_LOCAL_NAME.
 *
 * Reports the HCI result back to the socket that issued the pending
 * command and, on success, forces re-advertising if the current
 * instance carries the local name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed) */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Re-schedule advertising that includes the local name */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3286 
/* MGMT_OP_SET_LOCAL_NAME: update the controller's device name and short
 * name.
 *
 * Replies directly when nothing changed or the adapter is powered off;
 * otherwise queues HCI requests to update the controller name, EIR and
 * (when advertising) scan response data, deferring the mgmt reply to
 * set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: nothing to program into the controller, so store
	 * the name and notify listeners immediately.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3356 
/* MGMT_OP_SET_APPEARANCE: set the LE appearance value.
 *
 * Only valid on LE-capable controllers. When the value actually
 * changes while advertising, any instance carrying the appearance is
 * expired so it gets re-advertised with the new value.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);

		/* Appearance is part of the extended controller info */
		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3390 
3391 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3392 				 void *data, u16 len)
3393 {
3394 	struct mgmt_rp_get_phy_configuration rp;
3395 
3396 	bt_dev_dbg(hdev, "sock %p", sk);
3397 
3398 	hci_dev_lock(hdev);
3399 
3400 	memset(&rp, 0, sizeof(rp));
3401 
3402 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3403 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3404 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3405 
3406 	hci_dev_unlock(hdev);
3407 
3408 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3409 				 &rp, sizeof(rp));
3410 }
3411 
3412 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3413 {
3414 	struct mgmt_ev_phy_configuration_changed ev;
3415 
3416 	memset(&ev, 0, sizeof(ev));
3417 
3418 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3419 
3420 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3421 			  sizeof(ev), skip);
3422 }
3423 
/* HCI request completion callback for MGMT_OP_SET_PHY_CONFIGURATION
 * (the LE Set Default PHY part).
 *
 * Reports the result to the pending command's socket and, on success,
 * broadcasts the PHY Configuration Changed event to other sockets.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. socket closed) */
	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3454 
/* MGMT_OP_SET_PHY_CONFIGURATION: select the BR/EDR packet types and LE
 * PHYs the controller may use.
 *
 * The BR/EDR part is applied directly by rewriting hdev->pkt_type; the
 * LE part is programmed via HCI_OP_LE_SET_DEFAULT_PHY and completes
 * asynchronously in set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller doesn't support is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* All non-configurable PHYs must remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Map the BR/EDR PHY selection onto HCI packet-type bits. Note
	 * the polarity difference: the basic-rate multi-slot bits are set
	 * to *enable* the packet types, whereas the EDR (2M/3M) bits are
	 * set when the corresponding PHY is *not* selected (they appear
	 * to act as "do not use" bits per the HCI packet-type encoding —
	 * see the Core Spec Create Connection packet type definition).
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed there is no HCI command to
	 * issue; notify (if needed) and complete immediately.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys bits tell the controller "no preference" for TX/RX */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3609 
3610 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3611 			    u16 len)
3612 {
3613 	int err = MGMT_STATUS_SUCCESS;
3614 	struct mgmt_cp_set_blocked_keys *keys = data;
3615 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3616 				   sizeof(struct mgmt_blocked_key_info));
3617 	u16 key_count, expected_len;
3618 	int i;
3619 
3620 	bt_dev_dbg(hdev, "sock %p", sk);
3621 
3622 	key_count = __le16_to_cpu(keys->key_count);
3623 	if (key_count > max_key_count) {
3624 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3625 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3626 				       MGMT_STATUS_INVALID_PARAMS);
3627 	}
3628 
3629 	expected_len = struct_size(keys, keys, key_count);
3630 	if (expected_len != len) {
3631 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3632 			   expected_len, len);
3633 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3634 				       MGMT_STATUS_INVALID_PARAMS);
3635 	}
3636 
3637 	hci_dev_lock(hdev);
3638 
3639 	hci_blocked_keys_clear(hdev);
3640 
3641 	for (i = 0; i < keys->key_count; ++i) {
3642 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3643 
3644 		if (!b) {
3645 			err = MGMT_STATUS_NO_RESOURCES;
3646 			break;
3647 		}
3648 
3649 		b->type = keys->keys[i].type;
3650 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3651 		list_add_rcu(&b->list, &hdev->blocked_keys);
3652 	}
3653 	hci_dev_unlock(hdev);
3654 
3655 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3656 				err, NULL, 0);
3657 }
3658 
/* MGMT_OP_SET_WIDEBAND_SPEECH: toggle the wideband-speech (mSBC over
 * SCO) setting.
 *
 * Requires the HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED quirk, and the value
 * can only be changed while the controller is powered off.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Changing the value while powered is rejected; setting the same
	 * value again is allowed (falls through as a no-op below).
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings if something actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3714 
/* MGMT_OP_READ_CONTROLLER_CAP: report controller capabilities as a
 * sequence of EIR-style (length, type, value) entries.
 *
 * buf must be large enough for the fixed header plus the worst-case set
 * of entries appended below (security flags, two key-size entries and
 * the LE TX power range).
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		/* Copied byte-wise: the power values are single signed
		 * octets packed as min followed by max.
		 */
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3781 
/* UUIDs identifying the experimental features exposed through
 * MGMT_OP_READ/SET_EXP_FEATURE. Each array stores the bytes of the
 * canonical UUID string (given in the comment above it) in reverse
 * order, i.e. little-endian as used on the mgmt wire.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3813 
/* MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental features that
 * apply to @hdev (or the global features when called without a
 * controller index, i.e. hdev == NULL) together with their current
 * enabled/supported flag bits.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[102];   /* Enough space for 5 features: 2 + 20 * 5 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* The debug feature is global, only listed without an index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev) {
		/* BIT(0) set when the controller supports acting as
		 * Central and Peripheral simultaneously.
		 */
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		/* BIT(0) = enabled, BIT(1) = supported-settings change */
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Quality report depends on a driver-provided callback */
	if (hdev && hdev->set_quality_report) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Codec offload depends on a driver-provided callback */
	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3894 
3895 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3896 					  struct sock *skip)
3897 {
3898 	struct mgmt_ev_exp_feature_changed ev;
3899 
3900 	memset(&ev, 0, sizeof(ev));
3901 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
3902 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3903 
3904 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3905 				  &ev, sizeof(ev),
3906 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3907 
3908 }
3909 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Notify mgmt listeners (except @skip) that the global debug
 * experimental feature toggled. Sent without a controller index
 * (hdev == NULL).
 */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed evt;
	u32 flags = enabled ? BIT(0) : 0;

	memset(&evt, 0, sizeof(evt));
	memcpy(evt.uuid, debug_uuid, 16);
	evt.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
				  &evt, sizeof(evt),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#endif
3924 
3925 static int exp_quality_report_feature_changed(bool enabled,
3926 					      struct hci_dev *hdev,
3927 					      struct sock *skip)
3928 {
3929 	struct mgmt_ev_exp_feature_changed ev;
3930 
3931 	memset(&ev, 0, sizeof(ev));
3932 	memcpy(ev.uuid, quality_report_uuid, 16);
3933 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3934 
3935 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3936 				  &ev, sizeof(ev),
3937 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3938 }
3939 
/* Build one entry of an experimental-feature dispatch table, pairing a
 * feature UUID with its set-handler.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
3945 
/* The zero key uuid is special. Multiple exp features are set through it. */
/* Handles the all-zero UUID for MGMT_OP_SET_EXP_FEATURE: disables every
 * experimental feature that is currently enabled and applicable to the
 * given index (global debug feature when hdev == NULL, LL privacy when
 * a powered-off controller is addressed).
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* Reply mirrors the request: zero UUID, all flags cleared */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_debug_feature_changed(false, sk);
	}
#endif

	/* LL privacy can only be toggled while powered off */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);

		hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

		if (changed)
			exp_ll_privacy_feature_changed(false, hdev, sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
3981 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* MGMT_OP_SET_EXP_FEATURE handler for the global debug feature: toggles
 * kernel Bluetooth debug output via bt_dbg_set(). Only valid on the
 * non-controller (global) index with a single boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Broadcast the change to other listeners */
	if (changed)
		exp_debug_feature_changed(val, sk);

	return err;
}
#endif
4028 
/* MGMT_OP_SET_EXP_FEATURE handler for the LL privacy (controller-based
 * RPA resolution) feature. Requires a controller index and a powered
 * down controller; takes a single boolean parameter octet.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
		hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
		/* Advertising is cleared when enabling LL privacy;
		 * NOTE(review): presumably because software-based
		 * advertising is incompatible with it — confirm.
		 */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Broadcast the change to other listeners */
	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4093 
/* MGMT_OP_SET_EXP_FEATURE handler for the quality-report feature.
 *
 * Delegates the actual enable/disable to the driver's
 * set_quality_report() callback under the request-sync lock, and only
 * updates HCI_QUALITY_REPORT when the callback succeeds.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* The feature requires a driver callback */
	if (!hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		err = hdev->set_quality_report(hdev, val);
		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}
		/* Flag only tracks the state the driver accepted */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Broadcast the change to other listeners */
	if (changed)
		exp_quality_report_feature_changed(val, hdev, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4162 
4163 static int exp_offload_codec_feature_changed(bool enabled, struct hci_dev *hdev,
4164 					     struct sock *skip)
4165 {
4166 	struct mgmt_ev_exp_feature_changed ev;
4167 
4168 	memset(&ev, 0, sizeof(ev));
4169 	memcpy(ev.uuid, offload_codecs_uuid, 16);
4170 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4171 
4172 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4173 				  &ev, sizeof(ev),
4174 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4175 }
4176 
/* MGMT_OP_SET_EXP_FEATURE handler for the codec-offload feature:
 * toggles HCI_OFFLOAD_CODECS_ENABLED. Requires the driver to provide a
 * get_data_path_id() callback; takes a single boolean parameter octet.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	/* The feature requires a driver callback */
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Broadcast the change to other listeners */
	if (changed)
		exp_offload_codec_feature_changed(val, hdev, sk);

	return err;
}
4234 
/* Dispatch table mapping experimental-feature UUIDs to their
 * MGMT_OP_SET_EXP_FEATURE handlers; terminated by a NULL-uuid entry.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4251 
4252 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4253 			   void *data, u16 data_len)
4254 {
4255 	struct mgmt_cp_set_exp_feature *cp = data;
4256 	size_t i = 0;
4257 
4258 	bt_dev_dbg(hdev, "sock %p", sk);
4259 
4260 	for (i = 0; exp_features[i].uuid; i++) {
4261 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4262 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4263 	}
4264 
4265 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4266 			       MGMT_OP_SET_EXP_FEATURE,
4267 			       MGMT_STATUS_NOT_SUPPORTED);
4268 }
4269 
/* Bitmask with every kernel-supported device flag set
 * (bits 0 .. HCI_CONN_FLAG_MAX-1).
 */
#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4271 
4272 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4273 			    u16 data_len)
4274 {
4275 	struct mgmt_cp_get_device_flags *cp = data;
4276 	struct mgmt_rp_get_device_flags rp;
4277 	struct bdaddr_list_with_flags *br_params;
4278 	struct hci_conn_params *params;
4279 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4280 	u32 current_flags = 0;
4281 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4282 
4283 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4284 		   &cp->addr.bdaddr, cp->addr.type);
4285 
4286 	hci_dev_lock(hdev);
4287 
4288 	memset(&rp, 0, sizeof(rp));
4289 
4290 	if (cp->addr.type == BDADDR_BREDR) {
4291 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4292 							      &cp->addr.bdaddr,
4293 							      cp->addr.type);
4294 		if (!br_params)
4295 			goto done;
4296 
4297 		current_flags = br_params->current_flags;
4298 	} else {
4299 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4300 						le_addr_type(cp->addr.type));
4301 
4302 		if (!params)
4303 			goto done;
4304 
4305 		current_flags = params->current_flags;
4306 	}
4307 
4308 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4309 	rp.addr.type = cp->addr.type;
4310 	rp.supported_flags = cpu_to_le32(supported_flags);
4311 	rp.current_flags = cpu_to_le32(current_flags);
4312 
4313 	status = MGMT_STATUS_SUCCESS;
4314 
4315 done:
4316 	hci_dev_unlock(hdev);
4317 
4318 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4319 				&rp, sizeof(rp));
4320 }
4321 
4322 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4323 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4324 				 u32 supported_flags, u32 current_flags)
4325 {
4326 	struct mgmt_ev_device_flags_changed ev;
4327 
4328 	bacpy(&ev.addr.bdaddr, bdaddr);
4329 	ev.addr.type = bdaddr_type;
4330 	ev.supported_flags = cpu_to_le32(supported_flags);
4331 	ev.current_flags = cpu_to_le32(current_flags);
4332 
4333 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4334 }
4335 
4336 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4337 			    u16 len)
4338 {
4339 	struct mgmt_cp_set_device_flags *cp = data;
4340 	struct bdaddr_list_with_flags *br_params;
4341 	struct hci_conn_params *params;
4342 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4343 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4344 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4345 
4346 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4347 		   &cp->addr.bdaddr, cp->addr.type,
4348 		   __le32_to_cpu(current_flags));
4349 
4350 	if ((supported_flags | current_flags) != supported_flags) {
4351 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4352 			    current_flags, supported_flags);
4353 		goto done;
4354 	}
4355 
4356 	hci_dev_lock(hdev);
4357 
4358 	if (cp->addr.type == BDADDR_BREDR) {
4359 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4360 							      &cp->addr.bdaddr,
4361 							      cp->addr.type);
4362 
4363 		if (br_params) {
4364 			br_params->current_flags = current_flags;
4365 			status = MGMT_STATUS_SUCCESS;
4366 		} else {
4367 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4368 				    &cp->addr.bdaddr, cp->addr.type);
4369 		}
4370 	} else {
4371 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4372 						le_addr_type(cp->addr.type));
4373 		if (params) {
4374 			params->current_flags = current_flags;
4375 			status = MGMT_STATUS_SUCCESS;
4376 		} else {
4377 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4378 				    &cp->addr.bdaddr,
4379 				    le_addr_type(cp->addr.type));
4380 		}
4381 	}
4382 
4383 done:
4384 	hci_dev_unlock(hdev);
4385 
4386 	if (status == MGMT_STATUS_SUCCESS)
4387 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4388 				     supported_flags, current_flags);
4389 
4390 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4391 				 &cp->addr, sizeof(cp->addr));
4392 }
4393 
4394 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4395 				   u16 handle)
4396 {
4397 	struct mgmt_ev_adv_monitor_added ev;
4398 
4399 	ev.monitor_handle = cpu_to_le16(handle);
4400 
4401 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4402 }
4403 
/* Notify mgmt sockets that an advertising monitor was removed. If the
 * removal was requested via MGMT_OP_REMOVE_ADV_MONITOR for a specific
 * (non-zero) handle, the requesting socket is skipped: it receives
 * the command response instead of the event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		/* handle 0 means "remove all": the requester then also
		 * gets the per-handle removed events.
		 */
		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
4423 
/* Handler for MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported and
 * enabled monitor features plus the list of currently registered
 * monitor handles. The reply is variable length, so it is heap
 * allocated.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	/* NOTE(review): assumes the IDR never holds more than
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES monitors — enforced at add
	 * time elsewhere; verify, since the copy below is unbounded.
	 */
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* Snapshot all registered monitor handles under the lock. */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4472 
4473 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4474 {
4475 	struct mgmt_rp_add_adv_patterns_monitor rp;
4476 	struct mgmt_pending_cmd *cmd;
4477 	struct adv_monitor *monitor;
4478 	int err = 0;
4479 
4480 	hci_dev_lock(hdev);
4481 
4482 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4483 	if (!cmd) {
4484 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4485 		if (!cmd)
4486 			goto done;
4487 	}
4488 
4489 	monitor = cmd->user_data;
4490 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4491 
4492 	if (!status) {
4493 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4494 		hdev->adv_monitors_cnt++;
4495 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4496 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4497 		hci_update_passive_scan(hdev);
4498 	}
4499 
4500 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4501 				mgmt_status(status), &rp, sizeof(rp));
4502 	mgmt_pending_remove(cmd);
4503 
4504 done:
4505 	hci_dev_unlock(hdev);
4506 	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4507 		   rp.monitor_handle, status);
4508 
4509 	return err;
4510 }
4511 
/* Common tail for both Add Adv Patterns Monitor variants. Takes
 * ownership of @m: on any error path it is released via
 * hci_free_adv_monitor(); otherwise ownership passes to the monitor
 * IDR through hci_add_adv_monitor(). @status carries a pre-parse
 * failure from the caller (non-zero aborts immediately). If the add
 * must be forwarded to the controller, the reply is deferred to
 * mgmt_add_adv_patterns_monitor_complete(); otherwise it is sent here.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Serialize against other monitor/LE state changes. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	/* pending == true means the request was forwarded to the
	 * controller and completion will arrive asynchronously.
	 */
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	if (!pending) {
		/* Registered without controller involvement; reply now. */
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4575 
4576 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4577 				   struct mgmt_adv_rssi_thresholds *rssi)
4578 {
4579 	if (rssi) {
4580 		m->rssi.low_threshold = rssi->low_threshold;
4581 		m->rssi.low_threshold_timeout =
4582 		    __le16_to_cpu(rssi->low_threshold_timeout);
4583 		m->rssi.high_threshold = rssi->high_threshold;
4584 		m->rssi.high_threshold_timeout =
4585 		    __le16_to_cpu(rssi->high_threshold_timeout);
4586 		m->rssi.sampling_period = rssi->sampling_period;
4587 	} else {
4588 		/* Default values. These numbers are the least constricting
4589 		 * parameters for MSFT API to work, so it behaves as if there
4590 		 * are no rssi parameter to consider. May need to be changed
4591 		 * if other API are to be supported.
4592 		 */
4593 		m->rssi.low_threshold = -127;
4594 		m->rssi.low_threshold_timeout = 60;
4595 		m->rssi.high_threshold = -127;
4596 		m->rssi.high_threshold_timeout = 0;
4597 		m->rssi.sampling_period = 0;
4598 	}
4599 }
4600 
4601 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4602 				    struct mgmt_adv_pattern *patterns)
4603 {
4604 	u8 offset = 0, length = 0;
4605 	struct adv_pattern *p = NULL;
4606 	int i;
4607 
4608 	for (i = 0; i < pattern_count; i++) {
4609 		offset = patterns[i].offset;
4610 		length = patterns[i].length;
4611 		if (offset >= HCI_MAX_AD_LENGTH ||
4612 		    length > HCI_MAX_AD_LENGTH ||
4613 		    (offset + length) > HCI_MAX_AD_LENGTH)
4614 			return MGMT_STATUS_INVALID_PARAMS;
4615 
4616 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4617 		if (!p)
4618 			return MGMT_STATUS_NO_RESOURCES;
4619 
4620 		p->ad_type = patterns[i].ad_type;
4621 		p->offset = patterns[i].offset;
4622 		p->length = patterns[i].length;
4623 		memcpy(p->value, patterns[i].value, p->length);
4624 
4625 		INIT_LIST_HEAD(&p->list);
4626 		list_add(&p->list, &m->patterns);
4627 	}
4628 
4629 	return MGMT_STATUS_SUCCESS;
4630 }
4631 
/* Handler for MGMT_OP_ADD_ADV_PATTERNS_MONITOR (no RSSI thresholds):
 * validate the variable-length pattern payload, build the monitor
 * with default RSSI parameters and hand off to
 * __add_adv_patterns_monitor(), which owns @m from then on (including
 * the m == NULL / error-status cases).
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Must carry at least one pattern beyond the fixed header. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	/* NULL rssi selects the permissive defaults. */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}
4668 
/* Handler for MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: same as
 * add_adv_patterns_monitor() but the request additionally carries
 * RSSI thresholds, which are copied into the monitor.
 * __add_adv_patterns_monitor() owns @m from the call onward.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	/* Must carry at least one pattern beyond the fixed header. */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}
4705 
4706 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4707 {
4708 	struct mgmt_rp_remove_adv_monitor rp;
4709 	struct mgmt_cp_remove_adv_monitor *cp;
4710 	struct mgmt_pending_cmd *cmd;
4711 	int err = 0;
4712 
4713 	hci_dev_lock(hdev);
4714 
4715 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4716 	if (!cmd)
4717 		goto done;
4718 
4719 	cp = cmd->param;
4720 	rp.monitor_handle = cp->monitor_handle;
4721 
4722 	if (!status)
4723 		hci_update_passive_scan(hdev);
4724 
4725 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4726 				mgmt_status(status), &rp, sizeof(rp));
4727 	mgmt_pending_remove(cmd);
4728 
4729 done:
4730 	hci_dev_unlock(hdev);
4731 	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4732 		   rp.monitor_handle, status);
4733 
4734 	return err;
4735 }
4736 
/* Handler for MGMT_OP_REMOVE_ADV_MONITOR: remove one monitor (handle
 * != 0) or all monitors (handle == 0). If the removal needs the
 * controller, the reply is deferred to
 * mgmt_remove_adv_monitor_complete(); otherwise it is sent here.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Serialize against other monitor/LE state changes. */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		/* -ENOENT: no monitor with that handle exists. */
		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4801 
/* HCI request callback for Read Local OOB Data: translate the
 * controller's reply (legacy P-192-only or extended P-192+P-256,
 * selected by @opcode) into the mgmt response for the pending
 * MGMT_OP_READ_LOCAL_OOB_DATA command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: only the P-192 hash/randomizer. */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Truncate the mgmt reply: no P-256 fields to report. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values. */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4860 
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: queue the appropriate HCI
 * read (extended when BR/EDR Secure Connections is enabled) and defer
 * the mgmt reply to read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* OOB data comes from Secure Simple Pairing, so SSP support
	 * is required.
	 */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one outstanding read at a time. */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4911 
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA: store out-of-band pairing
 * data for a remote device. Two request sizes are accepted: the
 * legacy form with only P-192 values (BR/EDR only) and the extended
 * form carrying both P-192 and P-256 values. Zero-valued hash or
 * randomizer pairs disable the corresponding key strength.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy request: P-192 values only, BR/EDR addresses. */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended request: P-192 and P-256 values. */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5019 
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA: drop stored OOB data
 * for one BR/EDR device, or for all devices when the address is
 * BDADDR_ANY.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	/* BDADDR_ANY selects a wholesale clear. */
	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}
5056 
/* Completion hook for the start-discovery family: finish whichever of
 * the three start-discovery commands is pending and, if a suspend
 * transition was waiting on discovery to unpause, wake it up.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* Any one of the three start variants may be pending. */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
5086 
5087 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5088 				    uint8_t *mgmt_status)
5089 {
5090 	switch (type) {
5091 	case DISCOV_TYPE_LE:
5092 		*mgmt_status = mgmt_le_support(hdev);
5093 		if (*mgmt_status)
5094 			return false;
5095 		break;
5096 	case DISCOV_TYPE_INTERLEAVED:
5097 		*mgmt_status = mgmt_le_support(hdev);
5098 		if (*mgmt_status)
5099 			return false;
5100 		fallthrough;
5101 	case DISCOV_TYPE_BREDR:
5102 		*mgmt_status = mgmt_bredr_support(hdev);
5103 		if (*mgmt_status)
5104 			return false;
5105 		break;
5106 	default:
5107 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5108 		return false;
5109 	}
5110 
5111 	return true;
5112 }
5113 
/* hci_cmd_sync completion for start discovery: reply to the pending
 * command (echoing the one-byte discovery type from cmd->param),
 * wake any suspend waiter, and move the discovery state machine to
 * FINDING on success or back to STOPPED on failure.
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* NOTE(review): err here is a -errno from the sync machinery;
	 * assumes mgmt_status() maps negative errno values — confirm
	 * against its implementation.
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
				DISCOVERY_FINDING);
}
5134 
/* hci_cmd_sync work function: thin adapter so hci_cmd_sync_queue()
 * can drive hci_start_discovery_sync(); @data (the pending cmd) is
 * unused here and handled by the completion callback.
 */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5139 
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY: validate state and type, set up
 * hdev->discovery, and queue the discovery start on the cmd_sync
 * machinery. The mgmt reply is deferred to start_discovery_complete().
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session at a time. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_new(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5210 
/* Handler for MGMT_OP_START_DISCOVERY. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5217 
/* Handler for MGMT_OP_START_LIMITED_DISCOVERY (limited inquiry mode). */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5225 
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like start discovery
 * but with result filtering by RSSI and an optional list of 128-bit
 * service UUIDs appended to the request.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound that keeps sizeof(*cp) + count * 16 within u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session at a time. */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The request length must exactly match the declared UUID count. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5337 
5338 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5339 {
5340 	struct mgmt_pending_cmd *cmd;
5341 
5342 	bt_dev_dbg(hdev, "status %u", status);
5343 
5344 	hci_dev_lock(hdev);
5345 
5346 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5347 	if (cmd) {
5348 		cmd->cmd_complete(cmd, mgmt_status(status));
5349 		mgmt_pending_remove(cmd);
5350 	}
5351 
5352 	hci_dev_unlock(hdev);
5353 
5354 	/* Handle suspend notifier */
5355 	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
5356 		bt_dev_dbg(hdev, "Paused discovery");
5357 		wake_up(&hdev->suspend_wait_q);
5358 	}
5359 }
5360 
/* Completion for the stop_discovery hci_cmd_sync work: reply to the
 * pending command, wake any suspend waiter and reset the discovery
 * state on success.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Respond with the first byte of the stored request (the
	 * discovery type).
	 */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_free(cmd);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
5380 
/* hci_cmd_sync work item: stop the currently running discovery */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
5385 
5386 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5387 			  u16 len)
5388 {
5389 	struct mgmt_cp_stop_discovery *mgmt_cp = data;
5390 	struct mgmt_pending_cmd *cmd;
5391 	int err;
5392 
5393 	bt_dev_dbg(hdev, "sock %p", sk);
5394 
5395 	hci_dev_lock(hdev);
5396 
5397 	if (!hci_discovery_active(hdev)) {
5398 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5399 					MGMT_STATUS_REJECTED, &mgmt_cp->type,
5400 					sizeof(mgmt_cp->type));
5401 		goto unlock;
5402 	}
5403 
5404 	if (hdev->discovery.type != mgmt_cp->type) {
5405 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5406 					MGMT_STATUS_INVALID_PARAMS,
5407 					&mgmt_cp->type, sizeof(mgmt_cp->type));
5408 		goto unlock;
5409 	}
5410 
5411 	cmd = mgmt_pending_new(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
5412 	if (!cmd) {
5413 		err = -ENOMEM;
5414 		goto unlock;
5415 	}
5416 
5417 	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
5418 				 stop_discovery_complete);
5419 	if (err < 0) {
5420 		mgmt_pending_free(cmd);
5421 		goto unlock;
5422 	}
5423 
5424 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5425 
5426 unlock:
5427 	hci_dev_unlock(hdev);
5428 	return err;
5429 }
5430 
5431 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5432 			u16 len)
5433 {
5434 	struct mgmt_cp_confirm_name *cp = data;
5435 	struct inquiry_entry *e;
5436 	int err;
5437 
5438 	bt_dev_dbg(hdev, "sock %p", sk);
5439 
5440 	hci_dev_lock(hdev);
5441 
5442 	if (!hci_discovery_active(hdev)) {
5443 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5444 					MGMT_STATUS_FAILED, &cp->addr,
5445 					sizeof(cp->addr));
5446 		goto failed;
5447 	}
5448 
5449 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5450 	if (!e) {
5451 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5452 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5453 					sizeof(cp->addr));
5454 		goto failed;
5455 	}
5456 
5457 	if (cp->name_known) {
5458 		e->name_state = NAME_KNOWN;
5459 		list_del(&e->list);
5460 	} else {
5461 		e->name_state = NAME_NEEDED;
5462 		hci_inquiry_cache_update_resolve(hdev, e);
5463 	}
5464 
5465 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5466 				&cp->addr, sizeof(cp->addr));
5467 
5468 failed:
5469 	hci_dev_unlock(hdev);
5470 	return err;
5471 }
5472 
5473 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5474 			u16 len)
5475 {
5476 	struct mgmt_cp_block_device *cp = data;
5477 	u8 status;
5478 	int err;
5479 
5480 	bt_dev_dbg(hdev, "sock %p", sk);
5481 
5482 	if (!bdaddr_type_is_valid(cp->addr.type))
5483 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5484 					 MGMT_STATUS_INVALID_PARAMS,
5485 					 &cp->addr, sizeof(cp->addr));
5486 
5487 	hci_dev_lock(hdev);
5488 
5489 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5490 				  cp->addr.type);
5491 	if (err < 0) {
5492 		status = MGMT_STATUS_FAILED;
5493 		goto done;
5494 	}
5495 
5496 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5497 		   sk);
5498 	status = MGMT_STATUS_SUCCESS;
5499 
5500 done:
5501 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5502 				&cp->addr, sizeof(cp->addr));
5503 
5504 	hci_dev_unlock(hdev);
5505 
5506 	return err;
5507 }
5508 
5509 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5510 			  u16 len)
5511 {
5512 	struct mgmt_cp_unblock_device *cp = data;
5513 	u8 status;
5514 	int err;
5515 
5516 	bt_dev_dbg(hdev, "sock %p", sk);
5517 
5518 	if (!bdaddr_type_is_valid(cp->addr.type))
5519 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5520 					 MGMT_STATUS_INVALID_PARAMS,
5521 					 &cp->addr, sizeof(cp->addr));
5522 
5523 	hci_dev_lock(hdev);
5524 
5525 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5526 				  cp->addr.type);
5527 	if (err < 0) {
5528 		status = MGMT_STATUS_INVALID_PARAMS;
5529 		goto done;
5530 	}
5531 
5532 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5533 		   sk);
5534 	status = MGMT_STATUS_SUCCESS;
5535 
5536 done:
5537 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5538 				&cp->addr, sizeof(cp->addr));
5539 
5540 	hci_dev_unlock(hdev);
5541 
5542 	return err;
5543 }
5544 
/* hci_cmd_sync work item: regenerate the EIR data; queued by
 * set_device_id() after the Device ID values were updated.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
5549 
5550 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5551 			 u16 len)
5552 {
5553 	struct mgmt_cp_set_device_id *cp = data;
5554 	int err;
5555 	__u16 source;
5556 
5557 	bt_dev_dbg(hdev, "sock %p", sk);
5558 
5559 	source = __le16_to_cpu(cp->source);
5560 
5561 	if (source > 0x0002)
5562 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5563 				       MGMT_STATUS_INVALID_PARAMS);
5564 
5565 	hci_dev_lock(hdev);
5566 
5567 	hdev->devid_source = source;
5568 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5569 	hdev->devid_product = __le16_to_cpu(cp->product);
5570 	hdev->devid_version = __le16_to_cpu(cp->version);
5571 
5572 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
5573 				NULL, 0);
5574 
5575 	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
5576 
5577 	hci_dev_unlock(hdev);
5578 
5579 	return err;
5580 }
5581 
/* HCI request completion for re-enabling instance advertising; only
 * logs the resulting status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %u", status);
}
5587 
/* HCI request completion for set_advertising(): syncs the
 * HCI_ADVERTISING flag with the controller state, responds to all
 * pending Set Advertising commands, wakes suspend waiters and, when the
 * setting was just disabled, restores instance based advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail every pending Set Advertising command */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's LE advertising state in the setting */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* With no current instance, fall back to the first registered one */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5661 
/* MGMT_OP_SET_ADVERTISING handler. val may be 0x00 (off), 0x01 (on) or
 * 0x02 (on and connectable). Requires LE support and is rejected while
 * advertising is paused.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Don't change the setting while advertising is paused */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only emit New Settings when a flag actually changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising or Set LE operation at a time */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5773 
5774 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5775 			      void *data, u16 len)
5776 {
5777 	struct mgmt_cp_set_static_address *cp = data;
5778 	int err;
5779 
5780 	bt_dev_dbg(hdev, "sock %p", sk);
5781 
5782 	if (!lmp_le_capable(hdev))
5783 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5784 				       MGMT_STATUS_NOT_SUPPORTED);
5785 
5786 	if (hdev_is_powered(hdev))
5787 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5788 				       MGMT_STATUS_REJECTED);
5789 
5790 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5791 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5792 			return mgmt_cmd_status(sk, hdev->id,
5793 					       MGMT_OP_SET_STATIC_ADDRESS,
5794 					       MGMT_STATUS_INVALID_PARAMS);
5795 
5796 		/* Two most significant bits shall be set */
5797 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5798 			return mgmt_cmd_status(sk, hdev->id,
5799 					       MGMT_OP_SET_STATIC_ADDRESS,
5800 					       MGMT_STATUS_INVALID_PARAMS);
5801 	}
5802 
5803 	hci_dev_lock(hdev);
5804 
5805 	bacpy(&hdev->static_addr, &cp->bdaddr);
5806 
5807 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5808 	if (err < 0)
5809 		goto unlock;
5810 
5811 	err = new_settings(hdev, sk);
5812 
5813 unlock:
5814 	hci_dev_unlock(hdev);
5815 	return err;
5816 }
5817 
5818 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5819 			   void *data, u16 len)
5820 {
5821 	struct mgmt_cp_set_scan_params *cp = data;
5822 	__u16 interval, window;
5823 	int err;
5824 
5825 	bt_dev_dbg(hdev, "sock %p", sk);
5826 
5827 	if (!lmp_le_capable(hdev))
5828 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5829 				       MGMT_STATUS_NOT_SUPPORTED);
5830 
5831 	interval = __le16_to_cpu(cp->interval);
5832 
5833 	if (interval < 0x0004 || interval > 0x4000)
5834 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5835 				       MGMT_STATUS_INVALID_PARAMS);
5836 
5837 	window = __le16_to_cpu(cp->window);
5838 
5839 	if (window < 0x0004 || window > 0x4000)
5840 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5841 				       MGMT_STATUS_INVALID_PARAMS);
5842 
5843 	if (window > interval)
5844 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5845 				       MGMT_STATUS_INVALID_PARAMS);
5846 
5847 	hci_dev_lock(hdev);
5848 
5849 	hdev->le_scan_interval = interval;
5850 	hdev->le_scan_window = window;
5851 
5852 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5853 				NULL, 0);
5854 
5855 	/* If background scan is running, restart it so new parameters are
5856 	 * loaded.
5857 	 */
5858 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5859 	    hdev->discovery.state == DISCOVERY_STOPPED)
5860 		hci_update_passive_scan(hdev);
5861 
5862 	hci_dev_unlock(hdev);
5863 
5864 	return err;
5865 }
5866 
5867 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
5868 {
5869 	struct mgmt_pending_cmd *cmd = data;
5870 
5871 	bt_dev_dbg(hdev, "err %d", err);
5872 
5873 	if (err) {
5874 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5875 				mgmt_status(err));
5876 	} else {
5877 		struct mgmt_mode *cp = cmd->param;
5878 
5879 		if (cp->val)
5880 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
5881 		else
5882 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
5883 
5884 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5885 		new_settings(hdev, cmd->sk);
5886 	}
5887 
5888 	mgmt_pending_free(cmd);
5889 }
5890 
/* hci_cmd_sync work item: apply the fast connectable mode requested in
 * the pending command's parameters.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
5898 
/* MGMT_OP_SET_FAST_CONNECTABLE handler. Toggles fast connectable mode
 * on BR/EDR controllers; the actual controller update runs via
 * hci_cmd_sync when the device is powered.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires BR/EDR and at least Bluetooth version 1.2 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op when the flag already matches the requested value */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag needs to be toggled */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		/* cmd may be NULL when allocation failed above */
		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5954 
5955 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
5956 {
5957 	struct mgmt_pending_cmd *cmd = data;
5958 
5959 	bt_dev_dbg(hdev, "err %d", err);
5960 
5961 	if (err) {
5962 		u8 mgmt_err = mgmt_status(err);
5963 
5964 		/* We need to restore the flag if related HCI commands
5965 		 * failed.
5966 		 */
5967 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
5968 
5969 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5970 	} else {
5971 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5972 		new_settings(hdev, cmd->sk);
5973 	}
5974 
5975 	mgmt_pending_free(cmd);
5976 }
5977 
5978 static int set_bredr_sync(struct hci_dev *hdev, void *data)
5979 {
5980 	int status;
5981 
5982 	status = hci_write_fast_connectable_sync(hdev, false);
5983 
5984 	if (!status)
5985 		status = hci_update_scan_sync(hdev);
5986 
5987 	/* Since only the advertising data flags will change, there
5988 	 * is no need to update the scan response data.
5989 	 */
5990 	if (!status)
5991 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
5992 
5993 	return status;
5994 }
5995 
/* MGMT_OP_SET_BREDR handler. Enables or disables BR/EDR support on a
 * dual-mode (BR/EDR + LE) controller. Disabling while powered on is
 * rejected, and so is re-enabling when a static address or secure
 * connections is in use.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR can only be toggled while LE is enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No-op when the flag already matches the requested value */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off only the flags need to be adjusted */
	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		/* cmd may be NULL when allocation failed above */
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6096 
/* HCI request completion for Set Secure Connections: update the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags according to the requested value and
 * respond to the pending command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	cp = cmd->param;

	/* 0x00 = off, 0x01 = SC enabled, 0x02 = SC only mode */
	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
6141 
/* MGMT_OP_SET_SECURE_CONN handler. val may be 0x00 (off), 0x01 (on) or
 * 0x02 (secure connections only mode). When the controller cannot be
 * involved (powered off, no SC support or BR/EDR disabled) only the
 * flags are updated; otherwise HCI_OP_WRITE_SC_SUPPORT is issued.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Supported when the controller has SC or when LE is enabled
	 * (LE SC is a host feature).
	 */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* On a SC capable BR/EDR controller SSP must be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flags-only update when no HCI communication is possible/needed */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* Only one Set Secure Connections operation at a time */
	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No-op when both flags already match the requested value */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6229 
6230 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6231 			  void *data, u16 len)
6232 {
6233 	struct mgmt_mode *cp = data;
6234 	bool changed, use_changed;
6235 	int err;
6236 
6237 	bt_dev_dbg(hdev, "sock %p", sk);
6238 
6239 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6240 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6241 				       MGMT_STATUS_INVALID_PARAMS);
6242 
6243 	hci_dev_lock(hdev);
6244 
6245 	if (cp->val)
6246 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6247 	else
6248 		changed = hci_dev_test_and_clear_flag(hdev,
6249 						      HCI_KEEP_DEBUG_KEYS);
6250 
6251 	if (cp->val == 0x02)
6252 		use_changed = !hci_dev_test_and_set_flag(hdev,
6253 							 HCI_USE_DEBUG_KEYS);
6254 	else
6255 		use_changed = hci_dev_test_and_clear_flag(hdev,
6256 							  HCI_USE_DEBUG_KEYS);
6257 
6258 	if (hdev_is_powered(hdev) && use_changed &&
6259 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6260 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6261 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6262 			     sizeof(mode), &mode);
6263 	}
6264 
6265 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6266 	if (err < 0)
6267 		goto unlock;
6268 
6269 	if (changed)
6270 		err = new_settings(hdev, sk);
6271 
6272 unlock:
6273 	hci_dev_unlock(hdev);
6274 	return err;
6275 }
6276 
/* MGMT_OP_SET_PRIVACY handler. Only allowed while powered off. privacy
 * may be 0x00 (off), 0x01 (on) or 0x02 (limited privacy); when enabled
 * the supplied IRK becomes the local identity resolving key.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		/* Adopt the new IRK and force generation of a fresh RPA */
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the IRK when privacy is switched off */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6333 
6334 static bool irk_is_valid(struct mgmt_irk_info *irk)
6335 {
6336 	switch (irk->addr.type) {
6337 	case BDADDR_LE_PUBLIC:
6338 		return true;
6339 
6340 	case BDADDR_LE_RANDOM:
6341 		/* Two most significant bits shall be set */
6342 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6343 			return false;
6344 		return true;
6345 	}
6346 
6347 	return false;
6348 }
6349 
/* MGMT_OP_LOAD_IRKS handler. Validates the supplied list of identity
 * resolving keys and replaces the current SMP IRK store with it,
 * skipping any keys on the blocked key list.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound so that the total parameter length fits in u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual parameter length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Reject the whole load if any single entry is invalid */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6420 
6421 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6422 {
6423 	if (key->initiator != 0x00 && key->initiator != 0x01)
6424 		return false;
6425 
6426 	switch (key->addr.type) {
6427 	case BDADDR_LE_PUBLIC:
6428 		return true;
6429 
6430 	case BDADDR_LE_RANDOM:
6431 		/* Two most significant bits shall be set */
6432 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6433 			return false;
6434 		return true;
6435 	}
6436 
6437 	return false;
6438 }
6439 
/* Handle the MGMT Load Long Term Keys command: replace the stored set of
 * SMP LTKs with the list supplied by userspace. The list is validated in
 * full before the existing keys are discarded; blocked keys and debug
 * keys are silently skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on how many LTK entries can fit in a single mgmt
	 * packet, since the packet length is a u16.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* LTKs are an LE-only concept */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The packet length must match the advertised key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry up front so the currently stored LTKs are
	 * only cleared once the whole command is known to be well-formed.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* Loading is a full replace: drop all currently stored LTKs */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Skip keys an administrator has explicitly blocked */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Map the mgmt key type onto the SMP key type and the
		 * authentication level used for storage.
		 */
		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys (like unknown types) are deliberately
			 * not stored: fall through into the skip below.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6535 
/* Completion callback for get_conn_info_sync: reply to the pending Get
 * Connection Information command with the values cached on the hci_conn
 * (on success) or the "invalid" sentinels (on failure), then release the
 * connection references taken when the command was queued.
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	/* Echo back the address the command was issued for */
	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* Report sentinel values so userspace knows nothing valid
		 * was read.
		 */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	/* Drop the hold and reference taken in get_conn_info(); conn may
	 * already be NULL if get_conn_info_sync() released it.
	 */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	mgmt_pending_free(cmd);
}
6569 
/* hci_cmd_sync work for Get Connection Information: re-validate that the
 * target connection still exists and is the one the command was queued
 * for, then refresh RSSI and (when needed) TX power readings from the
 * controller. Runs from the cmd_sync context; the result is delivered to
 * get_conn_info_complete().
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
		/* Connection went away (or was replaced): release the
		 * references taken when the command was queued so the
		 * completion callback does not double-drop them.
		 */
		if (cmd->user_data) {
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
		return MGMT_STATUS_NOT_CONNECTED;
	}

	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
6612 
/* Handle the MGMT Get Connection Information command. If the cached
 * RSSI/TX power values on the connection are fresh enough they are
 * returned immediately; otherwise a sync work item is queued to re-read
 * them from the controller and the reply is sent from its completion
 * callback.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply with the requested address so every error
	 * path can reuse it.
	 */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Look up the link on the transport matching the address type */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd)
			err = -ENOMEM;
		else
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		/* Keep the connection alive until the sync work completes;
		 * released in get_conn_info_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6706 
/* cmd_complete handler for Get Clock Information: build the reply from
 * the local clock stored on the hdev and, when a connection is attached
 * to the pending command, its piconet clock and accuracy. Releases the
 * connection references taken when the command was queued.
 */
static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_dev *hdev;
	int err;

	memset(&rp, 0, sizeof(rp));
	/* Echo back the address from the original command parameters */
	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));

	/* On failure reply with zeroed clock values */
	if (status)
		goto complete;

	hdev = hci_dev_get(cmd->index);
	if (hdev) {
		rp.local_clock = cpu_to_le32(hdev->clock);
		hci_dev_put(hdev);
	}

	/* conn is only set when a piconet clock was requested */
	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
				sizeof(rp));

	/* Drop the hold and reference taken in get_clock_info() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	return err;
}
6742 
/* HCI request callback for the Read Clock request issued by
 * get_clock_info(): recover the connection (if a piconet clock was
 * requested) from the sent command parameters, find the matching pending
 * mgmt command and complete it.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct hci_cp_read_clock *hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection
	 * was read; look the connection up by its handle.
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* Match the pending command by the connection stored as its data */
	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6774 
/* Handle the MGMT Get Clock Information command: read the local clock
 * and, when a non-ANY BR/EDR address is given, the piconet clock of the
 * connection to that address. The reply is sent asynchronously from
 * get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-fill the reply with the requested address for error paths */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is a BR/EDR-only concept */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address asks for the piconet clock of that link */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First read: the local clock (which == 0x00 after memset) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the reply is sent;
		 * released in clock_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		/* Second read: the piconet clock of this connection */
		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6850 
6851 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6852 {
6853 	struct hci_conn *conn;
6854 
6855 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6856 	if (!conn)
6857 		return false;
6858 
6859 	if (conn->dst_type != type)
6860 		return false;
6861 
6862 	if (conn->state != BT_CONNECTED)
6863 		return false;
6864 
6865 	return true;
6866 }
6867 
6868 /* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Creates the params entry if it does not exist yet */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the mode is already set */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list (pend_le_conns or
	 * pend_le_reports) the entry is currently on before re-filing it.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a connect attempt if not already connected */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6912 
6913 static void device_added(struct sock *sk, struct hci_dev *hdev,
6914 			 bdaddr_t *bdaddr, u8 type, u8 action)
6915 {
6916 	struct mgmt_ev_device_added ev;
6917 
6918 	bacpy(&ev.addr.bdaddr, bdaddr);
6919 	ev.addr.type = type;
6920 	ev.action = action;
6921 
6922 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
6923 }
6924 
/* cmd_sync work queued by add_device(): refresh passive scanning so the
 * newly added device is picked up by the accept/report lists.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
6929 
/* Handle the MGMT Add Device command. BR/EDR devices (action 0x01 only)
 * are added to the accept list; LE devices get connection parameters
 * with an auto-connect mode derived from the action value, after which
 * passive scanning is refreshed.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Address must be of a known type and not the ANY address */
	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* Valid actions: 0x00 background scan, 0x01 allow incoming,
	 * 0x02 auto-connect.
	 */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		/* Page scan settings may need updating for the new entry */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Translate the mgmt action into an auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	/* Refresh passive scanning to account for the new device */
	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7029 
7030 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7031 			   bdaddr_t *bdaddr, u8 type)
7032 {
7033 	struct mgmt_ev_device_removed ev;
7034 
7035 	bacpy(&ev.addr.bdaddr, bdaddr);
7036 	ev.addr.type = type;
7037 
7038 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7039 }
7040 
/* cmd_sync work queued by remove_device(): refresh passive scanning so
 * removed devices are no longer scanned for.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7045 
/* Handle the MGMT Remove Device command. A specific address removes
 * that one device (from the accept list for BR/EDR, or its connection
 * parameters for LE); the ANY address with type 0 removes all accept
 * list entries and all non-disabled LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		/* Remove a single device */
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			/* BR/EDR devices live on the accept list */
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries that were never added via Add Device (disabled
		 * or explicit-connect only) cannot be removed this way.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		/* BDADDR_ANY: remove everything (address type must be 0) */
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Flush the BR/EDR accept list */
		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		/* Flush LE connection parameters, keeping disabled entries
		 * and demoting in-progress explicit connects instead of
		 * freeing them from under the connect attempt.
		 */
		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	/* Refresh passive scanning now that devices are gone */
	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7173 
/* Handle the MGMT Load Connection Parameters command: clear disabled
 * entries and store the supplied LE connection parameters. Individual
 * entries with invalid address types or out-of-range values are logged
 * and skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound on how many parameter entries fit in one mgmt
	 * packet, since the packet length is a u16.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	/* Connection parameters are an LE-only concept */
	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The packet length must match the advertised entry count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Map the mgmt address type onto the internal LE type;
		 * anything else is skipped.
		 */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		/* Range-check against the spec-mandated limits */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		/* Creates the entry if it does not exist yet */
		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7258 
/* Handle the MGMT Set External Configuration command: toggle the
 * HCI_EXT_CONFIGURED flag on a powered-off controller that has the
 * external-config quirk, and move the controller between the configured
 * and unconfigured index lists when its configuration state changes.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Configuration can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	/* config is a boolean parameter */
	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	/* Only meaningful on controllers that declare the quirk */
	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state no longer matches the UNCONFIGURED
	 * flag, the controller must switch index lists: remove it from
	 * the current one, flip the flag and re-add (or power up).
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			/* Became configured: run through a power-on cycle
			 * so the standard setup is performed.
			 */
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose as raw/unconfigured */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7314 
/* Handle the MGMT Set Public Address command: record the public address
 * to program into the controller (via the driver's set_bdaddr hook) and,
 * if the controller thereby becomes configured, kick off the power-on
 * configuration sequence.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* The address can only be changed while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver must provide a way to program the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* If setting the address completed configuration, move the
	 * controller onto the configured index list and power it on so
	 * the standard setup runs.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7366 
/* HCI request callback for Read Local OOB (Extended) Data issued by
 * read_local_ssp_oob_req(): pick the hash/randomizer values out of the
 * controller reply, build the EIR-formatted response for the pending
 * Read Local OOB Extended Data command, and broadcast the updated OOB
 * data to interested sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		/* Controller error: reply with an empty EIR blob */
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy (non-Secure-Connections) reply: P-192 values only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* Class of Device + hash + randomizer EIR fields */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply: P-256 values, plus P-192 unless the
		 * controller is in Secure-Connections-only mode.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* Nothing to encode on the failure paths */
	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester implicitly subscribes to future OOB data updates */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7477 
7478 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7479 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7480 {
7481 	struct mgmt_pending_cmd *cmd;
7482 	struct hci_request req;
7483 	int err;
7484 
7485 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7486 			       cp, sizeof(*cp));
7487 	if (!cmd)
7488 		return -ENOMEM;
7489 
7490 	hci_req_init(&req, hdev);
7491 
7492 	if (bredr_sc_enabled(hdev))
7493 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7494 	else
7495 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7496 
7497 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7498 	if (err < 0) {
7499 		mgmt_pending_remove(cmd);
7500 		return err;
7501 	}
7502 
7503 	return 0;
7504 }
7505 
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Builds a reply containing local out-of-band pairing data as EIR
 * structures for the requested address type(s): class of device for
 * BR/EDR, or address/role/SC-confirm/SC-random/flags for LE. For the
 * BR/EDR case with SSP enabled the data must come from the controller,
 * so the reply is deferred to read_local_ssp_oob_req() and its
 * completion callback. On success the same payload is also broadcast
 * as an MGMT_EV_LOCAL_OOB_DATA_UPDATED event to other listeners.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: validate the request and pre-compute the worst-case
	 * EIR length so the reply buffer can be sized before filling it.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* Class of Device field: 2 header + 3 data */
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* address + role + confirm + random + flags */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	/* OOB pairing data requires Secure Simple Pairing support. */
	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill the buffer for real; eir_len now tracks the
	 * actual number of bytes appended.
	 */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Hash/randomizer must be fetched from the
			 * controller; the reply is sent asynchronously from
			 * the request completion callback.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will see: static random
		 * (type byte 0x01) when forced, when no public address
		 * exists, or when running LE-only with a static address
		 * configured; otherwise the public address (0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* LE role: 0x02 when advertising (peripheral preferred),
		 * 0x01 otherwise (central preferred).
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		/* Secure Connections confirm/random values generated above */
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* The requester implicitly subscribes to future OOB data updates. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify other subscribed sockets, excluding the requester. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7666 
7667 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7668 {
7669 	u32 flags = 0;
7670 
7671 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7672 	flags |= MGMT_ADV_FLAG_DISCOV;
7673 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7674 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7675 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7676 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7677 	flags |= MGMT_ADV_PARAM_DURATION;
7678 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7679 	flags |= MGMT_ADV_PARAM_INTERVALS;
7680 	flags |= MGMT_ADV_PARAM_TX_POWER;
7681 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7682 
7683 	/* In extended adv TX_POWER returned from Set Adv Param
7684 	 * will be always valid.
7685 	 */
7686 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7687 	    ext_adv_capable(hdev))
7688 		flags |= MGMT_ADV_FLAG_TX_POWER;
7689 
7690 	if (ext_adv_capable(hdev)) {
7691 		flags |= MGMT_ADV_FLAG_SEC_1M;
7692 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7693 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7694 
7695 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7696 			flags |= MGMT_ADV_FLAG_SEC_2M;
7697 
7698 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7699 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7700 	}
7701 
7702 	return flags;
7703 }
7704 
7705 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7706 			     void *data, u16 data_len)
7707 {
7708 	struct mgmt_rp_read_adv_features *rp;
7709 	size_t rp_len;
7710 	int err;
7711 	struct adv_info *adv_instance;
7712 	u32 supported_flags;
7713 	u8 *instance;
7714 
7715 	bt_dev_dbg(hdev, "sock %p", sk);
7716 
7717 	if (!lmp_le_capable(hdev))
7718 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7719 				       MGMT_STATUS_REJECTED);
7720 
7721 	hci_dev_lock(hdev);
7722 
7723 	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7724 	rp = kmalloc(rp_len, GFP_ATOMIC);
7725 	if (!rp) {
7726 		hci_dev_unlock(hdev);
7727 		return -ENOMEM;
7728 	}
7729 
7730 	supported_flags = get_supported_adv_flags(hdev);
7731 
7732 	rp->supported_flags = cpu_to_le32(supported_flags);
7733 	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7734 	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7735 	rp->max_instances = hdev->le_num_of_adv_sets;
7736 	rp->num_instances = hdev->adv_instance_cnt;
7737 
7738 	instance = rp->instance;
7739 	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7740 		*instance = adv_instance->instance;
7741 		instance++;
7742 	}
7743 
7744 	hci_dev_unlock(hdev);
7745 
7746 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7747 				MGMT_STATUS_SUCCESS, rp, rp_len);
7748 
7749 	kfree(rp);
7750 
7751 	return err;
7752 }
7753 
7754 static u8 calculate_name_len(struct hci_dev *hdev)
7755 {
7756 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7757 
7758 	return eir_append_local_name(hdev, buf, 0);
7759 }
7760 
7761 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7762 			   bool is_adv_data)
7763 {
7764 	u8 max_len = HCI_MAX_AD_LENGTH;
7765 
7766 	if (is_adv_data) {
7767 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7768 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7769 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7770 			max_len -= 3;
7771 
7772 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7773 			max_len -= 3;
7774 	} else {
7775 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7776 			max_len -= calculate_name_len(hdev);
7777 
7778 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7779 			max_len -= 4;
7780 	}
7781 
7782 	return max_len;
7783 }
7784 
7785 static bool flags_managed(u32 adv_flags)
7786 {
7787 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7788 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7789 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7790 }
7791 
7792 static bool tx_power_managed(u32 adv_flags)
7793 {
7794 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7795 }
7796 
7797 static bool name_managed(u32 adv_flags)
7798 {
7799 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7800 }
7801 
7802 static bool appearance_managed(u32 adv_flags)
7803 {
7804 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7805 }
7806 
7807 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7808 			      u8 len, bool is_adv_data)
7809 {
7810 	int i, cur_len;
7811 	u8 max_len;
7812 
7813 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7814 
7815 	if (len > max_len)
7816 		return false;
7817 
7818 	/* Make sure that the data is correctly formatted. */
7819 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7820 		cur_len = data[i];
7821 
7822 		if (!cur_len)
7823 			continue;
7824 
7825 		if (data[i + 1] == EIR_FLAGS &&
7826 		    (!is_adv_data || flags_managed(adv_flags)))
7827 			return false;
7828 
7829 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7830 			return false;
7831 
7832 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7833 			return false;
7834 
7835 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7836 			return false;
7837 
7838 		if (data[i + 1] == EIR_APPEARANCE &&
7839 		    appearance_managed(adv_flags))
7840 			return false;
7841 
7842 		/* If the current field length would exceed the total data
7843 		 * length, then it's invalid.
7844 		 */
7845 		if (i + cur_len >= len)
7846 			return false;
7847 	}
7848 
7849 	return true;
7850 }
7851 
7852 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7853 {
7854 	u32 supported_flags, phy_flags;
7855 
7856 	/* The current implementation only supports a subset of the specified
7857 	 * flags. Also need to check mutual exclusiveness of sec flags.
7858 	 */
7859 	supported_flags = get_supported_adv_flags(hdev);
7860 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7861 	if (adv_flags & ~supported_flags ||
7862 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7863 		return false;
7864 
7865 	return true;
7866 }
7867 
7868 static bool adv_busy(struct hci_dev *hdev)
7869 {
7870 	return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7871 		pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7872 		pending_find(MGMT_OP_SET_LE, hdev) ||
7873 		pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7874 		pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
7875 }
7876 
7877 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
7878 			     int err)
7879 {
7880 	struct adv_info *adv, *n;
7881 
7882 	bt_dev_dbg(hdev, "err %d", err);
7883 
7884 	hci_dev_lock(hdev);
7885 
7886 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
7887 		u8 instance;
7888 
7889 		if (!adv->pending)
7890 			continue;
7891 
7892 		if (!err) {
7893 			adv->pending = false;
7894 			continue;
7895 		}
7896 
7897 		instance = adv->instance;
7898 
7899 		if (hdev->cur_adv_instance == instance)
7900 			cancel_adv_timeout(hdev);
7901 
7902 		hci_remove_adv_instance(hdev, instance);
7903 		mgmt_advertising_removed(sk, hdev, instance);
7904 	}
7905 
7906 	hci_dev_unlock(hdev);
7907 }
7908 
7909 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
7910 {
7911 	struct mgmt_pending_cmd *cmd = data;
7912 	struct mgmt_cp_add_advertising *cp = cmd->param;
7913 	struct mgmt_rp_add_advertising rp;
7914 
7915 	memset(&rp, 0, sizeof(rp));
7916 
7917 	rp.instance = cp->instance;
7918 
7919 	if (err)
7920 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
7921 				mgmt_status(err));
7922 	else
7923 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
7924 				  mgmt_status(err), &rp, sizeof(rp));
7925 
7926 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
7927 
7928 	mgmt_pending_free(cmd);
7929 }
7930 
/* hci_cmd_sync work for MGMT_OP_ADD_ADVERTISING: schedule the instance
 * recorded in the pending command (the final "true" argument presumably
 * forces an immediate reschedule — see hci_schedule_adv_instance_sync).
 */
static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_advertising *cp = cmd->param;

	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
7938 
/* Handler for MGMT_OP_ADD_ADVERTISING.
 *
 * Validates the request, registers (or replaces) the advertising
 * instance, and — when HCI traffic is actually needed — queues
 * add_advertising_sync() to program the controller, replying from
 * add_advertising_complete(). Otherwise replies immediately.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance identifiers are 1-based, bounded by controller support. */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length tail must hold exactly adv + scan rsp data. */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Adv data and scan rsp data are validated against different
	 * kernel-managed-field budgets (is_adv_data flag).
	 */
	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Record which instance the sync work should actually schedule;
	 * note this rewrites the copied command parameters, not the
	 * caller's buffer semantics.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8071 
8072 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8073 					int err)
8074 {
8075 	struct mgmt_pending_cmd *cmd = data;
8076 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8077 	struct mgmt_rp_add_ext_adv_params rp;
8078 	struct adv_info *adv;
8079 	u32 flags;
8080 
8081 	BT_DBG("%s", hdev->name);
8082 
8083 	hci_dev_lock(hdev);
8084 
8085 	adv = hci_find_adv_instance(hdev, cp->instance);
8086 	if (!adv)
8087 		goto unlock;
8088 
8089 	rp.instance = cp->instance;
8090 	rp.tx_power = adv->tx_power;
8091 
8092 	/* While we're at it, inform userspace of the available space for this
8093 	 * advertisement, given the flags that will be used.
8094 	 */
8095 	flags = __le32_to_cpu(cp->flags);
8096 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8097 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8098 
8099 	if (err) {
8100 		/* If this advertisement was previously advertising and we
8101 		 * failed to update it, we signal that it has been removed and
8102 		 * delete its structure
8103 		 */
8104 		if (!adv->pending)
8105 			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8106 
8107 		hci_remove_adv_instance(hdev, cp->instance);
8108 
8109 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8110 				mgmt_status(err));
8111 	} else {
8112 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8113 				  mgmt_status(err), &rp, sizeof(rp));
8114 	}
8115 
8116 unlock:
8117 	if (cmd)
8118 		mgmt_pending_free(cmd);
8119 
8120 	hci_dev_unlock(hdev);
8121 }
8122 
/* hci_cmd_sync work for MGMT_OP_ADD_EXT_ADV_PARAMS: push the stored
 * parameters of the requested instance to the controller.
 */
static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}
8130 
8131 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8132 			      void *data, u16 data_len)
8133 {
8134 	struct mgmt_cp_add_ext_adv_params *cp = data;
8135 	struct mgmt_rp_add_ext_adv_params rp;
8136 	struct mgmt_pending_cmd *cmd = NULL;
8137 	u32 flags, min_interval, max_interval;
8138 	u16 timeout, duration;
8139 	u8 status;
8140 	s8 tx_power;
8141 	int err;
8142 
8143 	BT_DBG("%s", hdev->name);
8144 
8145 	status = mgmt_le_support(hdev);
8146 	if (status)
8147 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8148 				       status);
8149 
8150 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8151 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8152 				       MGMT_STATUS_INVALID_PARAMS);
8153 
8154 	/* The purpose of breaking add_advertising into two separate MGMT calls
8155 	 * for params and data is to allow more parameters to be added to this
8156 	 * structure in the future. For this reason, we verify that we have the
8157 	 * bare minimum structure we know of when the interface was defined. Any
8158 	 * extra parameters we don't know about will be ignored in this request.
8159 	 */
8160 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8161 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8162 				       MGMT_STATUS_INVALID_PARAMS);
8163 
8164 	flags = __le32_to_cpu(cp->flags);
8165 
8166 	if (!requested_adv_flags_are_valid(hdev, flags))
8167 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8168 				       MGMT_STATUS_INVALID_PARAMS);
8169 
8170 	hci_dev_lock(hdev);
8171 
8172 	/* In new interface, we require that we are powered to register */
8173 	if (!hdev_is_powered(hdev)) {
8174 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8175 				      MGMT_STATUS_REJECTED);
8176 		goto unlock;
8177 	}
8178 
8179 	if (adv_busy(hdev)) {
8180 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8181 				      MGMT_STATUS_BUSY);
8182 		goto unlock;
8183 	}
8184 
8185 	/* Parse defined parameters from request, use defaults otherwise */
8186 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8187 		  __le16_to_cpu(cp->timeout) : 0;
8188 
8189 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8190 		   __le16_to_cpu(cp->duration) :
8191 		   hdev->def_multi_adv_rotation_duration;
8192 
8193 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8194 		       __le32_to_cpu(cp->min_interval) :
8195 		       hdev->le_adv_min_interval;
8196 
8197 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8198 		       __le32_to_cpu(cp->max_interval) :
8199 		       hdev->le_adv_max_interval;
8200 
8201 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8202 		   cp->tx_power :
8203 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8204 
8205 	/* Create advertising instance with no advertising or response data */
8206 	err = hci_add_adv_instance(hdev, cp->instance, flags,
8207 				   0, NULL, 0, NULL, timeout, duration,
8208 				   tx_power, min_interval, max_interval);
8209 
8210 	if (err < 0) {
8211 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8212 				      MGMT_STATUS_FAILED);
8213 		goto unlock;
8214 	}
8215 
8216 	/* Submit request for advertising params if ext adv available */
8217 	if (ext_adv_capable(hdev)) {
8218 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8219 				       data, data_len);
8220 		if (!cmd) {
8221 			err = -ENOMEM;
8222 			hci_remove_adv_instance(hdev, cp->instance);
8223 			goto unlock;
8224 		}
8225 
8226 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8227 					 add_ext_adv_params_complete);
8228 		if (err < 0)
8229 			mgmt_pending_free(cmd);
8230 	} else {
8231 		rp.instance = cp->instance;
8232 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8233 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8234 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8235 		err = mgmt_cmd_complete(sk, hdev->id,
8236 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8237 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8238 	}
8239 
8240 unlock:
8241 	hci_dev_unlock(hdev);
8242 
8243 	return err;
8244 }
8245 
8246 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8247 {
8248 	struct mgmt_pending_cmd *cmd = data;
8249 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8250 	struct mgmt_rp_add_advertising rp;
8251 
8252 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8253 
8254 	memset(&rp, 0, sizeof(rp));
8255 
8256 	rp.instance = cp->instance;
8257 
8258 	if (err)
8259 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8260 				mgmt_status(err));
8261 	else
8262 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8263 				  mgmt_status(err), &rp, sizeof(rp));
8264 
8265 	mgmt_pending_free(cmd);
8266 }
8267 
8268 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8269 {
8270 	struct mgmt_pending_cmd *cmd = data;
8271 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8272 	int err;
8273 
8274 	if (ext_adv_capable(hdev)) {
8275 		err = hci_update_adv_data_sync(hdev, cp->instance);
8276 		if (err)
8277 			return err;
8278 
8279 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8280 		if (err)
8281 			return err;
8282 
8283 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8284 	}
8285 
8286 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8287 }
8288 
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA.
 *
 * Supplies the advertising and scan response data for an instance
 * previously created with MGMT_OP_ADD_EXT_ADV_PARAMS, then schedules
 * it. On any failure after the instance lookup, the instance is torn
 * down again (clear_new_instance path) so a params/data pair either
 * fully succeeds or leaves no trace.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by ADD_EXT_ADV_PARAMS. */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		/* The instance is complete now, so announce it. */
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8407 
8408 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8409 					int err)
8410 {
8411 	struct mgmt_pending_cmd *cmd = data;
8412 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8413 	struct mgmt_rp_remove_advertising rp;
8414 
8415 	bt_dev_dbg(hdev, "err %d", err);
8416 
8417 	memset(&rp, 0, sizeof(rp));
8418 	rp.instance = cp->instance;
8419 
8420 	if (err)
8421 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8422 				mgmt_status(err));
8423 	else
8424 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8425 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8426 
8427 	mgmt_pending_free(cmd);
8428 }
8429 
8430 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8431 {
8432 	struct mgmt_pending_cmd *cmd = data;
8433 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8434 	int err;
8435 
8436 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8437 	if (err)
8438 		return err;
8439 
8440 	if (list_empty(&hdev->adv_instances))
8441 		err = hci_disable_advertising_sync(hdev);
8442 
8443 	return err;
8444 }
8445 
/* Handler for MGMT_OP_REMOVE_ADVERTISING.
 *
 * Removes one advertising instance (or all of them when cp->instance
 * is 0); the actual controller work runs from remove_advertising_sync()
 * and the reply is sent from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing registration. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Refuse while another command owns the advertising state. */
	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing registered, so nothing to remove. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8495 
8496 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8497 			     void *data, u16 data_len)
8498 {
8499 	struct mgmt_cp_get_adv_size_info *cp = data;
8500 	struct mgmt_rp_get_adv_size_info rp;
8501 	u32 flags, supported_flags;
8502 	int err;
8503 
8504 	bt_dev_dbg(hdev, "sock %p", sk);
8505 
8506 	if (!lmp_le_capable(hdev))
8507 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8508 				       MGMT_STATUS_REJECTED);
8509 
8510 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8511 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8512 				       MGMT_STATUS_INVALID_PARAMS);
8513 
8514 	flags = __le32_to_cpu(cp->flags);
8515 
8516 	/* The current implementation only supports a subset of the specified
8517 	 * flags.
8518 	 */
8519 	supported_flags = get_supported_adv_flags(hdev);
8520 	if (flags & ~supported_flags)
8521 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8522 				       MGMT_STATUS_INVALID_PARAMS);
8523 
8524 	rp.instance = cp->instance;
8525 	rp.flags = cp->flags;
8526 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8527 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8528 
8529 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8530 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8531 
8532 	return err;
8533 }
8534 
/* Dispatch table for mgmt commands, indexed by opcode. Each entry
 * carries the handler, the expected (minimum, if HCI_MGMT_VAR_LEN)
 * parameter size, and flags describing whether the command needs a
 * controller index, may run on unconfigured controllers, or is
 * available to untrusted sockets.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8662 
8663 void mgmt_index_added(struct hci_dev *hdev)
8664 {
8665 	struct mgmt_ev_ext_index ev;
8666 
8667 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8668 		return;
8669 
8670 	switch (hdev->dev_type) {
8671 	case HCI_PRIMARY:
8672 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8673 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8674 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8675 			ev.type = 0x01;
8676 		} else {
8677 			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8678 					 HCI_MGMT_INDEX_EVENTS);
8679 			ev.type = 0x00;
8680 		}
8681 		break;
8682 	case HCI_AMP:
8683 		ev.type = 0x02;
8684 		break;
8685 	default:
8686 		return;
8687 	}
8688 
8689 	ev.bus = hdev->bus;
8690 
8691 	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8692 			 HCI_MGMT_EXT_INDEX_EVENTS);
8693 }
8694 
8695 void mgmt_index_removed(struct hci_dev *hdev)
8696 {
8697 	struct mgmt_ev_ext_index ev;
8698 	u8 status = MGMT_STATUS_INVALID_INDEX;
8699 
8700 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8701 		return;
8702 
8703 	switch (hdev->dev_type) {
8704 	case HCI_PRIMARY:
8705 		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8706 
8707 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8708 			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8709 					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8710 			ev.type = 0x01;
8711 		} else {
8712 			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8713 					 HCI_MGMT_INDEX_EVENTS);
8714 			ev.type = 0x00;
8715 		}
8716 		break;
8717 	case HCI_AMP:
8718 		ev.type = 0x02;
8719 		break;
8720 	default:
8721 		return;
8722 	}
8723 
8724 	ev.bus = hdev->bus;
8725 
8726 	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8727 			 HCI_MGMT_EXT_INDEX_EVENTS);
8728 }
8729 
8730 void mgmt_power_on(struct hci_dev *hdev, int err)
8731 {
8732 	struct cmd_lookup match = { NULL, hdev };
8733 
8734 	bt_dev_dbg(hdev, "err %d", err);
8735 
8736 	hci_dev_lock(hdev);
8737 
8738 	if (!err) {
8739 		restart_le_actions(hdev);
8740 		hci_update_passive_scan(hdev);
8741 	}
8742 
8743 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8744 
8745 	new_settings(hdev, match.sk);
8746 
8747 	if (match.sk)
8748 		sock_put(match.sk);
8749 
8750 	hci_dev_unlock(hdev);
8751 }
8752 
/* Called once the controller has been powered off: complete pending
 * commands with an appropriate status, reset the advertised class of
 * device to zero, and broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	/* Answer pending Set Powered commands first (settings reply) */
	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail every remaining pending command with the chosen status */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only emit a class-of-device change if it was non-zero before */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8786 
8787 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8788 {
8789 	struct mgmt_pending_cmd *cmd;
8790 	u8 status;
8791 
8792 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8793 	if (!cmd)
8794 		return;
8795 
8796 	if (err == -ERFKILL)
8797 		status = MGMT_STATUS_RFKILLED;
8798 	else
8799 		status = MGMT_STATUS_FAILED;
8800 
8801 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8802 
8803 	mgmt_pending_remove(cmd);
8804 }
8805 
8806 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8807 		       bool persistent)
8808 {
8809 	struct mgmt_ev_new_link_key ev;
8810 
8811 	memset(&ev, 0, sizeof(ev));
8812 
8813 	ev.store_hint = persistent;
8814 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8815 	ev.key.addr.type = BDADDR_BREDR;
8816 	ev.key.type = key->type;
8817 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8818 	ev.key.pin_len = key->pin_len;
8819 
8820 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8821 }
8822 
8823 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8824 {
8825 	switch (ltk->type) {
8826 	case SMP_LTK:
8827 	case SMP_LTK_RESPONDER:
8828 		if (ltk->authenticated)
8829 			return MGMT_LTK_AUTHENTICATED;
8830 		return MGMT_LTK_UNAUTHENTICATED;
8831 	case SMP_LTK_P256:
8832 		if (ltk->authenticated)
8833 			return MGMT_LTK_P256_AUTH;
8834 		return MGMT_LTK_P256_UNAUTH;
8835 	case SMP_LTK_P256_DEBUG:
8836 		return MGMT_LTK_P256_DEBUG;
8837 	}
8838 
8839 	return MGMT_LTK_UNAUTHENTICATED;
8840 }
8841 
/* Emit a New Long Term Key event for a freshly distributed LE LTK. */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key distributed by the initiator */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8884 
8885 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
8886 {
8887 	struct mgmt_ev_new_irk ev;
8888 
8889 	memset(&ev, 0, sizeof(ev));
8890 
8891 	ev.store_hint = persistent;
8892 
8893 	bacpy(&ev.rpa, &irk->rpa);
8894 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8895 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8896 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8897 
8898 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
8899 }
8900 
/* Emit a New CSRK event for a freshly distributed signature
 * resolving key.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
8930 
8931 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
8932 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
8933 			 u16 max_interval, u16 latency, u16 timeout)
8934 {
8935 	struct mgmt_ev_new_conn_param ev;
8936 
8937 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
8938 		return;
8939 
8940 	memset(&ev, 0, sizeof(ev));
8941 	bacpy(&ev.addr.bdaddr, bdaddr);
8942 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
8943 	ev.store_hint = store_hint;
8944 	ev.min_interval = cpu_to_le16(min_interval);
8945 	ev.max_interval = cpu_to_le16(max_interval);
8946 	ev.latency = cpu_to_le16(latency);
8947 	ev.timeout = cpu_to_le16(timeout);
8948 
8949 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
8950 }
8951 
/* Emit a Device Connected event, attaching either the cached LE
 * advertising data or (for BR/EDR) the remote name and class of
 * device as EIR fields.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	/* Event header plus variable-length EIR payload; 512 bytes is
	 * assumed to cover adv data + name + class — TODO confirm against
	 * the HCI maximums for le_adv_data_len and name_len.
	 */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;
	u32 flags = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	/* Mark connections we initiated ourselves */
	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append class of device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
8992 
8993 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
8994 {
8995 	struct sock **sk = data;
8996 
8997 	cmd->cmd_complete(cmd, 0);
8998 
8999 	*sk = cmd->sk;
9000 	sock_hold(*sk);
9001 
9002 	mgmt_pending_remove(cmd);
9003 }
9004 
9005 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9006 {
9007 	struct hci_dev *hdev = data;
9008 	struct mgmt_cp_unpair_device *cp = cmd->param;
9009 
9010 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9011 
9012 	cmd->cmd_complete(cmd, 0);
9013 	mgmt_pending_remove(cmd);
9014 }
9015 
9016 bool mgmt_powering_down(struct hci_dev *hdev)
9017 {
9018 	struct mgmt_pending_cmd *cmd;
9019 	struct mgmt_mode *cp;
9020 
9021 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9022 	if (!cmd)
9023 		return false;
9024 
9025 	cp = cmd->param;
9026 	if (!cp->val)
9027 		return true;
9028 
9029 	return false;
9030 }
9031 
/* Emit a Device Disconnected event and complete any pending
 * Disconnect/Unpair Device commands for this link. Also expedites a
 * pending power-off once the last connection goes away.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report links that were announced via Device Connected */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete pending Disconnect commands; sk gets a held reference
	 * to one requester so the event is skipped for its socket.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9071 
9072 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9073 			    u8 link_type, u8 addr_type, u8 status)
9074 {
9075 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9076 	struct mgmt_cp_disconnect *cp;
9077 	struct mgmt_pending_cmd *cmd;
9078 
9079 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9080 			     hdev);
9081 
9082 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9083 	if (!cmd)
9084 		return;
9085 
9086 	cp = cmd->param;
9087 
9088 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9089 		return;
9090 
9091 	if (cp->addr.type != bdaddr_type)
9092 		return;
9093 
9094 	cmd->cmd_complete(cmd, mgmt_status(status));
9095 	mgmt_pending_remove(cmd);
9096 }
9097 
9098 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9099 			 u8 addr_type, u8 status)
9100 {
9101 	struct mgmt_ev_connect_failed ev;
9102 
9103 	/* The connection is still in hci_conn_hash so test for 1
9104 	 * instead of 0 to know if this is the last one.
9105 	 */
9106 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9107 		cancel_delayed_work(&hdev->power_off);
9108 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
9109 	}
9110 
9111 	bacpy(&ev.addr.bdaddr, bdaddr);
9112 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9113 	ev.status = mgmt_status(status);
9114 
9115 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9116 }
9117 
9118 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9119 {
9120 	struct mgmt_ev_pin_code_request ev;
9121 
9122 	bacpy(&ev.addr.bdaddr, bdaddr);
9123 	ev.addr.type = BDADDR_BREDR;
9124 	ev.secure = secure;
9125 
9126 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9127 }
9128 
9129 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9130 				  u8 status)
9131 {
9132 	struct mgmt_pending_cmd *cmd;
9133 
9134 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9135 	if (!cmd)
9136 		return;
9137 
9138 	cmd->cmd_complete(cmd, mgmt_status(status));
9139 	mgmt_pending_remove(cmd);
9140 }
9141 
9142 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9143 				      u8 status)
9144 {
9145 	struct mgmt_pending_cmd *cmd;
9146 
9147 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9148 	if (!cmd)
9149 		return;
9150 
9151 	cmd->cmd_complete(cmd, mgmt_status(status));
9152 	mgmt_pending_remove(cmd);
9153 }
9154 
9155 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9156 			      u8 link_type, u8 addr_type, u32 value,
9157 			      u8 confirm_hint)
9158 {
9159 	struct mgmt_ev_user_confirm_request ev;
9160 
9161 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9162 
9163 	bacpy(&ev.addr.bdaddr, bdaddr);
9164 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9165 	ev.confirm_hint = confirm_hint;
9166 	ev.value = cpu_to_le32(value);
9167 
9168 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9169 			  NULL);
9170 }
9171 
9172 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9173 			      u8 link_type, u8 addr_type)
9174 {
9175 	struct mgmt_ev_user_passkey_request ev;
9176 
9177 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9178 
9179 	bacpy(&ev.addr.bdaddr, bdaddr);
9180 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9181 
9182 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9183 			  NULL);
9184 }
9185 
9186 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9187 				      u8 link_type, u8 addr_type, u8 status,
9188 				      u8 opcode)
9189 {
9190 	struct mgmt_pending_cmd *cmd;
9191 
9192 	cmd = pending_find(opcode, hdev);
9193 	if (!cmd)
9194 		return -ENOENT;
9195 
9196 	cmd->cmd_complete(cmd, mgmt_status(status));
9197 	mgmt_pending_remove(cmd);
9198 
9199 	return 0;
9200 }
9201 
9202 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9203 				     u8 link_type, u8 addr_type, u8 status)
9204 {
9205 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9206 					  status, MGMT_OP_USER_CONFIRM_REPLY);
9207 }
9208 
9209 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9210 					 u8 link_type, u8 addr_type, u8 status)
9211 {
9212 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9213 					  status,
9214 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
9215 }
9216 
9217 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9218 				     u8 link_type, u8 addr_type, u8 status)
9219 {
9220 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9221 					  status, MGMT_OP_USER_PASSKEY_REPLY);
9222 }
9223 
9224 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9225 					 u8 link_type, u8 addr_type, u8 status)
9226 {
9227 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9228 					  status,
9229 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
9230 }
9231 
9232 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9233 			     u8 link_type, u8 addr_type, u32 passkey,
9234 			     u8 entered)
9235 {
9236 	struct mgmt_ev_passkey_notify ev;
9237 
9238 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9239 
9240 	bacpy(&ev.addr.bdaddr, bdaddr);
9241 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9242 	ev.passkey = __cpu_to_le32(passkey);
9243 	ev.entered = entered;
9244 
9245 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9246 }
9247 
/* Report an authentication failure: broadcast Auth Failed (skipping
 * the socket of any pending pairing command) and complete that
 * pending command if one exists.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Send the event before completing the command so the requester
	 * only receives the command response, not the broadcast.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9268 
/* Completion handler for Write Authentication Enable: sync the
 * HCI_LINK_SECURITY flag with the controller state and answer pending
 * Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		/* On failure just fail the pending commands; the flag
		 * state is left untouched.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag and
	 * note whether that actually changed anything.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9295 
9296 static void clear_eir(struct hci_request *req)
9297 {
9298 	struct hci_dev *hdev = req->hdev;
9299 	struct hci_cp_write_eir cp;
9300 
9301 	if (!lmp_ext_inq_capable(hdev))
9302 		return;
9303 
9304 	memset(hdev->eir, 0, sizeof(hdev->eir));
9305 
9306 	memset(&cp, 0, sizeof(cp));
9307 
9308 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
9309 }
9310 
/* Completion handler for Write Simple Pairing Mode: reconcile the
 * SSP/HS flags with the outcome, answer pending Set SSP commands and
 * update or clear the EIR accordingly.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable must roll back the optimistically set
		 * SSP flag (and the dependent HS flag) and announce it.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also forces HS off; "changed" reflects
		 * whether either flag transitioned.
		 */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* With SSP on, refresh the EIR (and debug-key mode if in use);
	 * with SSP off, blank the EIR entirely.
	 */
	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
9363 
9364 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9365 {
9366 	struct cmd_lookup *match = data;
9367 
9368 	if (match->sk == NULL) {
9369 		match->sk = cmd->sk;
9370 		sock_hold(match->sk);
9371 	}
9372 }
9373 
9374 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9375 				    u8 status)
9376 {
9377 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9378 
9379 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9380 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9381 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9382 
9383 	if (!status) {
9384 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9385 				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9386 		ext_info_changed(hdev, NULL);
9387 	}
9388 
9389 	if (match.sk)
9390 		sock_put(match.sk);
9391 }
9392 
/* Completion handler for a local name change: emit Local Name Changed
 * unless the change is part of the power-on sequence.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt request triggered this, so sync the cached name */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the requester's socket; it gets the command response */
	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9420 
9421 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9422 {
9423 	int i;
9424 
9425 	for (i = 0; i < uuid_count; i++) {
9426 		if (!memcmp(uuid, uuids[i], 16))
9427 			return true;
9428 	}
9429 
9430 	return false;
9431 }
9432 
/* Walk the EIR/adv data and return true if any advertised service
 * UUID (16-, 32- or 128-bit, expanded to 128-bit via the Bluetooth
 * base UUID) matches one of the filter uuids.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the data */
		if (field_len == 0)
			break;

		/* Stop on a truncated field rather than reading past it */
		if (eir_len - parsed < field_len + 1)
			break;

		/* eir[1] is the AD type; UUIDs start at eir[2] */
		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				/* Expand 16-bit UUID into base UUID
				 * bytes 12-13 (little-endian source)
				 */
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				/* Expand 32-bit UUID into base UUID
				 * bytes 12-15 (little-endian source)
				 */
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past the length byte plus the field contents */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9487 
9488 static void restart_le_scan(struct hci_dev *hdev)
9489 {
9490 	/* If controller is not scanning we are done. */
9491 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9492 		return;
9493 
9494 	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9495 		       hdev->discovery.scan_start +
9496 		       hdev->discovery.scan_duration))
9497 		return;
9498 
9499 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9500 			   DISCOV_LE_RESTART_DELAY);
9501 }
9502 
/* Apply the active service-discovery filter (RSSI threshold and UUID
 * list) to a discovery result. Returns false when the result should be
 * dropped.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are searched for a match.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
9547 
/* Emit a Device Found mgmt event for a discovery result, after applying
 * the active discovery filters (RSSI/UUID filter, limited discovery).
 * EIR data, an optional Class of Device field and the scan response are
 * concatenated into the event's eir field.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the Class of Device as an EIR field when one was given
	 * and the copied EIR data does not already contain one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	/* eir_len reported to userspace covers EIR plus scan response. */
	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
9632 
9633 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9634 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9635 {
9636 	struct mgmt_ev_device_found *ev;
9637 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9638 	u16 eir_len;
9639 
9640 	ev = (struct mgmt_ev_device_found *) buf;
9641 
9642 	memset(buf, 0, sizeof(buf));
9643 
9644 	bacpy(&ev->addr.bdaddr, bdaddr);
9645 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9646 	ev->rssi = rssi;
9647 
9648 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9649 				  name_len);
9650 
9651 	ev->eir_len = cpu_to_le16(eir_len);
9652 
9653 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
9654 }
9655 
9656 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9657 {
9658 	struct mgmt_ev_discovering ev;
9659 
9660 	bt_dev_dbg(hdev, "discovering %u", discovering);
9661 
9662 	memset(&ev, 0, sizeof(ev));
9663 	ev.type = hdev->discovery.type;
9664 	ev.discovering = discovering;
9665 
9666 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9667 }
9668 
9669 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9670 {
9671 	struct mgmt_ev_controller_suspend ev;
9672 
9673 	ev.suspend_state = state;
9674 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9675 }
9676 
9677 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9678 		   u8 addr_type)
9679 {
9680 	struct mgmt_ev_controller_resume ev;
9681 
9682 	ev.wake_reason = reason;
9683 	if (bdaddr) {
9684 		bacpy(&ev.addr.bdaddr, bdaddr);
9685 		ev.addr.type = addr_type;
9686 	} else {
9687 		memset(&ev.addr, 0, sizeof(ev.addr));
9688 	}
9689 
9690 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9691 }
9692 
/* Descriptor for the mgmt control channel, registered with the HCI
 * socket layer; commands are dispatched via the mgmt_handlers table.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9699 
/* Register the mgmt control channel with the HCI socket layer. Returns
 * 0 on success or a negative error from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9704 
/* Unregister the mgmt control channel registered by mgmt_init(). */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9709