xref: /openbmc/linux/net/bluetooth/mgmt.c (revision 6d425d7c)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 
43 #define MGMT_VERSION	1
44 #define MGMT_REVISION	21
45 
/* Commands advertised to trusted (privileged) sockets via
 * MGMT_OP_READ_COMMANDS. Untrusted sockets get the reduced
 * mgmt_untrusted_commands list instead.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
132 
/* Events advertised to trusted (privileged) sockets via
 * MGMT_OP_READ_COMMANDS. Untrusted sockets get the reduced
 * mgmt_untrusted_events list instead.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
};
177 
/* Read-only command subset permitted for untrusted sockets; all
 * state-changing operations are excluded.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
190 
/* Event subset delivered to untrusted sockets; events that could leak
 * security-sensitive data (keys, pairing, discovery) are excluded.
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
205 
206 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
207 
208 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
209 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
210 
/* HCI to MGMT error code conversion table, indexed directly by the HCI
 * status code. HCI codes beyond the end of the table are mapped to
 * MGMT_STATUS_FAILED by mgmt_status().
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
278 
279 static u8 mgmt_status(u8 hci_status)
280 {
281 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
282 		return mgmt_status_table[hci_status];
283 
284 	return MGMT_STATUS_FAILED;
285 }
286 
/* Send an index-related event on the control channel, restricted to
 * sockets matching @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
293 
/* Send a control-channel event restricted to sockets matching @flag,
 * optionally skipping the originating socket @skip_sk.
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
300 
/* Send a control-channel event to all trusted sockets, optionally
 * skipping the originating socket @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
307 
308 static u8 le_addr_type(u8 mgmt_addr_type)
309 {
310 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
311 		return ADDR_LE_DEV_PUBLIC;
312 	else
313 		return ADDR_LE_DEV_RANDOM;
314 }
315 
316 void mgmt_fill_version_info(void *ver)
317 {
318 	struct mgmt_rp_read_version *rp = ver;
319 
320 	rp->version = MGMT_VERSION;
321 	rp->revision = cpu_to_le16(MGMT_REVISION);
322 }
323 
324 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
325 			u16 data_len)
326 {
327 	struct mgmt_rp_read_version rp;
328 
329 	bt_dev_dbg(hdev, "sock %p", sk);
330 
331 	mgmt_fill_version_info(&rp);
332 
333 	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
334 				 &rp, sizeof(rp));
335 }
336 
337 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
338 			 u16 data_len)
339 {
340 	struct mgmt_rp_read_commands *rp;
341 	u16 num_commands, num_events;
342 	size_t rp_size;
343 	int i, err;
344 
345 	bt_dev_dbg(hdev, "sock %p", sk);
346 
347 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
348 		num_commands = ARRAY_SIZE(mgmt_commands);
349 		num_events = ARRAY_SIZE(mgmt_events);
350 	} else {
351 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
352 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
353 	}
354 
355 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
356 
357 	rp = kmalloc(rp_size, GFP_KERNEL);
358 	if (!rp)
359 		return -ENOMEM;
360 
361 	rp->num_commands = cpu_to_le16(num_commands);
362 	rp->num_events = cpu_to_le16(num_events);
363 
364 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
365 		__le16 *opcode = rp->opcodes;
366 
367 		for (i = 0; i < num_commands; i++, opcode++)
368 			put_unaligned_le16(mgmt_commands[i], opcode);
369 
370 		for (i = 0; i < num_events; i++, opcode++)
371 			put_unaligned_le16(mgmt_events[i], opcode);
372 	} else {
373 		__le16 *opcode = rp->opcodes;
374 
375 		for (i = 0; i < num_commands; i++, opcode++)
376 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
377 
378 		for (i = 0; i < num_events; i++, opcode++)
379 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
380 	}
381 
382 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
383 				rp, rp_size);
384 	kfree(rp);
385 
386 	return err;
387 }
388 
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the indexes of all
 * configured primary controllers. Two passes over hci_dev_list are
 * made under the read lock: the first sizes the reply, the second
 * fills it while re-applying (stricter) filters, so the final count
 * can only be less than or equal to the allocation.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* Each index is a __le16, hence 2 bytes per entry. Allocation
	 * must be atomic because the list lock is held.
	 */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still being set up or configured, or
		 * claimed by a user channel.
		 */
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length with the possibly smaller count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
448 
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list()
 * but reporting only UNconfigured primary controllers (the
 * HCI_UNCONFIGURED test is inverted). Same two-pass scheme under the
 * list read lock.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* 2 bytes (__le16) per index; atomic allocation under the lock. */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute with the final (possibly smaller) count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
508 
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: reply with all primary and
 * AMP controllers, each tagged with a type (0x00 configured primary,
 * 0x01 unconfigured primary, 0x02 AMP) and its bus. Calling this also
 * switches the socket from legacy index events to extended ones.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* struct_size() accounts for the flexible rp->entry[] array. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
582 
583 static bool is_configured(struct hci_dev *hdev)
584 {
585 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
586 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
587 		return false;
588 
589 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
590 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
591 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
592 		return false;
593 
594 	return true;
595 }
596 
597 static __le32 get_missing_options(struct hci_dev *hdev)
598 {
599 	u32 options = 0;
600 
601 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
602 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
603 		options |= MGMT_OPTION_EXTERNAL_CONFIG;
604 
605 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
606 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
607 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
608 		options |= MGMT_OPTION_PUBLIC_ADDRESS;
609 
610 	return cpu_to_le32(options);
611 }
612 
/* Broadcast MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options
 * mask to sockets that opted into option events, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
620 
/* Complete @opcode for @sk with the current missing-options mask as
 * the response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
628 
/* Handler for MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer ID
 * plus which configuration options this controller supports and which
 * of them are still missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Changing the public address is only supported when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
656 
/* Build the bitmask of PHYs the controller supports, derived from the
 * BR/EDR LMP feature bits (slot counts, EDR 2M/3M rates) and the LE
 * feature bits (1M always, 2M and Coded when advertised).
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is mandatory for any BR/EDR controller. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		/* EDR rates are only meaningful when 2M is available;
		 * 3M additionally requires 2M support.
		 */
		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for any LE controller. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
708 
/* Build the bitmask of currently selected PHYs. For BR/EDR this is
 * derived from hdev->pkt_type; note the EDR packet-type bits have
 * inverted polarity (a set 2DHx/3DHx bit means "do not use"), hence
 * the negated tests. For LE it mirrors the default TX/RX PHY masks.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot cannot be deselected. */
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
771 
772 static u32 get_configurable_phys(struct hci_dev *hdev)
773 {
774 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
775 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
776 }
777 
/* Build the bitmask of settings this controller can support at all
 * (as opposed to those currently active - see get_current_settings()),
 * based on its BR/EDR and LE capabilities and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* These are always supported regardless of capabilities. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable relies on HCI 1.2+ page scan modes. */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			/* High Speed additionally needs CONFIG_BT_HS. */
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;

		/* When the experimental feature for LL Privacy support is
		 * enabled, then advertising is no longer supported.
		 */
		if (!hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
829 
/* Build the bitmask of settings currently active on the controller,
 * derived from the runtime hdev flags (compare get_supported_settings()
 * for what could be enabled).
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
900 
/* Look up a pending mgmt command for @opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
905 
/* Like pending_find() but additionally match on the user_data pointer
 * attached to the pending command.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
912 
/* Return the AD flags byte value (LE_AD_GENERAL, LE_AD_LIMITED or 0)
 * reflecting the discoverable mode. A pending SET_DISCOVERABLE command
 * takes precedence over the current hdev flags since those have not
 * reached their final values yet.
 */
u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		/* 0x01 = general discoverable, 0x02 = limited. */
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}
936 
/* Return whether the controller is (or is about to become)
 * connectable. A pending SET_CONNECTABLE command takes precedence
 * over the current HCI_CONNECTABLE flag.
 */
bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
953 
/* Delayed work (hdev->service_cache): once the service cache period
 * ends, clear HCI_SERVICE_CACHE and push the (possibly changed) EIR
 * data and class of device to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do if the cache flag was not set. */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
974 
/* Delayed work (hdev->rpa_expired): mark the resolvable private
 * address as expired and, if advertising is active, restart it so a
 * fresh RPA gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
999 
/* One-time per-controller mgmt initialization, triggered the first
 * time a mgmt socket touches the device. Idempotent: the HCI_MGMT
 * test-and-set guards against repeated setup.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1015 
/* MGMT_OP_READ_INFO handler: fill in a mgmt_rp_read_info reply with the
 * controller's address, version, settings, class and names.  The reply
 * is fixed-size; data/data_len from the request are unused.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Snapshot all fields under the device lock for consistency */
	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1045 
/* Append the controller's EIR-formatted information (class of device,
 * appearance, complete and short local name) to @eir and return the
 * number of bytes written.  Caller must provide a buffer large enough
 * for the worst case (see callers using 512-byte buffers).
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	/* Class of device is only meaningful for BR/EDR */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	/* Appearance is an LE-only concept */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1069 
/* MGMT_OP_READ_EXT_INFO handler: variable-length version of READ_INFO
 * whose reply carries class/appearance/names as EIR-encoded TLVs.
 * Calling it switches the socket from the legacy class/name events to
 * the extended info changed event.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* 512 bytes covers the fixed reply header plus the worst-case
	 * EIR data produced by append_eir_data_to_buf().
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1109 
/* Emit MGMT_EV_EXT_INFO_CHANGED (carrying fresh EIR data) to all mgmt
 * sockets that opted into extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	/* Same worst-case sizing as read_ext_controller_info() */
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1125 
1126 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1127 {
1128 	__le32 settings = cpu_to_le32(get_current_settings(hdev));
1129 
1130 	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1131 				 sizeof(settings));
1132 }
1133 
1134 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1135 {
1136 	bt_dev_dbg(hdev, "status 0x%02x", status);
1137 
1138 	if (hci_conn_count(hdev) == 0) {
1139 		cancel_delayed_work(&hdev->power_off);
1140 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
1141 	}
1142 }
1143 
1144 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1145 {
1146 	struct mgmt_ev_advertising_added ev;
1147 
1148 	ev.instance = instance;
1149 
1150 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1151 }
1152 
1153 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1154 			      u8 instance)
1155 {
1156 	struct mgmt_ev_advertising_removed ev;
1157 
1158 	ev.instance = instance;
1159 
1160 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1161 }
1162 
1163 static void cancel_adv_timeout(struct hci_dev *hdev)
1164 {
1165 	if (hdev->adv_instance_timeout) {
1166 		hdev->adv_instance_timeout = 0;
1167 		cancel_delayed_work(&hdev->adv_instance_expire);
1168 	}
1169 }
1170 
/* Build and run a single HCI request that quiesces the controller before
 * powering off: disable page/inquiry scan, remove advertising instances,
 * stop discovery and abort every active connection.
 *
 * Returns the hci_req_run() result; -ENODATA means nothing needed doing.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* Instance 0x00 means: remove all advertising instances */
	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	/* Only flag discovery as stopping once the request is in flight */
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1204 
/* MGMT_OP_SET_POWERED handler: power the controller up or down.
 *
 * Validates the mode value, rejects concurrent SET_POWERED commands,
 * short-circuits no-op requests, and otherwise queues the asynchronous
 * power-on work or the clean-up + delayed power-off sequence.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one power transition may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: answer with current settings */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1259 
1260 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1261 {
1262 	__le32 ev = cpu_to_le32(get_current_settings(hdev));
1263 
1264 	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1265 				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1266 }
1267 
/* Public wrapper: broadcast new settings to all subscribed sockets */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1272 
/* Accumulator passed to mgmt_pending_foreach() callbacks */
struct cmd_lookup {
	struct sock *sk;	/* first responder's socket (held) or NULL */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1278 
/* mgmt_pending_foreach() callback: answer @cmd with current settings,
 * detach and free it, and remember (with a reference) the first socket
 * seen so the caller can skip it when broadcasting new settings.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Detach before freeing; mgmt_pending_free drops the cmd's sk ref */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1294 
/* mgmt_pending_foreach() callback: fail @cmd with the status pointed to
 * by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1302 
1303 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1304 {
1305 	if (cmd->cmd_complete) {
1306 		u8 *status = data;
1307 
1308 		cmd->cmd_complete(cmd, *status);
1309 		mgmt_pending_remove(cmd);
1310 
1311 		return;
1312 	}
1313 
1314 	cmd_status_rsp(cmd, data);
1315 }
1316 
/* cmd_complete handler that echoes the command's own parameters back */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1322 
/* cmd_complete handler for commands whose parameters begin with a
 * mgmt_addr_info; only that address portion is echoed back.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1328 
1329 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1330 {
1331 	if (!lmp_bredr_capable(hdev))
1332 		return MGMT_STATUS_NOT_SUPPORTED;
1333 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1334 		return MGMT_STATUS_REJECTED;
1335 	else
1336 		return MGMT_STATUS_SUCCESS;
1337 }
1338 
1339 static u8 mgmt_le_support(struct hci_dev *hdev)
1340 {
1341 	if (!lmp_le_capable(hdev))
1342 		return MGMT_STATUS_NOT_SUPPORTED;
1343 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1344 		return MGMT_STATUS_REJECTED;
1345 	else
1346 		return MGMT_STATUS_SUCCESS;
1347 }
1348 
/* HCI completion for a SET_DISCOVERABLE request: reply to the pending
 * mgmt command and, on success, arm the discoverable timeout and
 * broadcast the new settings.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the optimistic flag set by set_discoverable() */
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arming of the timeout was deferred to this completion handler */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1383 
/* MGMT_OP_SET_DISCOVERABLE handler.
 *
 * cp->val: 0x00 off, 0x01 general discoverable, 0x02 limited
 * discoverable; cp->timeout: seconds before discoverability is turned
 * off again (required for limited, forbidden for off).
 *
 * Powered-off controllers only get their flags updated; powered ones
 * either have just the timeout refreshed (mode unchanged) or queue the
 * asynchronous discoverable update, completed by
 * mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be honoured while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Discoverable and connectable transitions share scan state */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Flags are set optimistically; the complete handler rolls back
	 * HCI_LIMITED_DISCOVERABLE on failure.
	 */
	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1513 
/* HCI completion for a SET_CONNECTABLE request: answer the pending mgmt
 * command and broadcast the new settings on success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1541 
/* Flag-only connectable update used while the controller is powered
 * off: adjust HCI_CONNECTABLE (clearing HCI_DISCOVERABLE too when
 * disabling), reply with the settings and broadcast them if changed.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		/* Non-connectable implies non-discoverable */
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1570 
/* MGMT_OP_SET_CONNECTABLE handler: enable/disable page scan (and the
 * LE equivalent).  Powered-off devices take the flag-only path; powered
 * ones update flags and queue the asynchronous connectable update,
 * completed by mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	/* Discoverable and connectable transitions share scan state */
	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also ends discoverability */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1627 
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag.  Purely a
 * host-side setting — no HCI traffic — except that in limited privacy
 * mode a change may require refreshing the advertising address.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1670 
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR authentication.
 * Powered-off devices only flip HCI_LINK_SECURITY; powered ones issue
 * HCI_OP_WRITE_AUTH_ENABLE with a pending command tracking completion.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* HCI_AUTH mirrors the controller's current auth-enable state */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1739 
/* MGMT_OP_SET_SSP handler: toggle Secure Simple Pairing.  Powered-off
 * devices only flip flags (disabling SSP also disables High Speed);
 * powered ones send HCI_OP_WRITE_SSP_MODE, first turning off SSP debug
 * mode if it was active.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* HS depends on SSP; clear it too when disabling */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off debug keys mode (cp->val is 0x00
	 * here, which is the value the debug mode command needs).
	 */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1820 
/* MGMT_OP_SET_HS handler: toggle the High Speed (AMP) flag.  This is a
 * host-side flag only, but it requires CONFIG_BT_HS, SSP being enabled,
 * and disabling is rejected while the controller is powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* An in-flight SET_SSP could still turn SSP (and thus HS) off */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		/* Disabling HS while powered is not allowed */
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1881 
/* HCI completion for set_le(): answer all pending SET_LE commands and,
 * when LE was just enabled, refresh the default advertising and scan
 * response data and restart background scanning.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	/* settings_rsp() records (and holds) the first responder's socket
	 * in match.sk so new_settings() can skip it.
	 */
	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1928 
/* MGMT_OP_SET_LE handler: toggle LE host support.  LE-only controllers
 * may never have LE switched off.  Powered-off devices (or no-op
 * requests) only update flags; powered ones issue
 * HCI_OP_WRITE_LE_HOST_SUPPORTED, first tearing down advertising when
 * disabling.  Completion is handled by le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE removes all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		/* Advertising cannot stay on without LE */
		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Tear advertising down before switching LE support off */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2035 
2036 /* This is a helper function to test for pending mgmt commands that can
2037  * cause CoD or EIR HCI commands. We can only allow one such pending
2038  * mgmt command at a time since otherwise we cannot easily track what
2039  * the current values are, will be, and based on that calculate if a new
2040  * HCI command needs to be sent and if yes with what value.
2041  */
2042 static bool pending_eir_or_class(struct hci_dev *hdev)
2043 {
2044 	struct mgmt_pending_cmd *cmd;
2045 
2046 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2047 		switch (cmd->opcode) {
2048 		case MGMT_OP_ADD_UUID:
2049 		case MGMT_OP_REMOVE_UUID:
2050 		case MGMT_OP_SET_DEV_CLASS:
2051 		case MGMT_OP_SET_POWERED:
2052 			return true;
2053 		}
2054 	}
2055 
2056 	return false;
2057 }
2058 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32-bit UUIDs share its last 12 bytes.
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2063 
2064 static u8 get_uuid_size(const u8 *uuid)
2065 {
2066 	u32 val;
2067 
2068 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2069 		return 128;
2070 
2071 	val = get_unaligned_le32(&uuid[12]);
2072 	if (val > 0xffff)
2073 		return 32;
2074 
2075 	return 16;
2076 }
2077 
/* Complete the pending class-changing command @mgmt_op (ADD_UUID,
 * REMOVE_UUID or SET_DEV_CLASS), replying with the current class of
 * device as required by the mgmt API.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2096 
/* HCI request completion for add_uuid() */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2103 
/* MGMT_OP_ADD_UUID handler: record a new service UUID and push the
 * resulting class-of-device and EIR updates to the controller.  When
 * the request produces no HCI traffic (-ENODATA, e.g. powered off) the
 * command completes immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class-changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI work needed: complete right away with the class */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2161 
2162 static bool enable_service_cache(struct hci_dev *hdev)
2163 {
2164 	if (!hdev_is_powered(hdev))
2165 		return false;
2166 
2167 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2168 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2169 				   CACHE_TIMEOUT);
2170 		return true;
2171 	}
2172 
2173 	return false;
2174 }
2175 
/* HCI request completion callback for MGMT_OP_REMOVE_UUID */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2182 
/* MGMT_OP_REMOVE_UUID handler: drop one UUID (or all of them when the
 * all-zero wildcard is given) and refresh device class and EIR data.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Serialize against other EIR/class-modifying commands */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache got (re)armed, the controller
		 * update is deferred and we can answer right away.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* _safe variant required since matching entries are deleted */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request: nothing to send, reply immediately */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Response deferred to remove_uuid_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2261 
/* HCI request completion callback for MGMT_OP_SET_DEV_CLASS */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2268 
/* MGMT_OP_SET_DEV_CLASS handler: update major/minor device class and,
 * if powered, push the new class (and possibly cached EIR) to the
 * controller.  BR/EDR only.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Serialize against other EIR/class-modifying commands */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Lower two bits of minor and upper three bits of major are
	 * reserved and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off just store the values; they are written to
	 * the controller on power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* NOTE(review): the lock is dropped around
		 * cancel_delayed_work_sync(), presumably because the
		 * service-cache work may itself take hdev->lock —
		 * confirm against the work handler before changing.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* Empty request: class already current, reply now */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Response deferred to set_class_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2339 
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the whole set of stored
 * BR/EDR link keys with the list supplied by userspace and update the
 * keep-debug-keys policy.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Largest key_count that still fits a u16 total message length */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The message length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry before touching the existing key store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Policy flag flipped: notify all mgmt listeners */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Skip keys the administrator has explicitly blocked */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2428 
2429 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2430 			   u8 addr_type, struct sock *skip_sk)
2431 {
2432 	struct mgmt_ev_device_unpaired ev;
2433 
2434 	bacpy(&ev.addr.bdaddr, bdaddr);
2435 	ev.addr.type = addr_type;
2436 
2437 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2438 			  skip_sk);
2439 }
2440 
/* MGMT_OP_UNPAIR_DEVICE handler: remove all pairing data (link key for
 * BR/EDR, LTK/IRK via SMP for LE) and optionally disconnect the link.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean flag: only 0x00/0x01 are valid */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* Not connected: the stored parameters can go right away */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Response deferred until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2568 
/* MGMT_OP_DISCONNECT handler: terminate the BR/EDR or LE connection to
 * the given address.  The response is deferred via a pending command
 * until the disconnect completes.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one DISCONNECT may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED means there is no established link to drop */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2634 
2635 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2636 {
2637 	switch (link_type) {
2638 	case LE_LINK:
2639 		switch (addr_type) {
2640 		case ADDR_LE_DEV_PUBLIC:
2641 			return BDADDR_LE_PUBLIC;
2642 
2643 		default:
2644 			/* Fallback to LE Random address type */
2645 			return BDADDR_LE_RANDOM;
2646 		}
2647 
2648 	default:
2649 		/* Fallback to BR/EDR type */
2650 		return BDADDR_BREDR;
2651 	}
2652 }
2653 
2654 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2655 			   u16 data_len)
2656 {
2657 	struct mgmt_rp_get_connections *rp;
2658 	struct hci_conn *c;
2659 	int err;
2660 	u16 i;
2661 
2662 	bt_dev_dbg(hdev, "sock %p", sk);
2663 
2664 	hci_dev_lock(hdev);
2665 
2666 	if (!hdev_is_powered(hdev)) {
2667 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2668 				      MGMT_STATUS_NOT_POWERED);
2669 		goto unlock;
2670 	}
2671 
2672 	i = 0;
2673 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2674 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2675 			i++;
2676 	}
2677 
2678 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2679 	if (!rp) {
2680 		err = -ENOMEM;
2681 		goto unlock;
2682 	}
2683 
2684 	i = 0;
2685 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2686 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2687 			continue;
2688 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2689 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2690 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2691 			continue;
2692 		i++;
2693 	}
2694 
2695 	rp->conn_count = cpu_to_le16(i);
2696 
2697 	/* Recalculate length in case of filtered SCO connections, etc */
2698 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2699 				struct_size(rp, addr, i));
2700 
2701 	kfree(rp);
2702 
2703 unlock:
2704 	hci_dev_unlock(hdev);
2705 	return err;
2706 }
2707 
/* Queue a HCI PIN Code Negative Reply and register a pending mgmt
 * command so the final status can be reported asynchronously.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	/* The HCI command only carries the remote address */
	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2728 
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-digit PIN; anything shorter
	 * gets turned into a negative reply toward the controller and
	 * an INVALID_PARAMS status toward userspace.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2790 
2791 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2792 			     u16 len)
2793 {
2794 	struct mgmt_cp_set_io_capability *cp = data;
2795 
2796 	bt_dev_dbg(hdev, "sock %p", sk);
2797 
2798 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2799 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2800 				       MGMT_STATUS_INVALID_PARAMS);
2801 
2802 	hci_dev_lock(hdev);
2803 
2804 	hdev->io_capability = cp->io_capability;
2805 
2806 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2807 
2808 	hci_dev_unlock(hdev);
2809 
2810 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2811 				 NULL, 0);
2812 }
2813 
2814 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2815 {
2816 	struct hci_dev *hdev = conn->hdev;
2817 	struct mgmt_pending_cmd *cmd;
2818 
2819 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2820 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2821 			continue;
2822 
2823 		if (cmd->user_data != conn)
2824 			continue;
2825 
2826 		return cmd;
2827 	}
2828 
2829 	return NULL;
2830 }
2831 
/* Finish a PAIR_DEVICE command: send the final response, detach all
 * pairing callbacks from the connection and release the references
 * taken when the command was set up.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Drops the hold taken when the connection was initiated */
	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Matches the hci_conn_get() stored in cmd->user_data */
	hci_conn_put(conn);

	return err;
}
2860 
2861 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2862 {
2863 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2864 	struct mgmt_pending_cmd *cmd;
2865 
2866 	cmd = find_pairing(conn);
2867 	if (cmd) {
2868 		cmd->cmd_complete(cmd, status);
2869 		mgmt_pending_remove(cmd);
2870 	}
2871 }
2872 
2873 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2874 {
2875 	struct mgmt_pending_cmd *cmd;
2876 
2877 	BT_DBG("status %u", status);
2878 
2879 	cmd = find_pairing(conn);
2880 	if (!cmd) {
2881 		BT_DBG("Unable to find a pending command");
2882 		return;
2883 	}
2884 
2885 	cmd->cmd_complete(cmd, mgmt_status(status));
2886 	mgmt_pending_remove(cmd);
2887 }
2888 
2889 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2890 {
2891 	struct mgmt_pending_cmd *cmd;
2892 
2893 	BT_DBG("status %u", status);
2894 
2895 	if (!status)
2896 		return;
2897 
2898 	cmd = find_pairing(conn);
2899 	if (!cmd) {
2900 		BT_DBG("Unable to find a pending command");
2901 		return;
2902 	}
2903 
2904 	cmd->cmd_complete(cmd, mgmt_status(status));
2905 	mgmt_pending_remove(cmd);
2906 }
2907 
2908 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2909 		       u16 len)
2910 {
2911 	struct mgmt_cp_pair_device *cp = data;
2912 	struct mgmt_rp_pair_device rp;
2913 	struct mgmt_pending_cmd *cmd;
2914 	u8 sec_level, auth_type;
2915 	struct hci_conn *conn;
2916 	int err;
2917 
2918 	bt_dev_dbg(hdev, "sock %p", sk);
2919 
2920 	memset(&rp, 0, sizeof(rp));
2921 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2922 	rp.addr.type = cp->addr.type;
2923 
2924 	if (!bdaddr_type_is_valid(cp->addr.type))
2925 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2926 					 MGMT_STATUS_INVALID_PARAMS,
2927 					 &rp, sizeof(rp));
2928 
2929 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2930 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2931 					 MGMT_STATUS_INVALID_PARAMS,
2932 					 &rp, sizeof(rp));
2933 
2934 	hci_dev_lock(hdev);
2935 
2936 	if (!hdev_is_powered(hdev)) {
2937 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2938 					MGMT_STATUS_NOT_POWERED, &rp,
2939 					sizeof(rp));
2940 		goto unlock;
2941 	}
2942 
2943 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2944 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2945 					MGMT_STATUS_ALREADY_PAIRED, &rp,
2946 					sizeof(rp));
2947 		goto unlock;
2948 	}
2949 
2950 	sec_level = BT_SECURITY_MEDIUM;
2951 	auth_type = HCI_AT_DEDICATED_BONDING;
2952 
2953 	if (cp->addr.type == BDADDR_BREDR) {
2954 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2955 				       auth_type, CONN_REASON_PAIR_DEVICE);
2956 	} else {
2957 		u8 addr_type = le_addr_type(cp->addr.type);
2958 		struct hci_conn_params *p;
2959 
2960 		/* When pairing a new device, it is expected to remember
2961 		 * this device for future connections. Adding the connection
2962 		 * parameter information ahead of time allows tracking
2963 		 * of the peripheral preferred values and will speed up any
2964 		 * further connection establishment.
2965 		 *
2966 		 * If connection parameters already exist, then they
2967 		 * will be kept and this function does nothing.
2968 		 */
2969 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2970 
2971 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2972 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2973 
2974 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
2975 					   sec_level, HCI_LE_CONN_TIMEOUT,
2976 					   CONN_REASON_PAIR_DEVICE);
2977 	}
2978 
2979 	if (IS_ERR(conn)) {
2980 		int status;
2981 
2982 		if (PTR_ERR(conn) == -EBUSY)
2983 			status = MGMT_STATUS_BUSY;
2984 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
2985 			status = MGMT_STATUS_NOT_SUPPORTED;
2986 		else if (PTR_ERR(conn) == -ECONNREFUSED)
2987 			status = MGMT_STATUS_REJECTED;
2988 		else
2989 			status = MGMT_STATUS_CONNECT_FAILED;
2990 
2991 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2992 					status, &rp, sizeof(rp));
2993 		goto unlock;
2994 	}
2995 
2996 	if (conn->connect_cfm_cb) {
2997 		hci_conn_drop(conn);
2998 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2999 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3000 		goto unlock;
3001 	}
3002 
3003 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3004 	if (!cmd) {
3005 		err = -ENOMEM;
3006 		hci_conn_drop(conn);
3007 		goto unlock;
3008 	}
3009 
3010 	cmd->cmd_complete = pairing_complete;
3011 
3012 	/* For LE, just connecting isn't a proof that the pairing finished */
3013 	if (cp->addr.type == BDADDR_BREDR) {
3014 		conn->connect_cfm_cb = pairing_complete_cb;
3015 		conn->security_cfm_cb = pairing_complete_cb;
3016 		conn->disconn_cfm_cb = pairing_complete_cb;
3017 	} else {
3018 		conn->connect_cfm_cb = le_pairing_complete_cb;
3019 		conn->security_cfm_cb = le_pairing_complete_cb;
3020 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3021 	}
3022 
3023 	conn->io_capability = cp->io_cap;
3024 	cmd->user_data = hci_conn_get(conn);
3025 
3026 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3027 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3028 		cmd->cmd_complete(cmd, 0);
3029 		mgmt_pending_remove(cmd);
3030 	}
3031 
3032 	err = 0;
3033 
3034 unlock:
3035 	hci_dev_unlock(hdev);
3036 	return err;
3037 }
3038 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress PAIR_DEVICE
 * command for the given address, remove any partial pairing data and
 * tear down the link if it was created only for pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The supplied address must match the pairing in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3095 
/* Shared implementation for all user pairing responses (PIN negative
 * reply, user confirm/passkey reply and their negatives).  For LE the
 * response is routed through SMP; for BR/EDR the corresponding HCI
 * command is sent and completion is deferred via a pending command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go through SMP, not the HCI command */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3166 
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp() (no passkey, hence the trailing 0).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3178 
/* MGMT_OP_USER_CONFIRM_REPLY handler: thin wrapper around
 * user_pairing_resp() with an explicit length check.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3194 
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper around
 * user_pairing_resp().
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3206 
/* MGMT_OP_USER_PASSKEY_REPLY handler: thin wrapper around
 * user_pairing_resp(), forwarding the user-entered passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3218 
3219 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3220 				  void *data, u16 len)
3221 {
3222 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3223 
3224 	bt_dev_dbg(hdev, "sock %p", sk);
3225 
3226 	return user_pairing_resp(sk, hdev, &cp->addr,
3227 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3228 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3229 }
3230 
/* Expire the current advertising instance if it advertises any of the
 * given MGMT_ADV_FLAG_* bits, and schedule the next instance in its
 * place.  Callers use this when data that feeds the instance (e.g.
 * local name or appearance) has changed, so the instance gets rebuilt
 * with fresh data.
 */
static void adv_expire(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;
	struct hci_request req;
	int err;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return;

	cancel_adv_timeout(hdev);

	/* hci_get_next_instance() may return the same instance again;
	 * either way it is rescheduled (force == true) below.
	 */
	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return;

	hci_req_init(&req, hdev);
	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
					      true);
	if (err)
		return;

	hci_req_run(&req, NULL);
}
3259 
/* Completion callback for the HCI request issued by set_local_name().
 * Reports the result to the pending mgmt command; on success an
 * advertising instance that carries the local name is expired so it
 * gets rescheduled with the new name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	/* The pending command may already be gone (e.g. removed by an
	 * error path); nothing to report then.
	 */
	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Only refresh advertising if LE advertising is active */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3291 
/* Handler for MGMT_OP_SET_LOCAL_NAME: update the device name and short
 * name.
 *
 * If nothing changed, or the controller is powered off, the command
 * completes immediately (only cached values are updated in the
 * powered-off case).  Otherwise an HCI request is issued to push the
 * name to the controller (name, EIR and, when advertising, scan
 * response data) and the command completes from set_name_complete().
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* Cache the new short name unconditionally */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		/* NOTE: 'failed' is also the exit path for success */
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3361 
/* Handler for MGMT_OP_SET_APPEARANCE: store the LE appearance value.
 * LE-only.  If the value changed while LE advertising is active, the
 * current advertising instance carrying the appearance is expired so
 * it gets rescheduled with the new value.
 */
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	/* Only act (and notify) when the value actually changes */
	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
3395 
/* Handler for MGMT_OP_GET_PHY_CONFIGURATION: report the supported,
 * currently selected and configurable PHYs of the controller.
 */
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
3416 
/* Emit MGMT_EV_PHY_CONFIGURATION_CHANGED with the current PHY
 * selection to all mgmt sockets except @skip (the socket that caused
 * the change, which gets a command response instead).
 */
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
3428 
/* Completion callback for the HCI_OP_LE_SET_DEFAULT_PHY request issued
 * by set_phy_configuration().  Finishes the pending mgmt command and,
 * on success, broadcasts the PHY configuration change.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Notify everyone except the requesting socket */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3459 
/* Handler for MGMT_OP_SET_PHY_CONFIGURATION: select which PHYs the
 * controller should use.
 *
 * The BR/EDR selections map onto hdev->pkt_type bits and take effect on
 * future connections; the LE selections are pushed to the controller
 * with HCI_OP_LE_SET_DEFAULT_PHY and the command then completes from
 * set_default_phy_complete().
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Reject any selection outside the supported set */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs must always remain selected */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Basic-rate multi-slot selections enable the corresponding
	 * packet-type bits directly.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	/* Note the inverted sense for EDR: selecting an EDR PHY clears
	 * its packet-type bit, deselecting it sets the bit.
	 */
	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE part of the selection is unchanged there is no HCI
	 * command to send; complete immediately (and broadcast if the
	 * BR/EDR packet types changed).
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* An empty TX/RX selection means "no preference" for that
	 * direction (the respective all_phys bit).
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3614 
3615 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3616 			    u16 len)
3617 {
3618 	int err = MGMT_STATUS_SUCCESS;
3619 	struct mgmt_cp_set_blocked_keys *keys = data;
3620 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3621 				   sizeof(struct mgmt_blocked_key_info));
3622 	u16 key_count, expected_len;
3623 	int i;
3624 
3625 	bt_dev_dbg(hdev, "sock %p", sk);
3626 
3627 	key_count = __le16_to_cpu(keys->key_count);
3628 	if (key_count > max_key_count) {
3629 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3630 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3631 				       MGMT_STATUS_INVALID_PARAMS);
3632 	}
3633 
3634 	expected_len = struct_size(keys, keys, key_count);
3635 	if (expected_len != len) {
3636 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3637 			   expected_len, len);
3638 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3639 				       MGMT_STATUS_INVALID_PARAMS);
3640 	}
3641 
3642 	hci_dev_lock(hdev);
3643 
3644 	hci_blocked_keys_clear(hdev);
3645 
3646 	for (i = 0; i < keys->key_count; ++i) {
3647 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3648 
3649 		if (!b) {
3650 			err = MGMT_STATUS_NO_RESOURCES;
3651 			break;
3652 		}
3653 
3654 		b->type = keys->keys[i].type;
3655 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3656 		list_add_rcu(&b->list, &hdev->blocked_keys);
3657 	}
3658 	hci_dev_unlock(hdev);
3659 
3660 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3661 				err, NULL, 0);
3662 }
3663 
/* Handler for MGMT_OP_SET_WIDEBAND_SPEECH: toggle the
 * HCI_WIDEBAND_SPEECH_ENABLED setting.  Only available when the driver
 * declares HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, and the value may only
 * be changed while the controller is powered off.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reject changing the value while powered; only a no-op
	 * request (same value) is allowed in that state.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Broadcast new settings only when the flag actually flipped */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3719 
/* Handler for MGMT_OP_READ_CONTROLLER_CAP: build an EIR-encoded list of
 * controller capabilities (security flags, maximum encryption key
 * sizes and, when available, the LE TX power range).
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];	/* worst case: rp header + 3 + 4 + 4 + 4 EIR bytes */
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3786 
/* Experimental-feature UUIDs used by the MGMT_OP_*_EXP_FEATURES
 * commands.  The byte arrays hold the UUID in reverse (little-endian)
 * byte order relative to the string form shown in each comment.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 simult_central_periph_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3818 
/* Handler for MGMT_OP_READ_EXP_FEATURES_INFO: list the experimental
 * features applicable to the given index (NULL hdev means the
 * non-controller index) together with their current flag bits.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[102];   /* Enough space for 5 features: 2 + 20 * 5 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug mode is only exposed on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	/* Simultaneous central/peripheral support, derived from the
	 * controller's supported LE states.
	 */
	if (hdev) {
		if (test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) &&
		    (hdev->le_states[4] & 0x08) &&	/* Central */
		    (hdev->le_states[4] & 0x40) &&	/* Peripheral */
		    (hdev->le_states[3] & 0x10))	/* Simultaneous */
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, simult_central_periph_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && use_ll_privacy(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Quality report, only when the driver provides a callback */
	if (hdev && hdev->set_quality_report) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Codec offload, only when the driver provides a callback */
	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
3899 
3900 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
3901 					  struct sock *skip)
3902 {
3903 	struct mgmt_ev_exp_feature_changed ev;
3904 
3905 	memset(&ev, 0, sizeof(ev));
3906 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
3907 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
3908 
3909 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
3910 				  &ev, sizeof(ev),
3911 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3912 
3913 }
3914 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Notify subscribed sockets that the debug feature was toggled */
static int exp_debug_feature_changed(bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;
	u32 flags = enabled ? BIT(0) : 0;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, debug_uuid, 16);
	ev.flags = cpu_to_le32(flags);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL, &ev,
				  sizeof(ev), HCI_MGMT_EXP_FEATURE_EVENTS,
				  skip);
}
#endif
3929 
3930 static int exp_quality_report_feature_changed(bool enabled, struct sock *skip)
3931 {
3932 	struct mgmt_ev_exp_feature_changed ev;
3933 
3934 	memset(&ev, 0, sizeof(ev));
3935 	memcpy(ev.uuid, quality_report_uuid, 16);
3936 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
3937 
3938 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
3939 				  &ev, sizeof(ev),
3940 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
3941 }
3942 
/* Helper for building entries of the exp_features[] dispatch table */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
3948 
/* The zero key uuid is special. Multiple exp features are set through it. */
/* Setting the all-zero UUID turns off every experimental feature that
 * can currently be disabled: debug mode (non-controller index) and,
 * while powered off, LL privacy.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* Respond with the zero UUID and no flags set */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_debug_feature_changed(false, sk);
	}
#endif

	/* LL privacy is only toggled while the controller is powered off */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);

		hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

		if (changed)
			exp_ll_privacy_feature_changed(false, hdev, sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
3984 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Setter for the debug experimental feature: toggles the global BT
 * debug mode.  Only valid on the non-controller index with a single
 * boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other subscribed sockets only on an actual change */
	if (changed)
		exp_debug_feature_changed(val, sk);

	return err;
}
#endif
4031 
/* Setter for the LL privacy (RPA resolution) experimental feature.
 * Requires a controller index, a powered-off controller and a single
 * boolean parameter octet.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
		hci_dev_set_flag(hdev, HCI_ENABLE_LL_PRIVACY);
		/* Advertising is cleared when enabling LL privacy */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other subscribed sockets only on an actual change */
	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
4096 
/* Setter for the quality report experimental feature.  Invokes the
 * driver's set_quality_report() callback under the request sync lock
 * and mirrors the result in the HCI_QUALITY_REPORT flag.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Serialize against HCI request processing while calling into
	 * the driver.
	 */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		err = hdev->set_quality_report(hdev, val);
		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}
		/* Update the flag only after the driver accepted the change */
		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other subscribed sockets only on an actual change */
	if (changed)
		exp_quality_report_feature_changed(val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4165 
4166 static int exp_offload_codec_feature_changed(bool enabled, struct sock *skip)
4167 {
4168 	struct mgmt_ev_exp_feature_changed ev;
4169 
4170 	memset(&ev, 0, sizeof(ev));
4171 	memcpy(ev.uuid, offload_codecs_uuid, 16);
4172 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4173 
4174 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, NULL,
4175 				  &ev, sizeof(ev),
4176 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4177 }
4178 
/* Setter for the codec offload experimental feature.  Only toggles the
 * HCI_OFFLOAD_CODECS_ENABLED flag; support is gated on the driver
 * providing a get_data_path_id() callback.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other subscribed sockets only on an actual change */
	if (changed)
		exp_offload_codec_feature_changed(val, sk);

	return err;
}
4236 
/* Dispatch table mapping each experimental-feature UUID to its setter;
 * terminated by a NULL uuid entry.  Searched by set_exp_feature().
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4253 
4254 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4255 			   void *data, u16 data_len)
4256 {
4257 	struct mgmt_cp_set_exp_feature *cp = data;
4258 	size_t i = 0;
4259 
4260 	bt_dev_dbg(hdev, "sock %p", sk);
4261 
4262 	for (i = 0; exp_features[i].uuid; i++) {
4263 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4264 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4265 	}
4266 
4267 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4268 			       MGMT_OP_SET_EXP_FEATURE,
4269 			       MGMT_STATUS_NOT_SUPPORTED);
4270 }
4271 
/* Mask with every bit below HCI_CONN_FLAG_MAX set: all per-device
 * connection flags the kernel currently knows about.
 */
#define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4273 
4274 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4275 			    u16 data_len)
4276 {
4277 	struct mgmt_cp_get_device_flags *cp = data;
4278 	struct mgmt_rp_get_device_flags rp;
4279 	struct bdaddr_list_with_flags *br_params;
4280 	struct hci_conn_params *params;
4281 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4282 	u32 current_flags = 0;
4283 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4284 
4285 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4286 		   &cp->addr.bdaddr, cp->addr.type);
4287 
4288 	hci_dev_lock(hdev);
4289 
4290 	memset(&rp, 0, sizeof(rp));
4291 
4292 	if (cp->addr.type == BDADDR_BREDR) {
4293 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4294 							      &cp->addr.bdaddr,
4295 							      cp->addr.type);
4296 		if (!br_params)
4297 			goto done;
4298 
4299 		current_flags = br_params->current_flags;
4300 	} else {
4301 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4302 						le_addr_type(cp->addr.type));
4303 
4304 		if (!params)
4305 			goto done;
4306 
4307 		current_flags = params->current_flags;
4308 	}
4309 
4310 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4311 	rp.addr.type = cp->addr.type;
4312 	rp.supported_flags = cpu_to_le32(supported_flags);
4313 	rp.current_flags = cpu_to_le32(current_flags);
4314 
4315 	status = MGMT_STATUS_SUCCESS;
4316 
4317 done:
4318 	hci_dev_unlock(hdev);
4319 
4320 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4321 				&rp, sizeof(rp));
4322 }
4323 
4324 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4325 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4326 				 u32 supported_flags, u32 current_flags)
4327 {
4328 	struct mgmt_ev_device_flags_changed ev;
4329 
4330 	bacpy(&ev.addr.bdaddr, bdaddr);
4331 	ev.addr.type = bdaddr_type;
4332 	ev.supported_flags = cpu_to_le32(supported_flags);
4333 	ev.current_flags = cpu_to_le32(current_flags);
4334 
4335 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4336 }
4337 
4338 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4339 			    u16 len)
4340 {
4341 	struct mgmt_cp_set_device_flags *cp = data;
4342 	struct bdaddr_list_with_flags *br_params;
4343 	struct hci_conn_params *params;
4344 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4345 	u32 supported_flags = SUPPORTED_DEVICE_FLAGS();
4346 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4347 
4348 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4349 		   &cp->addr.bdaddr, cp->addr.type,
4350 		   __le32_to_cpu(current_flags));
4351 
4352 	if ((supported_flags | current_flags) != supported_flags) {
4353 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4354 			    current_flags, supported_flags);
4355 		goto done;
4356 	}
4357 
4358 	hci_dev_lock(hdev);
4359 
4360 	if (cp->addr.type == BDADDR_BREDR) {
4361 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4362 							      &cp->addr.bdaddr,
4363 							      cp->addr.type);
4364 
4365 		if (br_params) {
4366 			br_params->current_flags = current_flags;
4367 			status = MGMT_STATUS_SUCCESS;
4368 		} else {
4369 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4370 				    &cp->addr.bdaddr, cp->addr.type);
4371 		}
4372 	} else {
4373 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4374 						le_addr_type(cp->addr.type));
4375 		if (params) {
4376 			params->current_flags = current_flags;
4377 			status = MGMT_STATUS_SUCCESS;
4378 		} else {
4379 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4380 				    &cp->addr.bdaddr,
4381 				    le_addr_type(cp->addr.type));
4382 		}
4383 	}
4384 
4385 done:
4386 	hci_dev_unlock(hdev);
4387 
4388 	if (status == MGMT_STATUS_SUCCESS)
4389 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4390 				     supported_flags, current_flags);
4391 
4392 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4393 				 &cp->addr, sizeof(cp->addr));
4394 }
4395 
4396 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4397 				   u16 handle)
4398 {
4399 	struct mgmt_ev_adv_monitor_added ev;
4400 
4401 	ev.monitor_handle = cpu_to_le16(handle);
4402 
4403 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4404 }
4405 
4406 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4407 {
4408 	struct mgmt_ev_adv_monitor_removed ev;
4409 	struct mgmt_pending_cmd *cmd;
4410 	struct sock *sk_skip = NULL;
4411 	struct mgmt_cp_remove_adv_monitor *cp;
4412 
4413 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4414 	if (cmd) {
4415 		cp = cmd->param;
4416 
4417 		if (cp->monitor_handle)
4418 			sk_skip = cmd->sk;
4419 	}
4420 
4421 	ev.monitor_handle = cpu_to_le16(handle);
4422 
4423 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4424 }
4425 
4426 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4427 				 void *data, u16 len)
4428 {
4429 	struct adv_monitor *monitor = NULL;
4430 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4431 	int handle, err;
4432 	size_t rp_size = 0;
4433 	__u32 supported = 0;
4434 	__u32 enabled = 0;
4435 	__u16 num_handles = 0;
4436 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4437 
4438 	BT_DBG("request for %s", hdev->name);
4439 
4440 	hci_dev_lock(hdev);
4441 
4442 	if (msft_monitor_supported(hdev))
4443 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
4444 
4445 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4446 		handles[num_handles++] = monitor->handle;
4447 
4448 	hci_dev_unlock(hdev);
4449 
4450 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4451 	rp = kmalloc(rp_size, GFP_KERNEL);
4452 	if (!rp)
4453 		return -ENOMEM;
4454 
4455 	/* All supported features are currently enabled */
4456 	enabled = supported;
4457 
4458 	rp->supported_features = cpu_to_le32(supported);
4459 	rp->enabled_features = cpu_to_le32(enabled);
4460 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4461 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4462 	rp->num_handles = cpu_to_le16(num_handles);
4463 	if (num_handles)
4464 		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4465 
4466 	err = mgmt_cmd_complete(sk, hdev->id,
4467 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
4468 				MGMT_STATUS_SUCCESS, rp, rp_size);
4469 
4470 	kfree(rp);
4471 
4472 	return err;
4473 }
4474 
4475 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4476 {
4477 	struct mgmt_rp_add_adv_patterns_monitor rp;
4478 	struct mgmt_pending_cmd *cmd;
4479 	struct adv_monitor *monitor;
4480 	int err = 0;
4481 
4482 	hci_dev_lock(hdev);
4483 
4484 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4485 	if (!cmd) {
4486 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4487 		if (!cmd)
4488 			goto done;
4489 	}
4490 
4491 	monitor = cmd->user_data;
4492 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4493 
4494 	if (!status) {
4495 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4496 		hdev->adv_monitors_cnt++;
4497 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4498 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4499 		hci_update_background_scan(hdev);
4500 	}
4501 
4502 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4503 				mgmt_status(status), &rp, sizeof(rp));
4504 	mgmt_pending_remove(cmd);
4505 
4506 done:
4507 	hci_dev_unlock(hdev);
4508 	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4509 		   rp.monitor_handle, status);
4510 
4511 	return err;
4512 }
4513 
/* Common back end for both Add Adv Patterns Monitor command variants.
 *
 * Takes ownership of monitor 'm': on every failure path it is released
 * via hci_free_adv_monitor(). A non-zero incoming 'status' means the
 * caller already failed while building the monitor and only the error
 * reply is sent. On success the reply is either sent immediately (when
 * no controller round-trip is needed) or deferred to
 * mgmt_add_adv_patterns_monitor_complete().
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	/* Only one monitor or LE-state operation may be in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	/* 'pending' reports whether the request was forwarded to the
	 * controller; 'err' reports immediate failures.
	 */
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	if (!pending) {
		/* Registered without controller involvement: reply now */
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4577 
4578 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4579 				   struct mgmt_adv_rssi_thresholds *rssi)
4580 {
4581 	if (rssi) {
4582 		m->rssi.low_threshold = rssi->low_threshold;
4583 		m->rssi.low_threshold_timeout =
4584 		    __le16_to_cpu(rssi->low_threshold_timeout);
4585 		m->rssi.high_threshold = rssi->high_threshold;
4586 		m->rssi.high_threshold_timeout =
4587 		    __le16_to_cpu(rssi->high_threshold_timeout);
4588 		m->rssi.sampling_period = rssi->sampling_period;
4589 	} else {
4590 		/* Default values. These numbers are the least constricting
4591 		 * parameters for MSFT API to work, so it behaves as if there
4592 		 * are no rssi parameter to consider. May need to be changed
4593 		 * if other API are to be supported.
4594 		 */
4595 		m->rssi.low_threshold = -127;
4596 		m->rssi.low_threshold_timeout = 60;
4597 		m->rssi.high_threshold = -127;
4598 		m->rssi.high_threshold_timeout = 0;
4599 		m->rssi.sampling_period = 0;
4600 	}
4601 }
4602 
4603 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4604 				    struct mgmt_adv_pattern *patterns)
4605 {
4606 	u8 offset = 0, length = 0;
4607 	struct adv_pattern *p = NULL;
4608 	int i;
4609 
4610 	for (i = 0; i < pattern_count; i++) {
4611 		offset = patterns[i].offset;
4612 		length = patterns[i].length;
4613 		if (offset >= HCI_MAX_AD_LENGTH ||
4614 		    length > HCI_MAX_AD_LENGTH ||
4615 		    (offset + length) > HCI_MAX_AD_LENGTH)
4616 			return MGMT_STATUS_INVALID_PARAMS;
4617 
4618 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4619 		if (!p)
4620 			return MGMT_STATUS_NO_RESOURCES;
4621 
4622 		p->ad_type = patterns[i].ad_type;
4623 		p->offset = patterns[i].offset;
4624 		p->length = patterns[i].length;
4625 		memcpy(p->value, patterns[i].value, p->length);
4626 
4627 		INIT_LIST_HEAD(&p->list);
4628 		list_add(&p->list, &m->patterns);
4629 	}
4630 
4631 	return MGMT_STATUS_SUCCESS;
4632 }
4633 
4634 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4635 				    void *data, u16 len)
4636 {
4637 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4638 	struct adv_monitor *m = NULL;
4639 	u8 status = MGMT_STATUS_SUCCESS;
4640 	size_t expected_size = sizeof(*cp);
4641 
4642 	BT_DBG("request for %s", hdev->name);
4643 
4644 	if (len <= sizeof(*cp)) {
4645 		status = MGMT_STATUS_INVALID_PARAMS;
4646 		goto done;
4647 	}
4648 
4649 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4650 	if (len != expected_size) {
4651 		status = MGMT_STATUS_INVALID_PARAMS;
4652 		goto done;
4653 	}
4654 
4655 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4656 	if (!m) {
4657 		status = MGMT_STATUS_NO_RESOURCES;
4658 		goto done;
4659 	}
4660 
4661 	INIT_LIST_HEAD(&m->patterns);
4662 
4663 	parse_adv_monitor_rssi(m, NULL);
4664 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4665 
4666 done:
4667 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4668 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4669 }
4670 
4671 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4672 					 void *data, u16 len)
4673 {
4674 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4675 	struct adv_monitor *m = NULL;
4676 	u8 status = MGMT_STATUS_SUCCESS;
4677 	size_t expected_size = sizeof(*cp);
4678 
4679 	BT_DBG("request for %s", hdev->name);
4680 
4681 	if (len <= sizeof(*cp)) {
4682 		status = MGMT_STATUS_INVALID_PARAMS;
4683 		goto done;
4684 	}
4685 
4686 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4687 	if (len != expected_size) {
4688 		status = MGMT_STATUS_INVALID_PARAMS;
4689 		goto done;
4690 	}
4691 
4692 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4693 	if (!m) {
4694 		status = MGMT_STATUS_NO_RESOURCES;
4695 		goto done;
4696 	}
4697 
4698 	INIT_LIST_HEAD(&m->patterns);
4699 
4700 	parse_adv_monitor_rssi(m, &cp->rssi);
4701 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4702 
4703 done:
4704 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4705 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4706 }
4707 
4708 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4709 {
4710 	struct mgmt_rp_remove_adv_monitor rp;
4711 	struct mgmt_cp_remove_adv_monitor *cp;
4712 	struct mgmt_pending_cmd *cmd;
4713 	int err = 0;
4714 
4715 	hci_dev_lock(hdev);
4716 
4717 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4718 	if (!cmd)
4719 		goto done;
4720 
4721 	cp = cmd->param;
4722 	rp.monitor_handle = cp->monitor_handle;
4723 
4724 	if (!status)
4725 		hci_update_background_scan(hdev);
4726 
4727 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4728 				mgmt_status(status), &rp, sizeof(rp));
4729 	mgmt_pending_remove(cmd);
4730 
4731 done:
4732 	hci_dev_unlock(hdev);
4733 	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4734 		   rp.monitor_handle, status);
4735 
4736 	return err;
4737 }
4738 
/* MGMT_OP_REMOVE_ADV_MONITOR handler: remove one monitor (non-zero
 * handle) or all monitors (zero handle). The reply is sent immediately
 * when no controller round-trip is needed, otherwise it is deferred to
 * mgmt_remove_adv_monitor_complete().
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Only one monitor or LE-state operation may be in flight */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Zero handle means "remove all monitors" */
	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		/* -ENOENT: no monitor with that handle exists */
		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4803 
/* Completion callback for the Read Local OOB Data HCI request: translate
 * the controller reply ('opcode' selects legacy vs extended format) into
 * a mgmt command-complete for the pending socket, then drop the pending
 * command.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy reply: P-192 values only; the P-256 fields are
		 * trimmed from the mgmt reply via rp_size below.
		 */
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		/* Extended reply: both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
4862 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request the controller's local
 * out-of-band pairing data. The mgmt reply is sent asynchronously from
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* Local OOB data requires Secure Simple Pairing support */
	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one request may be outstanding at a time */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Secure Connections controllers also provide P-256 values */
	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4913 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store out-of-band pairing data
 * (hash/randomizer values) received for a remote device.
 *
 * Two command sizes are accepted: the legacy form carrying only P-192
 * values and the extended form carrying both P-192 and P-256 values.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		/* Legacy form: P-192 hash and randomizer only */
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy form is only defined for BR/EDR addresses */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		/* Extended form: P-192 and P-256 values */
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5021 
5022 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5023 				  void *data, u16 len)
5024 {
5025 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5026 	u8 status;
5027 	int err;
5028 
5029 	bt_dev_dbg(hdev, "sock %p", sk);
5030 
5031 	if (cp->addr.type != BDADDR_BREDR)
5032 		return mgmt_cmd_complete(sk, hdev->id,
5033 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5034 					 MGMT_STATUS_INVALID_PARAMS,
5035 					 &cp->addr, sizeof(cp->addr));
5036 
5037 	hci_dev_lock(hdev);
5038 
5039 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5040 		hci_remote_oob_data_clear(hdev);
5041 		status = MGMT_STATUS_SUCCESS;
5042 		goto done;
5043 	}
5044 
5045 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5046 	if (err < 0)
5047 		status = MGMT_STATUS_INVALID_PARAMS;
5048 	else
5049 		status = MGMT_STATUS_SUCCESS;
5050 
5051 done:
5052 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5053 				status, &cp->addr, sizeof(cp->addr));
5054 
5055 	hci_dev_unlock(hdev);
5056 	return err;
5057 }
5058 
5059 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5060 {
5061 	struct mgmt_pending_cmd *cmd;
5062 
5063 	bt_dev_dbg(hdev, "status %u", status);
5064 
5065 	hci_dev_lock(hdev);
5066 
5067 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5068 	if (!cmd)
5069 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5070 
5071 	if (!cmd)
5072 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5073 
5074 	if (cmd) {
5075 		cmd->cmd_complete(cmd, mgmt_status(status));
5076 		mgmt_pending_remove(cmd);
5077 	}
5078 
5079 	hci_dev_unlock(hdev);
5080 
5081 	/* Handle suspend notifier */
5082 	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
5083 			       hdev->suspend_tasks)) {
5084 		bt_dev_dbg(hdev, "Unpaused discovery");
5085 		wake_up(&hdev->suspend_wait_q);
5086 	}
5087 }
5088 
5089 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5090 				    uint8_t *mgmt_status)
5091 {
5092 	switch (type) {
5093 	case DISCOV_TYPE_LE:
5094 		*mgmt_status = mgmt_le_support(hdev);
5095 		if (*mgmt_status)
5096 			return false;
5097 		break;
5098 	case DISCOV_TYPE_INTERLEAVED:
5099 		*mgmt_status = mgmt_le_support(hdev);
5100 		if (*mgmt_status)
5101 			return false;
5102 		fallthrough;
5103 	case DISCOV_TYPE_BREDR:
5104 		*mgmt_status = mgmt_bredr_support(hdev);
5105 		if (*mgmt_status)
5106 			return false;
5107 		break;
5108 	default:
5109 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5110 		return false;
5111 	}
5112 
5113 	return true;
5114 }
5115 
/* Common implementation for Start Discovery and Start Limited Discovery
 * ('op' selects which). Validates state and discovery type, records the
 * pending command, and queues the discovery work; the reply is sent when
 * the discovery state machine settles.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while another discovery or a periodic inquiry runs */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* Completion is reported from mgmt_start_discovery_complete() */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
5183 
/* MGMT_OP_START_DISCOVERY handler: regular (non-limited) discovery. */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5190 
/* MGMT_OP_START_LIMITED_DISCOVERY handler: limited discovery variant. */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5198 
/* Completion for Start Service Discovery: echo back the first byte of
 * the original command parameters (the discovery type) with 'status'.
 */
static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
					  u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, 1);
}
5205 
/* MGMT_OP_START_SERVICE_DISCOVERY handler: start discovery with result
 * filtering by RSSI and an optional list of service UUIDs. The reply is
 * sent when the discovery state machine settles.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Reject while another discovery or a periodic inquiry runs */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The payload length must match the advertised UUID count */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* Completion is reported from mgmt_start_discovery_complete() */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
5314 
/* Completion hook for the Stop Discovery flow. Responds to a pending
 * MGMT_OP_STOP_DISCOVERY command (if any) with the translated HCI status
 * and wakes the suspend path when discovery was paused on its behalf.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* There may be no pending command, e.g. when discovery was stopped
	 * internally rather than via mgmt.
	 */
	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
5337 
/* MGMT_OP_STOP_DISCOVERY handler. Rejects the request when no discovery
 * is active or when the requested type does not match the one currently
 * running; otherwise marks discovery as stopping and queues the discovery
 * update work which performs the actual HCI traffic.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The type must match the discovery session being stopped */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The pending command is answered from the discovery work once the
	 * controller has actually stopped scanning/inquiring.
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5379 
/* MGMT_OP_CONFIRM_NAME handler. User space tells us whether the name of
 * a device found during discovery is already known. Known names drop the
 * inquiry cache entry from the resolve list; unknown ones are marked as
 * needing remote name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense while discovery is running */
	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
5421 
5422 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5423 			u16 len)
5424 {
5425 	struct mgmt_cp_block_device *cp = data;
5426 	u8 status;
5427 	int err;
5428 
5429 	bt_dev_dbg(hdev, "sock %p", sk);
5430 
5431 	if (!bdaddr_type_is_valid(cp->addr.type))
5432 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5433 					 MGMT_STATUS_INVALID_PARAMS,
5434 					 &cp->addr, sizeof(cp->addr));
5435 
5436 	hci_dev_lock(hdev);
5437 
5438 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5439 				  cp->addr.type);
5440 	if (err < 0) {
5441 		status = MGMT_STATUS_FAILED;
5442 		goto done;
5443 	}
5444 
5445 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5446 		   sk);
5447 	status = MGMT_STATUS_SUCCESS;
5448 
5449 done:
5450 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5451 				&cp->addr, sizeof(cp->addr));
5452 
5453 	hci_dev_unlock(hdev);
5454 
5455 	return err;
5456 }
5457 
5458 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5459 			  u16 len)
5460 {
5461 	struct mgmt_cp_unblock_device *cp = data;
5462 	u8 status;
5463 	int err;
5464 
5465 	bt_dev_dbg(hdev, "sock %p", sk);
5466 
5467 	if (!bdaddr_type_is_valid(cp->addr.type))
5468 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5469 					 MGMT_STATUS_INVALID_PARAMS,
5470 					 &cp->addr, sizeof(cp->addr));
5471 
5472 	hci_dev_lock(hdev);
5473 
5474 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5475 				  cp->addr.type);
5476 	if (err < 0) {
5477 		status = MGMT_STATUS_INVALID_PARAMS;
5478 		goto done;
5479 	}
5480 
5481 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5482 		   sk);
5483 	status = MGMT_STATUS_SUCCESS;
5484 
5485 done:
5486 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5487 				&cp->addr, sizeof(cp->addr));
5488 
5489 	hci_dev_unlock(hdev);
5490 
5491 	return err;
5492 }
5493 
/* MGMT_OP_SET_DEVICE_ID handler. Stores the Device ID record (source,
 * vendor, product, version) and refreshes the EIR data so that the DID
 * information is advertised.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are defined */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	/* The command always succeeds once the values are stored; the EIR
	 * update below is best-effort.
	 */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_req_init(&req, hdev);
	__hci_req_update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
5528 
/* Completion callback used when re-enabling instance advertising from
 * set_advertising_complete(); only logs the resulting status.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %u", status);
}
5534 
/* Completion callback for the Set Advertising HCI request. Synchronizes
 * the HCI_ADVERTISING flag with the controller state, answers all pending
 * MGMT_OP_SET_ADVERTISING commands, wakes the suspend path if it was
 * waiting on advertising changes and, when "Set Advertising" was just
 * turned off, re-schedules instance based advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Fail all pending Set Advertising commands with the
		 * translated HCI error.
		 */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's LE advertising state into the mgmt flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* With no current instance, fall back to the first configured one */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
5608 
/* MGMT_OP_SET_ADVERTISING handler. Value 0x00 disables advertising,
 * 0x01 enables it and 0x02 enables it in connectable mode. When no HCI
 * traffic is needed (powered off, no state change, or advertising is
 * driven by an active connection/scan) the flags are toggled directly;
 * otherwise an HCI request is issued and completed asynchronously in
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising is paused during suspend; changes would race with it */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising/Set LE operation may be in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5727 
/* MGMT_OP_SET_STATIC_ADDRESS handler. Configures the LE static random
 * address. Only allowed while the controller is powered off; BDADDR_ANY
 * clears the address, any other value must be a valid static random
 * address (two most significant bits set).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Changing the address while powered would confuse ongoing traffic */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5771 
/* MGMT_OP_SET_SCAN_PARAMS handler. Sets the LE scan interval and window
 * (both constrained to the 0x0004-0x4000 range, window <= interval) and
 * restarts passive background scanning so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must fit inside the scan interval */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req, false);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
5828 
/* Completion callback for the Set Fast Connectable HCI request. On
 * success the HCI_FAST_CONNECTABLE flag is synced to the requested value
 * and new settings are broadcast; on failure the HCI error is reported
 * back to the requester.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* The requested mode was stored with the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5862 
/* MGMT_OP_SET_FAST_CONNECTABLE handler. Requires BR/EDR to be enabled
 * and controller version 1.2 or later. When the device is powered the
 * page scan parameters are rewritten via an HCI request, otherwise the
 * flag is simply toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No state change - just confirm the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off only the flag needs flipping; the page scan
	 * parameters are programmed during power on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
5927 
/* Completion callback for the Set BR/EDR HCI request. On failure the
 * HCI_BREDR_ENABLED flag (optimistically set in set_bredr()) is rolled
 * back; on success new settings are broadcast.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5959 
/* MGMT_OP_SET_BREDR handler. Enables or disables BR/EDR support on a
 * dual-mode controller. Disabling while powered on is rejected, as is
 * re-enabling when the controller is configured for LE-only operation
 * with a static address or with secure connections enabled.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Toggling BR/EDR only makes sense while LE is enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No state change - just confirm the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6071 
/* Completion callback for the Write Secure Connections Support HCI
 * command. Maps the requested value (0x00 off, 0x01 on, 0x02 SC-only)
 * onto the HCI_SC_ENABLED/HCI_SC_ONLY flags and answers the pending
 * mgmt command.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	/* The requested mode was stored with the pending command */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
6116 
/* MGMT_OP_SET_SECURE_CONN handler. Value 0x00 disables secure
 * connections, 0x01 enables it and 0x02 enables SC-only mode. When no
 * controller interaction is needed (powered off, not SC capable, or
 * BR/EDR disabled) only the host flags are toggled; otherwise the
 * Write Secure Connections Support command is issued.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SC is available if the controller supports it or for LE-only use */
	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* For BR/EDR, secure connections builds on top of SSP */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No state change - just confirm the current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Flags are updated in sc_enable_complete() once the command
	 * has been executed by the controller.
	 */
	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6204 
/* MGMT_OP_SET_DEBUG_KEYS handler. Value 0x00 discards debug keys, 0x01
 * keeps them and 0x02 additionally enables SSP debug mode so the
 * controller itself uses debug keys. SSP debug mode changes are pushed
 * to the controller when it is powered and SSP is enabled.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 turns on actual use of debug keys by the controller */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6251 
/* MGMT_OP_SET_PRIVACY handler. Value 0x00 disables LE privacy, 0x01
 * enables it and 0x02 enables limited privacy. Only allowed while the
 * controller is powered off; the IRK supplied with the command becomes
 * the local identity resolving key.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Changing the IRK/privacy mode while powered would invalidate
	 * addresses already in use.
	 */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA with the new IRK */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6308 
6309 static bool irk_is_valid(struct mgmt_irk_info *irk)
6310 {
6311 	switch (irk->addr.type) {
6312 	case BDADDR_LE_PUBLIC:
6313 		return true;
6314 
6315 	case BDADDR_LE_RANDOM:
6316 		/* Two most significant bits shall be set */
6317 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6318 			return false;
6319 		return true;
6320 	}
6321 
6322 	return false;
6323 }
6324 
/* MGMT_OP_LOAD_IRKS handler. Replaces the entire set of stored identity
 * resolving keys with the list supplied by user space, skipping any keys
 * on the blocked-key list, and enables RPA resolving.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the total parameter length within u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate all entries before touching any stored state */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6395 
6396 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6397 {
6398 	if (key->initiator != 0x00 && key->initiator != 0x01)
6399 		return false;
6400 
6401 	switch (key->addr.type) {
6402 	case BDADDR_LE_PUBLIC:
6403 		return true;
6404 
6405 	case BDADDR_LE_RANDOM:
6406 		/* Two most significant bits shall be set */
6407 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6408 			return false;
6409 		return true;
6410 	}
6411 
6412 	return false;
6413 }
6414 
6415 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6416 			       void *cp_data, u16 len)
6417 {
6418 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
6419 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6420 				   sizeof(struct mgmt_ltk_info));
6421 	u16 key_count, expected_len;
6422 	int i, err;
6423 
6424 	bt_dev_dbg(hdev, "sock %p", sk);
6425 
6426 	if (!lmp_le_capable(hdev))
6427 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6428 				       MGMT_STATUS_NOT_SUPPORTED);
6429 
6430 	key_count = __le16_to_cpu(cp->key_count);
6431 	if (key_count > max_key_count) {
6432 		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6433 			   key_count);
6434 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6435 				       MGMT_STATUS_INVALID_PARAMS);
6436 	}
6437 
6438 	expected_len = struct_size(cp, keys, key_count);
6439 	if (expected_len != len) {
6440 		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6441 			   expected_len, len);
6442 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6443 				       MGMT_STATUS_INVALID_PARAMS);
6444 	}
6445 
6446 	bt_dev_dbg(hdev, "key_count %u", key_count);
6447 
6448 	for (i = 0; i < key_count; i++) {
6449 		struct mgmt_ltk_info *key = &cp->keys[i];
6450 
6451 		if (!ltk_is_valid(key))
6452 			return mgmt_cmd_status(sk, hdev->id,
6453 					       MGMT_OP_LOAD_LONG_TERM_KEYS,
6454 					       MGMT_STATUS_INVALID_PARAMS);
6455 	}
6456 
6457 	hci_dev_lock(hdev);
6458 
6459 	hci_smp_ltks_clear(hdev);
6460 
6461 	for (i = 0; i < key_count; i++) {
6462 		struct mgmt_ltk_info *key = &cp->keys[i];
6463 		u8 type, authenticated;
6464 
6465 		if (hci_is_blocked_key(hdev,
6466 				       HCI_BLOCKED_KEY_TYPE_LTK,
6467 				       key->val)) {
6468 			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
6469 				    &key->addr.bdaddr);
6470 			continue;
6471 		}
6472 
6473 		switch (key->type) {
6474 		case MGMT_LTK_UNAUTHENTICATED:
6475 			authenticated = 0x00;
6476 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6477 			break;
6478 		case MGMT_LTK_AUTHENTICATED:
6479 			authenticated = 0x01;
6480 			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6481 			break;
6482 		case MGMT_LTK_P256_UNAUTH:
6483 			authenticated = 0x00;
6484 			type = SMP_LTK_P256;
6485 			break;
6486 		case MGMT_LTK_P256_AUTH:
6487 			authenticated = 0x01;
6488 			type = SMP_LTK_P256;
6489 			break;
6490 		case MGMT_LTK_P256_DEBUG:
6491 			authenticated = 0x00;
6492 			type = SMP_LTK_P256_DEBUG;
6493 			fallthrough;
6494 		default:
6495 			continue;
6496 		}
6497 
6498 		hci_add_ltk(hdev, &key->addr.bdaddr,
6499 			    le_addr_type(key->addr.type), type, authenticated,
6500 			    key->val, key->enc_size, key->ediv, key->rand);
6501 	}
6502 
6503 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6504 			   NULL, 0);
6505 
6506 	hci_dev_unlock(hdev);
6507 
6508 	return err;
6509 }
6510 
6511 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6512 {
6513 	struct hci_conn *conn = cmd->user_data;
6514 	struct mgmt_rp_get_conn_info rp;
6515 	int err;
6516 
6517 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6518 
6519 	if (status == MGMT_STATUS_SUCCESS) {
6520 		rp.rssi = conn->rssi;
6521 		rp.tx_power = conn->tx_power;
6522 		rp.max_tx_power = conn->max_tx_power;
6523 	} else {
6524 		rp.rssi = HCI_RSSI_INVALID;
6525 		rp.tx_power = HCI_TX_POWER_INVALID;
6526 		rp.max_tx_power = HCI_TX_POWER_INVALID;
6527 	}
6528 
6529 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6530 				status, &rp, sizeof(rp));
6531 
6532 	hci_conn_drop(conn);
6533 	hci_conn_put(conn);
6534 
6535 	return err;
6536 }
6537 
/* Request callback for the Read RSSI / Read TX Power request built by
 * get_conn_info(). Recovers the connection handle from the last sent
 * HCI command and completes the matching pending Get Conn Info
 * command, if any.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	/* Neither command was the last one sent: nothing to complete */
	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%u) in conn_info response",
			   handle);
		goto unlock;
	}

	/* The pending command is matched by the connection it references */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6591 
/* Handler for MGMT_OP_GET_CONN_INFO.
 *
 * Reports RSSI and TX power for an existing connection. Values cached
 * on the hci_conn are used if recent enough; otherwise an HCI request
 * is issued to refresh them and the command completes asynchronously
 * via conn_info_refresh_complete()/conn_info_cmd_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one Get Conn Info may be outstanding per connection */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the command completes;
		 * released again in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6712 
6713 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6714 {
6715 	struct hci_conn *conn = cmd->user_data;
6716 	struct mgmt_rp_get_clock_info rp;
6717 	struct hci_dev *hdev;
6718 	int err;
6719 
6720 	memset(&rp, 0, sizeof(rp));
6721 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6722 
6723 	if (status)
6724 		goto complete;
6725 
6726 	hdev = hci_dev_get(cmd->index);
6727 	if (hdev) {
6728 		rp.local_clock = cpu_to_le32(hdev->clock);
6729 		hci_dev_put(hdev);
6730 	}
6731 
6732 	if (conn) {
6733 		rp.piconet_clock = cpu_to_le32(conn->clock);
6734 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6735 	}
6736 
6737 complete:
6738 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6739 				sizeof(rp));
6740 
6741 	if (conn) {
6742 		hci_conn_drop(conn);
6743 		hci_conn_put(conn);
6744 	}
6745 
6746 	return err;
6747 }
6748 
6749 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6750 {
6751 	struct hci_cp_read_clock *hci_cp;
6752 	struct mgmt_pending_cmd *cmd;
6753 	struct hci_conn *conn;
6754 
6755 	bt_dev_dbg(hdev, "status %u", status);
6756 
6757 	hci_dev_lock(hdev);
6758 
6759 	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
6760 	if (!hci_cp)
6761 		goto unlock;
6762 
6763 	if (hci_cp->which) {
6764 		u16 handle = __le16_to_cpu(hci_cp->handle);
6765 		conn = hci_conn_hash_lookup_handle(hdev, handle);
6766 	} else {
6767 		conn = NULL;
6768 	}
6769 
6770 	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
6771 	if (!cmd)
6772 		goto unlock;
6773 
6774 	cmd->cmd_complete(cmd, mgmt_status(status));
6775 	mgmt_pending_remove(cmd);
6776 
6777 unlock:
6778 	hci_dev_unlock(hdev);
6779 }
6780 
/* Handler for MGMT_OP_GET_CLOCK_INFO.
 *
 * Queues an HCI Read Clock request for the local clock and, when a
 * BR/EDR connection address is given, a second one for the piconet
 * clock. The command completes asynchronously through
 * get_clock_info_complete()/clock_info_cmd_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only defined for BR/EDR addresses */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-ANY address selects a specific connection which must be
	 * established; BDADDR_ANY requests only the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* which = 0x00 (zeroed struct): read the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Reference released in clock_info_cmd_complete() */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6856 
6857 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6858 {
6859 	struct hci_conn *conn;
6860 
6861 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6862 	if (!conn)
6863 		return false;
6864 
6865 	if (conn->dst_type != type)
6866 		return false;
6867 
6868 	if (conn->state != BT_CONNECTED)
6869 		return false;
6870 
6871 	return true;
6872 }
6873 
/* This function requires the caller holds hdev->lock.
 *
 * Create (or look up) connection parameters for the given address and
 * apply the requested auto-connect policy, moving the entry onto the
 * matching pending-connection or report list. Returns 0 on success,
 * -EIO if the parameters could not be allocated.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Policy unchanged: nothing to move between lists */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from any action list before re-filing below */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
6918 
6919 static void device_added(struct sock *sk, struct hci_dev *hdev,
6920 			 bdaddr_t *bdaddr, u8 type, u8 action)
6921 {
6922 	struct mgmt_ev_device_added ev;
6923 
6924 	bacpy(&ev.addr.bdaddr, bdaddr);
6925 	ev.addr.type = type;
6926 	ev.action = action;
6927 
6928 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
6929 }
6930 
/* Handler for MGMT_OP_ADD_DEVICE.
 *
 * For BR/EDR addresses only action 0x01 is supported and the device is
 * put on the accept list. For LE addresses the action selects the
 * auto-connect policy (0x02 = always, 0x01 = direct, otherwise report)
 * and connection parameters are created/updated accordingly.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		/* Re-read the flags so the Device Flags Changed event
		 * below reports the entry's current value.
		 */
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			current_flags = params->current_flags;
	}

	hci_update_background_scan(hdev);

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     SUPPORTED_DEVICE_FLAGS(), current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7028 
7029 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7030 			   bdaddr_t *bdaddr, u8 type)
7031 {
7032 	struct mgmt_ev_device_removed ev;
7033 
7034 	bacpy(&ev.addr.bdaddr, bdaddr);
7035 	ev.addr.type = type;
7036 
7037 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7038 }
7039 
/* Handler for MGMT_OP_REMOVE_DEVICE.
 *
 * With a specific address, removes that device from the BR/EDR accept
 * list or deletes its LE connection parameters. With BDADDR_ANY (and
 * address type 0) it clears the whole accept list and all removable
 * LE connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries not created via Add Device cannot be removed
		 * through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY must be paired with address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with an in-progress explicit connect */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7168 
/* Handler for MGMT_OP_LOAD_CONN_PARAM.
 *
 * Clears disabled connection parameter entries and loads the supplied
 * list of LE connection parameters. Individual invalid entries are
 * logged and skipped rather than failing the whole command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps the total command length within U16_MAX */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types carry connection parameters */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7253 
7254 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7255 			       void *data, u16 len)
7256 {
7257 	struct mgmt_cp_set_external_config *cp = data;
7258 	bool changed;
7259 	int err;
7260 
7261 	bt_dev_dbg(hdev, "sock %p", sk);
7262 
7263 	if (hdev_is_powered(hdev))
7264 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7265 				       MGMT_STATUS_REJECTED);
7266 
7267 	if (cp->config != 0x00 && cp->config != 0x01)
7268 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7269 				         MGMT_STATUS_INVALID_PARAMS);
7270 
7271 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7272 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7273 				       MGMT_STATUS_NOT_SUPPORTED);
7274 
7275 	hci_dev_lock(hdev);
7276 
7277 	if (cp->config)
7278 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7279 	else
7280 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7281 
7282 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7283 	if (err < 0)
7284 		goto unlock;
7285 
7286 	if (!changed)
7287 		goto unlock;
7288 
7289 	err = new_options(hdev, sk);
7290 
7291 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7292 		mgmt_index_removed(hdev);
7293 
7294 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7295 			hci_dev_set_flag(hdev, HCI_CONFIG);
7296 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7297 
7298 			queue_work(hdev->req_workqueue, &hdev->power_on);
7299 		} else {
7300 			set_bit(HCI_RAW, &hdev->flags);
7301 			mgmt_index_added(hdev);
7302 		}
7303 	}
7304 
7305 unlock:
7306 	hci_dev_unlock(hdev);
7307 	return err;
7308 }
7309 
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS.
 *
 * Stores the public address to be programmed into a controller that
 * provides a set_bdaddr driver callback. Only allowed while powered
 * off; if the address change makes the controller configured, the
 * index is re-registered and powered up for configuration.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		/* Setting the address completed configuration: re-register
		 * the index as configured and power on to apply it.
		 */
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7361 
/* Request callback for Read Local OOB (Extended) Data.
 *
 * Builds the EIR-formatted OOB reply for the pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command and additionally broadcasts
 * it as a Local OOB Data Updated event to subscribed sockets.
 * EIR sizing: 5 = class of device field (2-byte header + 3 bytes),
 * 18 = one 16-byte hash/randomizer field plus its 2-byte header.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: only P-192 hash and randomizer */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-256 values, plus P-192 unless the
		 * controller is restricted to Secure Connections Only.
		 */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7472 
7473 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7474 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7475 {
7476 	struct mgmt_pending_cmd *cmd;
7477 	struct hci_request req;
7478 	int err;
7479 
7480 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7481 			       cp, sizeof(*cp));
7482 	if (!cmd)
7483 		return -ENOMEM;
7484 
7485 	hci_req_init(&req, hdev);
7486 
7487 	if (bredr_sc_enabled(hdev))
7488 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
7489 	else
7490 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
7491 
7492 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
7493 	if (err < 0) {
7494 		mgmt_pending_remove(cmd);
7495 		return err;
7496 	}
7497 
7498 	return 0;
7499 }
7500 
/* Handler for MGMT_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Builds the local out-of-band pairing data for the transport selected
 * by cp->type. For BR/EDR with SSP enabled the hash/randomizer values
 * must be fetched from the controller, so the response is deferred to
 * read_local_ssp_oob_req()'s completion path. For LE (and for BR/EDR
 * without SSP) the response is assembled synchronously here. On success
 * the requesting socket is subscribed to OOB data update events and the
 * new data is broadcast to the other subscribed sockets.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Pre-compute the worst-case EIR payload size for the requested
	 * transport so the response buffer can be allocated up front.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;	/* class-of-device field */
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* address + role + SC confirm + SC random
				 * + flags fields
				 */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	/* Any failure so far is reported with an empty EIR payload. */
	if (status)
		goto complete;

	hci_dev_lock(hdev);

	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* OOB data has to come from the controller; the
			 * MGMT response is sent from the HCI request's
			 * completion callback, not from here.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address to advertise in the OOB data: the
		 * static address (type 0x01) when forced, when no public
		 * address is set, or when LE-only with a static address
		 * configured; the public address (type 0x00) otherwise.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral preferred, 0x01 = central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Subscribe the caller to future OOB data update events. */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Notify other subscribed sockets, excluding the requester. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7661 
7662 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7663 {
7664 	u32 flags = 0;
7665 
7666 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7667 	flags |= MGMT_ADV_FLAG_DISCOV;
7668 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7669 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7670 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7671 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7672 	flags |= MGMT_ADV_PARAM_DURATION;
7673 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7674 	flags |= MGMT_ADV_PARAM_INTERVALS;
7675 	flags |= MGMT_ADV_PARAM_TX_POWER;
7676 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7677 
7678 	/* In extended adv TX_POWER returned from Set Adv Param
7679 	 * will be always valid.
7680 	 */
7681 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7682 	    ext_adv_capable(hdev))
7683 		flags |= MGMT_ADV_FLAG_TX_POWER;
7684 
7685 	if (ext_adv_capable(hdev)) {
7686 		flags |= MGMT_ADV_FLAG_SEC_1M;
7687 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7688 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7689 
7690 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7691 			flags |= MGMT_ADV_FLAG_SEC_2M;
7692 
7693 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7694 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7695 	}
7696 
7697 	return flags;
7698 }
7699 
/* Handler for MGMT_OP_READ_ADV_FEATURES.
 *
 * Reports the supported advertising flags, the maximum advertising and
 * scan response data lengths, and the list of currently registered
 * advertising instance identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance for its identifier. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Fill in the identifiers of all registered instances. */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7755 
7756 static u8 calculate_name_len(struct hci_dev *hdev)
7757 {
7758 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7759 
7760 	return eir_append_local_name(hdev, buf, 0);
7761 }
7762 
7763 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7764 			   bool is_adv_data)
7765 {
7766 	u8 max_len = HCI_MAX_AD_LENGTH;
7767 
7768 	if (is_adv_data) {
7769 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7770 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7771 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7772 			max_len -= 3;
7773 
7774 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7775 			max_len -= 3;
7776 	} else {
7777 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7778 			max_len -= calculate_name_len(hdev);
7779 
7780 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7781 			max_len -= 4;
7782 	}
7783 
7784 	return max_len;
7785 }
7786 
7787 static bool flags_managed(u32 adv_flags)
7788 {
7789 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7790 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7791 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7792 }
7793 
7794 static bool tx_power_managed(u32 adv_flags)
7795 {
7796 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7797 }
7798 
7799 static bool name_managed(u32 adv_flags)
7800 {
7801 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7802 }
7803 
7804 static bool appearance_managed(u32 adv_flags)
7805 {
7806 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7807 }
7808 
7809 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7810 			      u8 len, bool is_adv_data)
7811 {
7812 	int i, cur_len;
7813 	u8 max_len;
7814 
7815 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7816 
7817 	if (len > max_len)
7818 		return false;
7819 
7820 	/* Make sure that the data is correctly formatted. */
7821 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
7822 		cur_len = data[i];
7823 
7824 		if (!cur_len)
7825 			continue;
7826 
7827 		if (data[i + 1] == EIR_FLAGS &&
7828 		    (!is_adv_data || flags_managed(adv_flags)))
7829 			return false;
7830 
7831 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7832 			return false;
7833 
7834 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7835 			return false;
7836 
7837 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7838 			return false;
7839 
7840 		if (data[i + 1] == EIR_APPEARANCE &&
7841 		    appearance_managed(adv_flags))
7842 			return false;
7843 
7844 		/* If the current field length would exceed the total data
7845 		 * length, then it's invalid.
7846 		 */
7847 		if (i + cur_len >= len)
7848 			return false;
7849 	}
7850 
7851 	return true;
7852 }
7853 
7854 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7855 {
7856 	u32 supported_flags, phy_flags;
7857 
7858 	/* The current implementation only supports a subset of the specified
7859 	 * flags. Also need to check mutual exclusiveness of sec flags.
7860 	 */
7861 	supported_flags = get_supported_adv_flags(hdev);
7862 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
7863 	if (adv_flags & ~supported_flags ||
7864 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
7865 		return false;
7866 
7867 	return true;
7868 }
7869 
7870 static bool adv_busy(struct hci_dev *hdev)
7871 {
7872 	return (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
7873 		pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
7874 		pending_find(MGMT_OP_SET_LE, hdev) ||
7875 		pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev) ||
7876 		pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev));
7877 }
7878 
/* HCI request completion callback shared by Add Advertising and Add
 * Extended Advertising Data.
 *
 * On success, pending instances are marked as registered. On failure,
 * every still-pending instance is removed (cancelling the rotation
 * timeout if it was the active one) and its removal is signalled to
 * userspace. Finally the originating MGMT command, if any, is answered.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* Either of the two commands may have triggered this request. */
	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_ADD_EXT_ADV_DATA, hdev);

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		/* Stop the rotation timer before removing the instance
		 * it is currently scheduling.
		 */
		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
7932 
/* Handler for MGMT_OP_ADD_ADVERTISING.
 *
 * Validates the request, registers (or replaces) an advertising
 * instance with the supplied data and parameters, and - when HCI
 * communication is required - schedules the instance, completing the
 * command from add_advertising_complete(). If no HCI traffic is
 * needed, the command is completed immediately.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length adv/scan-rsp data must exactly fill the
	 * remainder of the command.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout can only be enforced while the device is powered. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8079 
/* HCI request completion callback for Add Extended Advertising
 * Parameters.
 *
 * On success, answers the pending MGMT command with the instance's TX
 * power and the data-length limits implied by its flags. On failure,
 * removes the instance (signalling its removal first if it had been
 * advertising before) and reports the error status.
 */
static void add_ext_adv_params_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_ext_adv_params *cp;
	struct mgmt_rp_add_ext_adv_params rp;
	struct adv_info *adv_instance;
	u32 flags;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	adv_instance = hci_find_adv_instance(hdev, cp->instance);
	if (!adv_instance)
		goto unlock;

	rp.instance = cp->instance;
	rp.tx_power = adv_instance->tx_power;

	/* While we're at it, inform userspace of the available space for this
	 * advertisement, given the flags that will be used.
	 */
	flags = __le32_to_cpu(cp->flags);
	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

	if (status) {
		/* If this advertisement was previously advertising and we
		 * failed to update it, we signal that it has been removed and
		 * delete its structure
		 */
		if (!adv_instance->pending)
			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

		hci_remove_adv_instance(hdev, cp->instance);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));

	} else {
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));
	}

unlock:
	if (cmd)
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}
8136 
8137 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8138 			      void *data, u16 data_len)
8139 {
8140 	struct mgmt_cp_add_ext_adv_params *cp = data;
8141 	struct mgmt_rp_add_ext_adv_params rp;
8142 	struct mgmt_pending_cmd *cmd = NULL;
8143 	struct adv_info *adv_instance;
8144 	struct hci_request req;
8145 	u32 flags, min_interval, max_interval;
8146 	u16 timeout, duration;
8147 	u8 status;
8148 	s8 tx_power;
8149 	int err;
8150 
8151 	BT_DBG("%s", hdev->name);
8152 
8153 	status = mgmt_le_support(hdev);
8154 	if (status)
8155 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8156 				       status);
8157 
8158 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8159 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8160 				       MGMT_STATUS_INVALID_PARAMS);
8161 
8162 	/* The purpose of breaking add_advertising into two separate MGMT calls
8163 	 * for params and data is to allow more parameters to be added to this
8164 	 * structure in the future. For this reason, we verify that we have the
8165 	 * bare minimum structure we know of when the interface was defined. Any
8166 	 * extra parameters we don't know about will be ignored in this request.
8167 	 */
8168 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8169 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8170 				       MGMT_STATUS_INVALID_PARAMS);
8171 
8172 	flags = __le32_to_cpu(cp->flags);
8173 
8174 	if (!requested_adv_flags_are_valid(hdev, flags))
8175 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8176 				       MGMT_STATUS_INVALID_PARAMS);
8177 
8178 	hci_dev_lock(hdev);
8179 
8180 	/* In new interface, we require that we are powered to register */
8181 	if (!hdev_is_powered(hdev)) {
8182 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8183 				      MGMT_STATUS_REJECTED);
8184 		goto unlock;
8185 	}
8186 
8187 	if (adv_busy(hdev)) {
8188 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8189 				      MGMT_STATUS_BUSY);
8190 		goto unlock;
8191 	}
8192 
8193 	/* Parse defined parameters from request, use defaults otherwise */
8194 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8195 		  __le16_to_cpu(cp->timeout) : 0;
8196 
8197 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8198 		   __le16_to_cpu(cp->duration) :
8199 		   hdev->def_multi_adv_rotation_duration;
8200 
8201 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8202 		       __le32_to_cpu(cp->min_interval) :
8203 		       hdev->le_adv_min_interval;
8204 
8205 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8206 		       __le32_to_cpu(cp->max_interval) :
8207 		       hdev->le_adv_max_interval;
8208 
8209 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8210 		   cp->tx_power :
8211 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8212 
8213 	/* Create advertising instance with no advertising or response data */
8214 	err = hci_add_adv_instance(hdev, cp->instance, flags,
8215 				   0, NULL, 0, NULL, timeout, duration,
8216 				   tx_power, min_interval, max_interval);
8217 
8218 	if (err < 0) {
8219 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8220 				      MGMT_STATUS_FAILED);
8221 		goto unlock;
8222 	}
8223 
8224 	/* Submit request for advertising params if ext adv available */
8225 	if (ext_adv_capable(hdev)) {
8226 		hci_req_init(&req, hdev);
8227 		adv_instance = hci_find_adv_instance(hdev, cp->instance);
8228 
8229 		/* Updating parameters of an active instance will return a
8230 		 * Command Disallowed error, so we must first disable the
8231 		 * instance if it is active.
8232 		 */
8233 		if (!adv_instance->pending)
8234 			__hci_req_disable_ext_adv_instance(&req, cp->instance);
8235 
8236 		__hci_req_setup_ext_adv_instance(&req, cp->instance);
8237 
8238 		err = hci_req_run(&req, add_ext_adv_params_complete);
8239 
8240 		if (!err)
8241 			cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_PARAMS,
8242 					       hdev, data, data_len);
8243 		if (!cmd) {
8244 			err = -ENOMEM;
8245 			hci_remove_adv_instance(hdev, cp->instance);
8246 			goto unlock;
8247 		}
8248 
8249 	} else {
8250 		rp.instance = cp->instance;
8251 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8252 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8253 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8254 		err = mgmt_cmd_complete(sk, hdev->id,
8255 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8256 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8257 	}
8258 
8259 unlock:
8260 	hci_dev_unlock(hdev);
8261 
8262 	return err;
8263 }
8264 
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA.
 *
 * Second half of the two-stage extended advertising registration:
 * validates and stores the advertising/scan response data for an
 * instance previously created by add_ext_adv_params(), then updates the
 * controller (or schedules the instance via software rotation on legacy
 * controllers). On any error the still-new instance is removed again.
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */

	hci_req_init(&req, hdev);

	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	if (ext_adv_capable(hdev)) {
		__hci_req_update_adv_data(&req, cp->instance);
		__hci_req_update_scan_rsp_data(&req, cp->instance);
		__hci_req_enable_ext_advertising(&req, cp->instance);

	} else {
		/* If using software rotation, determine next instance to use */

		if (hdev->cur_adv_instance == cp->instance) {
			/* If the currently advertised instance is being changed
			 * then cancel the current advertising and schedule the
			 * next instance. If there is only one instance then the
			 * overridden advertising data will be visible right
			 * away
			 */
			cancel_adv_timeout(hdev);

			next_instance = hci_get_next_instance(hdev,
							      cp->instance);
			if (next_instance)
				schedule_instance = next_instance->instance;
		} else if (!hdev->adv_instance_timeout) {
			/* Immediately advertise the new instance if no other
			 * instance is currently being advertised.
			 */
			schedule_instance = cp->instance;
		}

		/* If the HCI_ADVERTISING flag is set or there is no instance to
		 * be advertised then we have no HCI communication to make.
		 * Simply return.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    !schedule_instance) {
			if (adv_instance->pending) {
				mgmt_advertising_added(sk, hdev, cp->instance);
				adv_instance->pending = false;
			}
			rp.instance = cp->instance;
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_EXT_ADV_DATA,
						MGMT_STATUS_SUCCESS, &rp,
						sizeof(rp));
			goto unlock;
		}

		err = __hci_req_schedule_adv_instance(&req, schedule_instance,
						      true);
	}

	/* The command is answered from add_advertising_complete() once
	 * the queued HCI request finishes.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8410 
/* HCI request completion callback for Remove Advertising: answers the
 * pending MGMT command. Success is always reported (see comment below).
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
8440 
/* Handler for MGMT_OP_REMOVE_ADVERTISING.
 *
 * Removes a single advertising instance (or all of them when
 * cp->instance is 0) and disables advertising on the controller if no
 * instance remains. When the queued HCI request actually needs to run,
 * the command is completed from remove_advertising_complete();
 * otherwise it is answered immediately.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Enabling the experimental LL Privacy support disables support for
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Instance 0 means "remove all"; any other value must exist. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* If we use extended advertising, instance is disabled and removed */
	if (ext_adv_capable(hdev)) {
		__hci_req_disable_ext_adv_instance(&req, cp->instance);
		__hci_req_remove_ext_adv_instance(&req, cp->instance);
	}

	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8526 
8527 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8528 			     void *data, u16 data_len)
8529 {
8530 	struct mgmt_cp_get_adv_size_info *cp = data;
8531 	struct mgmt_rp_get_adv_size_info rp;
8532 	u32 flags, supported_flags;
8533 	int err;
8534 
8535 	bt_dev_dbg(hdev, "sock %p", sk);
8536 
8537 	if (!lmp_le_capable(hdev))
8538 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8539 				       MGMT_STATUS_REJECTED);
8540 
8541 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8542 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8543 				       MGMT_STATUS_INVALID_PARAMS);
8544 
8545 	flags = __le32_to_cpu(cp->flags);
8546 
8547 	/* The current implementation only supports a subset of the specified
8548 	 * flags.
8549 	 */
8550 	supported_flags = get_supported_adv_flags(hdev);
8551 	if (flags & ~supported_flags)
8552 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8553 				       MGMT_STATUS_INVALID_PARAMS);
8554 
8555 	rp.instance = cp->instance;
8556 	rp.flags = cp->flags;
8557 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8558 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8559 
8560 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8561 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8562 
8563 	return err;
8564 }
8565 
/* Dispatch table for mgmt commands, indexed by mgmt opcode. Each entry
 * gives the handler, the expected parameter size (a minimum when
 * HCI_MGMT_VAR_LEN is set) and optional HCI_MGMT_* dispatch flags.
 * Entry order must match the opcode numbering and must not be changed.
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8693 
/* Notify mgmt listeners that a controller index has been added. Sends
 * the matching legacy event plus the extended index event carrying the
 * controller type and bus.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed over the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		/* Unknown device types generate no events */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8725 
/* Notify mgmt listeners that a controller index has been removed. For
 * primary controllers all pending mgmt commands are completed with
 * MGMT_STATUS_INVALID_INDEX first.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed over the mgmt interface */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Fail all pending commands (opcode 0 matches every one) */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01; /* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00; /* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02; /* AMP controller */
		break;
	default:
		/* Unknown device types generate no events */
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8760 
8761 /* This function requires the caller holds hdev->lock */
8762 static void restart_le_actions(struct hci_dev *hdev)
8763 {
8764 	struct hci_conn_params *p;
8765 
8766 	list_for_each_entry(p, &hdev->le_conn_params, list) {
8767 		/* Needed for AUTO_OFF case where might not "really"
8768 		 * have been powered off.
8769 		 */
8770 		list_del_init(&p->action);
8771 
8772 		switch (p->auto_connect) {
8773 		case HCI_AUTO_CONN_DIRECT:
8774 		case HCI_AUTO_CONN_ALWAYS:
8775 			list_add(&p->action, &hdev->pend_le_conns);
8776 			break;
8777 		case HCI_AUTO_CONN_REPORT:
8778 			list_add(&p->action, &hdev->pend_le_reports);
8779 			break;
8780 		default:
8781 			break;
8782 		}
8783 	}
8784 }
8785 
/* Finish a power-on transition: restore LE auto-connect actions on
 * success and complete any pending MGMT_OP_SET_POWERED commands.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* Broadcast the new settings, skipping the originating socket */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
8808 
/* Finish a power-off transition: complete pending commands, announce a
 * cleared class of device and broadcast the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Fail all remaining pending commands (opcode 0 matches all) */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* Only announce the class change if it was actually non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8842 
8843 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8844 {
8845 	struct mgmt_pending_cmd *cmd;
8846 	u8 status;
8847 
8848 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8849 	if (!cmd)
8850 		return;
8851 
8852 	if (err == -ERFKILL)
8853 		status = MGMT_STATUS_RFKILLED;
8854 	else
8855 		status = MGMT_STATUS_FAILED;
8856 
8857 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8858 
8859 	mgmt_pending_remove(cmd);
8860 }
8861 
/* Emit MGMT_EV_NEW_LINK_KEY for a newly created BR/EDR link key.
 * @persistent tells userspace whether it should store the key.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
8878 
8879 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8880 {
8881 	switch (ltk->type) {
8882 	case SMP_LTK:
8883 	case SMP_LTK_RESPONDER:
8884 		if (ltk->authenticated)
8885 			return MGMT_LTK_AUTHENTICATED;
8886 		return MGMT_LTK_UNAUTHENTICATED;
8887 	case SMP_LTK_P256:
8888 		if (ltk->authenticated)
8889 			return MGMT_LTK_P256_AUTH;
8890 		return MGMT_LTK_P256_UNAUTH;
8891 	case SMP_LTK_P256_DEBUG:
8892 		return MGMT_LTK_P256_DEBUG;
8893 	}
8894 
8895 	return MGMT_LTK_UNAUTHENTICATED;
8896 }
8897 
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a newly created LE long term key,
 * telling userspace whether the key is worth storing.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK (as opposed to SMP_LTK_RESPONDER) marks the local
	 * side as the pairing initiator.
	 */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
8940 
/* Emit MGMT_EV_NEW_IRK for a newly received identity resolving key,
 * including the current RPA it resolves.
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
8956 
/* Emit MGMT_EV_NEW_CSRK for a newly created signature resolving key,
 * telling userspace whether the key is worth storing.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
8986 
/* Emit MGMT_EV_NEW_CONN_PARAM with updated LE connection parameters.
 * Only identity addresses are reported; RPAs are skipped since they
 * cannot be used as a stable key by userspace.
 */
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
			 u16 max_interval, u16 latency, u16 timeout)
{
	struct mgmt_ev_new_conn_param ev;

	if (!hci_is_identity_address(bdaddr, bdaddr_type))
		return;

	memset(&ev, 0, sizeof(ev));
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
	ev.store_hint = store_hint;
	/* Parameters are transmitted little-endian on the wire */
	ev.min_interval = cpu_to_le16(min_interval);
	ev.max_interval = cpu_to_le16(max_interval);
	ev.latency = cpu_to_le16(latency);
	ev.timeout = cpu_to_le16(timeout);

	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
9007 
/* Emit MGMT_EV_DEVICE_CONNECTED, attaching either the connection's LE
 * advertising data or, for BR/EDR, the remote name and class of device
 * as EIR-formatted fields.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u8 *name, u8 name_len)
{
	/* Stack buffer for the fixed event header plus variable EIR data */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;
	u32 flags = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	/* conn->out means the local device initiated the connection */
	if (conn->out)
		flags |= MGMT_DEV_FOUND_INITIATED_CONN;

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device if it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
9048 
/* mgmt_pending_foreach() callback: complete a pending disconnect
 * command and hand its socket (with a held reference) back to the
 * caller through @data so the caller can skip it when broadcasting.
 */
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	/* Caller becomes responsible for the sock_put() */
	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
9060 
/* mgmt_pending_foreach() callback: emit the Device Unpaired event and
 * complete a pending MGMT_OP_UNPAIR_DEVICE command. @data is the hdev.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
9071 
9072 bool mgmt_powering_down(struct hci_dev *hdev)
9073 {
9074 	struct mgmt_pending_cmd *cmd;
9075 	struct mgmt_mode *cp;
9076 
9077 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9078 	if (!cmd)
9079 		return false;
9080 
9081 	cp = cmd->param;
9082 	if (!cp->val)
9083 		return true;
9084 
9085 	return false;
9086 }
9087 
/* Emit MGMT_EV_DEVICE_DISCONNECTED and complete any pending disconnect
 * and unpair commands for this device. Also accelerates a pending
 * power-off once the last connection is gone.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections that were announced to mgmt */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Picks up the socket of a completed disconnect command, if any,
	 * so that socket is skipped when sending the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9127 
9128 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9129 			    u8 link_type, u8 addr_type, u8 status)
9130 {
9131 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9132 	struct mgmt_cp_disconnect *cp;
9133 	struct mgmt_pending_cmd *cmd;
9134 
9135 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9136 			     hdev);
9137 
9138 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9139 	if (!cmd)
9140 		return;
9141 
9142 	cp = cmd->param;
9143 
9144 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9145 		return;
9146 
9147 	if (cp->addr.type != bdaddr_type)
9148 		return;
9149 
9150 	cmd->cmd_complete(cmd, mgmt_status(status));
9151 	mgmt_pending_remove(cmd);
9152 }
9153 
/* Emit MGMT_EV_CONNECT_FAILED for an outgoing connection attempt that
 * failed, and accelerate a pending power-off once the last connection
 * is gone.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
9173 
/* Emit MGMT_EV_PIN_CODE_REQUEST asking userspace for a PIN code.
 * @secure indicates that a 16-digit PIN is required.
 */
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
9184 
/* Complete a pending MGMT_OP_PIN_CODE_REPLY command with the given HCI
 * status (translated to a mgmt status).
 */
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9197 
/* Complete a pending MGMT_OP_PIN_CODE_NEG_REPLY command with the given
 * HCI status (translated to a mgmt status).
 */
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
9210 
/* Emit MGMT_EV_USER_CONFIRM_REQUEST asking userspace to confirm the
 * numeric comparison @value. @confirm_hint tells userspace whether a
 * simple yes/no confirmation suffices.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9227 
/* Emit MGMT_EV_USER_PASSKEY_REQUEST asking userspace for a passkey. */
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
9241 
/* Common helper completing a pending user pairing response command
 * identified by @opcode. Returns -ENOENT if no such command is pending.
 */
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
9257 
/* Complete a pending MGMT_OP_USER_CONFIRM_REPLY command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
9264 
/* Complete a pending MGMT_OP_USER_CONFIRM_NEG_REPLY command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
9272 
/* Complete a pending MGMT_OP_USER_PASSKEY_REPLY command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
9279 
/* Complete a pending MGMT_OP_USER_PASSKEY_NEG_REPLY command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9287 
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display @passkey.
 * @entered is the number of digits typed so far on the remote side.
 */
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
9303 
/* Emit MGMT_EV_AUTH_FAILED and, if a pairing command is pending for
 * this connection, complete it with the same status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the socket that issued the pairing command, if any */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9324 
/* Handle completion of an HCI authentication-enable change: sync the
 * HCI_LINK_SECURITY flag with the controller state and respond to any
 * pending MGMT_OP_SET_LINK_SECURITY commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's HCI_AUTH state into the mgmt flag and
	 * note whether it actually changed.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9351 
/* Queue an HCI Write EIR command with all-zero data and clear the
 * cached EIR copy in hdev. No-op if the controller lacks extended
 * inquiry response support.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	/* memset (rather than an initializer) also zeroes any padding
	 * before the struct is sent to the controller.
	 */
	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
9366 
/* Handle completion of an SSP mode change: sync the HCI_SSP_ENABLED
 * (and dependent HCI_HS_ENABLED) flags, respond to pending
 * MGMT_OP_SET_SSP commands and refresh or clear the EIR data.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back an optimistic enable; HS depends on SSP so
		 * it has to be cleared as well.
		 */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		/* Disabling SSP always disables HS too; 'changed' must be
		 * true if either flag actually changed.
		 */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
9419 
9420 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9421 {
9422 	struct cmd_lookup *match = data;
9423 
9424 	if (match->sk == NULL) {
9425 		match->sk = cmd->sk;
9426 		sock_hold(match->sk);
9427 	}
9428 }
9429 
/* Handle completion of a class-of-device update triggered by the
 * set-dev-class/add-uuid/remove-uuid commands, broadcasting the new
 * class on success while skipping the originating socket.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9448 
/* Handle completion of a local name change: emit a Local Name Changed
 * event (skipping the requesting socket, if any) and, for changes not
 * initiated through mgmt, update the cached device name.
 *
 * @hdev:   controller the name change completed on
 * @name:   new name buffer, HCI_MAX_NAME_LENGTH bytes
 * @status: HCI status; on failure nothing is updated or sent
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No pending mgmt command, so the change originated on
		 * the HCI side; record the name in the local cache.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
9476 
9477 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9478 {
9479 	int i;
9480 
9481 	for (i = 0; i < uuid_count; i++) {
9482 		if (!memcmp(uuid, uuids[i], 16))
9483 			return true;
9484 	}
9485 
9486 	return false;
9487 }
9488 
/* Return true if any UUID from the @uuids filter list occurs in the
 * EIR/advertising data @eir of @eir_len bytes.  16- and 32-bit UUIDs
 * found in the data are expanded to 128-bit form on top of the
 * Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the EIR data */
		if (field_len == 0)
			break;

		/* Stop if the field would run past the end of the data */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			/* Byte-swap each 16-bit UUID into bytes 12-13 of
			 * a copy of the base UUID, then compare.
			 */
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			/* Same for 32-bit UUIDs, occupying bytes 12-15 */
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			/* 128-bit UUIDs are compared as-is */
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance past the length byte plus the field itself */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
9543 
9544 static void restart_le_scan(struct hci_dev *hdev)
9545 {
9546 	/* If controller is not scanning we are done. */
9547 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9548 		return;
9549 
9550 	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9551 		       hdev->discovery.scan_start +
9552 		       hdev->discovery.scan_duration))
9553 		return;
9554 
9555 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9556 			   DISCOV_LE_RESTART_DELAY);
9557 }
9558 
9559 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9560 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9561 {
9562 	/* If a RSSI threshold has been specified, and
9563 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9564 	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9565 	 * is set, let it through for further processing, as we might need to
9566 	 * restart the scan.
9567 	 *
9568 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9569 	 * the results are also dropped.
9570 	 */
9571 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9572 	    (rssi == HCI_RSSI_INVALID ||
9573 	    (rssi < hdev->discovery.rssi &&
9574 	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9575 		return  false;
9576 
9577 	if (hdev->discovery.uuid_count != 0) {
9578 		/* If a list of UUIDs is provided in filter, results with no
9579 		 * matching UUID should be dropped.
9580 		 */
9581 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9582 				   hdev->discovery.uuids) &&
9583 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
9584 				   hdev->discovery.uuid_count,
9585 				   hdev->discovery.uuids))
9586 			return false;
9587 	}
9588 
9589 	/* If duplicate filtering does not report RSSI changes, then restart
9590 	 * scanning to ensure updated result with updated RSSI values.
9591 	 */
9592 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9593 		restart_le_scan(hdev);
9594 
9595 		/* Validate RSSI value against the RSSI threshold once more. */
9596 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9597 		    rssi < hdev->discovery.rssi)
9598 			return false;
9599 	}
9600 
9601 	return true;
9602 }
9603 
/* Report a discovered device to userspace as a Device Found event,
 * applying the filters (service discovery filter, limited discovery)
 * configured on @hdev.  The EIR/advertising data, an optionally
 * synthesized Class of Device field, and the scan response data are
 * concatenated into the event's eir field.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			/* LE results carry the limited bit in the Flags
			 * AD field instead of a Class of Device.
			 */
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Synthesize a Class of Device EIR field if one was supplied but
	 * the data does not already contain one.
	 */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
9688 
9689 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9690 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9691 {
9692 	struct mgmt_ev_device_found *ev;
9693 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9694 	u16 eir_len;
9695 
9696 	ev = (struct mgmt_ev_device_found *) buf;
9697 
9698 	memset(buf, 0, sizeof(buf));
9699 
9700 	bacpy(&ev->addr.bdaddr, bdaddr);
9701 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9702 	ev->rssi = rssi;
9703 
9704 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9705 				  name_len);
9706 
9707 	ev->eir_len = cpu_to_le16(eir_len);
9708 
9709 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
9710 }
9711 
9712 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9713 {
9714 	struct mgmt_ev_discovering ev;
9715 
9716 	bt_dev_dbg(hdev, "discovering %u", discovering);
9717 
9718 	memset(&ev, 0, sizeof(ev));
9719 	ev.type = hdev->discovery.type;
9720 	ev.discovering = discovering;
9721 
9722 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9723 }
9724 
9725 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9726 {
9727 	struct mgmt_ev_controller_suspend ev;
9728 
9729 	ev.suspend_state = state;
9730 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9731 }
9732 
9733 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9734 		   u8 addr_type)
9735 {
9736 	struct mgmt_ev_controller_resume ev;
9737 
9738 	ev.wake_reason = reason;
9739 	if (bdaddr) {
9740 		bacpy(&ev.addr.bdaddr, bdaddr);
9741 		ev.addr.type = addr_type;
9742 	} else {
9743 		memset(&ev.addr, 0, sizeof(ev.addr));
9744 	}
9745 
9746 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9747 }
9748 
/* Registration record for the mgmt interface on the HCI control channel */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9755 
/* Register the mgmt channel handler; returns 0 on success or a
 * negative error code from hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9760 
/* Unregister the mgmt channel handler on module/subsystem teardown */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9765