xref: /openbmc/linux/net/bluetooth/mgmt.c (revision a10c907ce0e5e138c3da091fcb7c3d109a15aec5)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 
40 #define MGMT_VERSION	1
41 #define MGMT_REVISION	17
42 
/* Opcodes accepted from trusted (fully privileged) management sockets.
 * This table is copied verbatim into the Read Management Command List
 * reply by read_commands(), so its order is part of the wire format.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
};
115 
/* Events that trusted management sockets may receive; advertised as the
 * event list in the read_commands() reply.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
155 
/* Reduced opcode set available to untrusted sockets: read-only
 * information commands only.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_SECURITY_INFO,
	MGMT_OP_READ_EXP_FEATURES_INFO,
};
166 
/* Reduced event set delivered to untrusted sockets. */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
181 
182 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
183 
184 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
185 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
186 
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code, so the position of every
 * entry is significant; see mgmt_status() for the bounds check that
 * maps out-of-range codes to MGMT_STATUS_FAILED.
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
251 
252 static u8 mgmt_status(u8 hci_status)
253 {
254 	if (hci_status < ARRAY_SIZE(mgmt_status_table))
255 		return mgmt_status_table[hci_status];
256 
257 	return MGMT_STATUS_FAILED;
258 }
259 
/* Broadcast an index-related event on the control channel to all
 * sockets matching @flag (no socket is skipped).
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
266 
/* Broadcast an event on the control channel to sockets matching @flag,
 * excluding @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
273 
/* Broadcast an event on the control channel to trusted sockets only,
 * excluding @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
280 
281 static u8 le_addr_type(u8 mgmt_addr_type)
282 {
283 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
284 		return ADDR_LE_DEV_PUBLIC;
285 	else
286 		return ADDR_LE_DEV_RANDOM;
287 }
288 
/* Fill a struct mgmt_rp_read_version with the compile-time MGMT
 * version/revision. Takes void * so callers can pass any buffer that
 * starts with that layout.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
296 
/* Handler for MGMT_OP_READ_VERSION: reply with the MGMT interface
 * version and revision. No controller index is involved.
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
309 
310 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
311 			 u16 data_len)
312 {
313 	struct mgmt_rp_read_commands *rp;
314 	u16 num_commands, num_events;
315 	size_t rp_size;
316 	int i, err;
317 
318 	bt_dev_dbg(hdev, "sock %p", sk);
319 
320 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
321 		num_commands = ARRAY_SIZE(mgmt_commands);
322 		num_events = ARRAY_SIZE(mgmt_events);
323 	} else {
324 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
325 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
326 	}
327 
328 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
329 
330 	rp = kmalloc(rp_size, GFP_KERNEL);
331 	if (!rp)
332 		return -ENOMEM;
333 
334 	rp->num_commands = cpu_to_le16(num_commands);
335 	rp->num_events = cpu_to_le16(num_events);
336 
337 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
338 		__le16 *opcode = rp->opcodes;
339 
340 		for (i = 0; i < num_commands; i++, opcode++)
341 			put_unaligned_le16(mgmt_commands[i], opcode);
342 
343 		for (i = 0; i < num_events; i++, opcode++)
344 			put_unaligned_le16(mgmt_events[i], opcode);
345 	} else {
346 		__le16 *opcode = rp->opcodes;
347 
348 		for (i = 0; i < num_commands; i++, opcode++)
349 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
350 
351 		for (i = 0; i < num_events; i++, opcode++)
352 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
353 	}
354 
355 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
356 				rp, rp_size);
357 	kfree(rp);
358 
359 	return err;
360 }
361 
/* Handler for MGMT_OP_READ_INDEX_LIST: reply with the indexes of all
 * configured primary controllers. Runs under hci_dev_list_lock for the
 * whole count-allocate-fill sequence.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of matching controllers,
	 * used only to size the reply buffer.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: still holding the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the indexes, additionally skipping
	 * controllers in setup/config or bound to a user channel, so
	 * the final count may be smaller than the allocation.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute the reply length from the final count. */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
421 
/* Handler for MGMT_OP_READ_UNCONF_INDEX_LIST: same two-pass scheme as
 * read_index_list(), but selecting only UNCONFIGURED primary
 * controllers.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Size the reply buffer from a first counting pass. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: still holding the read lock. */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Fill pass with the extra setup/config/user-channel filters. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
481 
/* Handler for MGMT_OP_READ_EXT_INDEX_LIST: reply with all primary and
 * AMP controllers, each entry carrying a type (0x00 configured primary,
 * 0x01 unconfigured primary, 0x02 AMP) and bus information. Also
 * switches the calling socket over to extended index events.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* Counting pass to size the reply allocation. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: still holding the read lock. */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Fill pass with additional state-based filtering. */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
555 
556 static bool is_configured(struct hci_dev *hdev)
557 {
558 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
559 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
560 		return false;
561 
562 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
563 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
564 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
565 		return false;
566 
567 	return true;
568 }
569 
/* Build the little-endian bitmask of configuration options that are
 * still outstanding; mirrors the two conditions in is_configured().
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* External configuration required but not yet delivered. */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* A public address is required but still all-zero. */
	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
585 
/* Broadcast the New Configuration Options event to sockets that opted
 * into option events, skipping @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
593 
/* Complete @opcode with the current missing-options bitmask as the
 * response payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
601 
/* Handler for MGMT_OP_READ_CONFIG_INFO: reply with the manufacturer id
 * plus the supported and still-missing configuration options.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Changing the public address is only possible when the driver
	 * provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
629 
/* Build the MGMT_PHY_* bitmask of PHYs the controller hardware can do,
 * derived from the BR/EDR LMP feature bits and the LE feature page.
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is mandatory for any BR/EDR controller. */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			/* 3M EDR is only reported when 2M is there too. */
			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for any LE controller. */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
681 
/* Build the MGMT_PHY_* bitmask of currently selected PHYs from
 * hdev->pkt_type and the LE default TX/RX PHY preferences.
 *
 * Note the inverted tests for EDR: in pkt_type the 2-DHx/3-DHx bits
 * mark packet types that shall NOT be used, so a clear bit means the
 * PHY is selected.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
744 
745 static u32 get_configurable_phys(struct hci_dev *hdev)
746 {
747 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
748 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
749 }
750 
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support, based on its BR/EDR and LE capabilities and quirks. Used
 * for the Read Controller Information replies.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Settings every controller supports regardless of features. */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	/* Configuration is only possible with external config support
	 * or a driver-provided set_bdaddr callback.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
796 
/* Build the MGMT_SETTING_* bitmask reflecting the controller's current
 * state, by translating the individual HCI dev flags one to one.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
867 
/* Look up a pending mgmt command for @opcode on the control channel. */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
872 
/* Like pending_find(), but additionally matches the command's user
 * data pointer.
 */
static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
						  struct hci_dev *hdev,
						  const void *data)
{
	return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
}
879 
880 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
881 {
882 	struct mgmt_pending_cmd *cmd;
883 
884 	/* If there's a pending mgmt command the flags will not yet have
885 	 * their final values, so check for this first.
886 	 */
887 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
888 	if (cmd) {
889 		struct mgmt_mode *cp = cmd->param;
890 		if (cp->val == 0x01)
891 			return LE_AD_GENERAL;
892 		else if (cp->val == 0x02)
893 			return LE_AD_LIMITED;
894 	} else {
895 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
896 			return LE_AD_LIMITED;
897 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
898 			return LE_AD_GENERAL;
899 	}
900 
901 	return 0;
902 }
903 
904 bool mgmt_get_connectable(struct hci_dev *hdev)
905 {
906 	struct mgmt_pending_cmd *cmd;
907 
908 	/* If there's a pending mgmt command the flag will not yet have
909 	 * it's final value, so check for this first.
910 	 */
911 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
912 	if (cmd) {
913 		struct mgmt_mode *cp = cmd->param;
914 
915 		return cp->val;
916 	}
917 
918 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
919 }
920 
/* Delayed work: when the service cache period ends, push the pending
 * EIR and class-of-device updates to the controller in one request.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do unless the cache flag was still set. */
	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	__hci_req_update_eir(&req);
	__hci_req_update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
941 
/* Delayed work: mark the resolvable private address as expired and,
 * if advertising is active, restart it so a new RPA gets programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	hci_req_init(&req, hdev);
	if (ext_adv_capable(hdev))
		__hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
	else
		__hci_req_enable_advertising(&req);
	hci_req_run(&req, NULL);
}
966 
/* One-time per-controller mgmt initialization; the test-and-set of
 * HCI_MGMT makes repeated calls no-ops.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
982 
/* Handler for MGMT_OP_READ_INFO: reply with address, version,
 * manufacturer, supported/current settings, class of device and both
 * device names, snapshotted under the device lock.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1012 
/* Append class of device (BR/EDR only), appearance (LE only) and both
 * device names as EIR structures to @eir; returns the number of bytes
 * written. Caller must provide a sufficiently large buffer.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1036 
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but with
 * class, appearance and names packed as variable-length EIR data.  Also
 * switches this socket over to the extended-info event model.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1076 
/* Emit MGMT_EV_EXT_INFO_CHANGED (EIR-encoded controller properties) to
 * all sockets that opted into extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1092 
/* Complete a pending @opcode command with the current settings bitmask
 * as the response payload (the common reply shape for Set_* commands).
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1100 
/* Request-complete callback for clean_up_hci_state(): once no
 * connections remain, run the power-off work immediately instead of
 * waiting for the delayed HCI_POWER_OFF_TIMEOUT queued by set_powered().
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1110 
1111 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1112 {
1113 	struct mgmt_ev_advertising_added ev;
1114 
1115 	ev.instance = instance;
1116 
1117 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1118 }
1119 
1120 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1121 			      u8 instance)
1122 {
1123 	struct mgmt_ev_advertising_removed ev;
1124 
1125 	ev.instance = instance;
1126 
1127 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1128 }
1129 
1130 static void cancel_adv_timeout(struct hci_dev *hdev)
1131 {
1132 	if (hdev->adv_instance_timeout) {
1133 		hdev->adv_instance_timeout = 0;
1134 		cancel_delayed_work(&hdev->adv_instance_expire);
1135 	}
1136 }
1137 
/* Build and run one HCI request that quiesces the controller before a
 * power-off: disable page/inquiry scan, tear down advertising, stop
 * discovery and abort every active connection.  Returns the hci_req_run()
 * result; -ENODATA means nothing needed to be sent.
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(&req);

	discov_stopped = hci_req_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		/* 0x15 == Terminated due to Power Off */
		__hci_abort_conn(&req, conn, 0x15);
	}

	/* Only flip discovery state once we know the request was queued */
	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1171 
/* MGMT_OP_SET_POWERED handler.  Power-on is handed straight to the
 * power_on work; power-off first quiesces the controller via
 * clean_up_hci_state() and falls back to immediate power-off when there
 * was nothing to quiesce (-ENODATA).  The final settings response is
 * sent from the pending command once the transition completes.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Already in the requested state: answer immediately, no pending cmd */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1226 
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets registered for setting events, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1234 
/* Exported wrapper: broadcast a settings change without skipping any
 * socket (used by code outside this file).
 */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1239 
/* Context passed to mgmt_pending_foreach() callbacks that answer a set
 * of pending commands and need to remember one originating socket.
 */
struct cmd_lookup {
	struct sock *sk;	/* first socket seen; reference held */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1245 
/* mgmt_pending_foreach() callback: answer @cmd with the current
 * settings, detach and free it, and stash the first responder socket in
 * the cmd_lookup context (with a reference the caller must sock_put()).
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1261 
/* mgmt_pending_foreach() callback: fail @cmd with the mgmt status code
 * pointed to by @data and remove it from the pending list.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1269 
1270 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1271 {
1272 	if (cmd->cmd_complete) {
1273 		u8 *status = data;
1274 
1275 		cmd->cmd_complete(cmd, *status);
1276 		mgmt_pending_remove(cmd);
1277 
1278 		return;
1279 	}
1280 
1281 	cmd_status_rsp(cmd, data);
1282 }
1283 
/* Default cmd_complete handler: echo the command's original parameters
 * back in the response.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1289 
/* cmd_complete handler for commands whose parameters begin with a
 * struct mgmt_addr_info: echo back only that address header.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1295 
1296 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1297 {
1298 	if (!lmp_bredr_capable(hdev))
1299 		return MGMT_STATUS_NOT_SUPPORTED;
1300 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1301 		return MGMT_STATUS_REJECTED;
1302 	else
1303 		return MGMT_STATUS_SUCCESS;
1304 }
1305 
1306 static u8 mgmt_le_support(struct hci_dev *hdev)
1307 {
1308 	if (!lmp_le_capable(hdev))
1309 		return MGMT_STATUS_NOT_SUPPORTED;
1310 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1311 		return MGMT_STATUS_REJECTED;
1312 	else
1313 		return MGMT_STATUS_SUCCESS;
1314 }
1315 
/* Completion hook for the discoverable_update work: resolve the pending
 * MGMT_OP_SET_DISCOVERABLE command.  On success, arm the discoverable
 * timeout (stored earlier by set_discoverable()) and broadcast the new
 * settings; on failure, report the error and drop limited mode.
 */
void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto remove_cmd;
	}

	/* Arm the timeout only now that the mode change actually succeeded */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1350 
/* MGMT_OP_SET_DISCOVERABLE handler.  val: 0x00 off, 0x01 general,
 * 0x02 limited (limited requires a timeout, off forbids one).  Powered-
 * off controllers get a flag-only update; otherwise the flags are set
 * here and the HCI side is driven by the discoverable_update work, with
 * the final response sent from mgmt_set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable depends on connectable being enabled first */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	queue_work(hdev->req_workqueue, &hdev->discoverable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1480 
/* Completion hook for the connectable_update work: resolve the pending
 * MGMT_OP_SET_CONNECTABLE command with either an error status or the
 * new settings, then broadcast the settings change on success.
 */
void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1508 
/* Flag-only connectable update, used from set_connectable() when the
 * controller is powered off.  Clearing connectable also clears
 * discoverable, since discoverable requires connectable.  On a real
 * change the scan-related state is refreshed and new settings are
 * broadcast.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_background_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1537 
/* MGMT_OP_SET_CONNECTABLE handler.  Powered-off controllers take the
 * flag-only path; otherwise flags are updated here (disabling also
 * clears both discoverable flags and any discoverable timeout) and the
 * HCI side runs via the connectable_update work, with the response sent
 * from mgmt_set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	queue_work(hdev->req_workqueue, &hdev->connectable_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
1594 
/* MGMT_OP_SET_BONDABLE handler: pure flag change with no HCI traffic of
 * its own.  In limited-privacy mode a bondable change may affect the
 * advertising address, so the discoverable_update work is kicked.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* test-and-set/clear report whether the flag actually flipped */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		if (hdev_is_powered(hdev) &&
		    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
		    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
		    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			queue_work(hdev->req_workqueue,
				   &hdev->discoverable_update);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1637 
/* MGMT_OP_SET_LINK_SECURITY handler.  Requires BR/EDR.  Powered off:
 * flag-only update.  Powered on: send HCI Write_Auth_Enable and resolve
 * the pending command from its completion path.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: nothing to send */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1706 
/* MGMT_OP_SET_SSP handler.  Requires BR/EDR and SSP capability.
 * Powered off: flag-only update (disabling SSP also drops High Speed,
 * which depends on it).  Powered on: send HCI Write_Simple_Pairing_Mode
 * and resolve the pending command from its completion path.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also disables HS; report "changed"
			 * if either flag actually flipped.
			 */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off debug-key mode if it was on */
	if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1787 
/* MGMT_OP_SET_HS (High Speed / AMP) handler: pure host-side flag, no
 * HCI traffic.  Requires BR/EDR, SSP capability and SSP enabled.
 * Disabling HS is rejected while powered on.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending SET_SSP could toggle the SSP precondition underneath us */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1844 
/* Request-complete callback for set_le(): answer all pending SET_LE
 * commands (error status or current settings), broadcast the settings
 * change, and — when LE ended up enabled — refresh the default
 * advertising/scan-response data on the controller.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };

	hci_dev_lock(hdev);

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		goto unlock;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() held a reference on the first responder socket */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		struct hci_request req;
		hci_req_init(&req, hdev);
		if (ext_adv_capable(hdev)) {
			int err;

			err = __hci_req_setup_ext_adv_instance(&req, 0x00);
			if (!err)
				__hci_req_update_scan_rsp_data(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
		}
		hci_req_run(&req, NULL);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1891 
/* MGMT_OP_SET_LE handler.  LE-only controllers may not switch LE off.
 * Powered off (or host-LE state already matching): flag-only update.
 * Otherwise send HCI Write_LE_Host_Supported — preceded, when disabling,
 * by tearing down advertising — and finish in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE invalidates all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop any active advertising before disabling host LE */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			__hci_req_disable_advertising(&req);

		if (ext_adv_capable(hdev))
			__hci_req_clear_ext_adv_sets(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1998 
1999 /* This is a helper function to test for pending mgmt commands that can
2000  * cause CoD or EIR HCI commands. We can only allow one such pending
2001  * mgmt command at a time since otherwise we cannot easily track what
2002  * the current values are, will be, and based on that calculate if a new
2003  * HCI command needs to be sent and if yes with what value.
2004  */
2005 static bool pending_eir_or_class(struct hci_dev *hdev)
2006 {
2007 	struct mgmt_pending_cmd *cmd;
2008 
2009 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2010 		switch (cmd->opcode) {
2011 		case MGMT_OP_ADD_UUID:
2012 		case MGMT_OP_REMOVE_UUID:
2013 		case MGMT_OP_SET_DEV_CLASS:
2014 		case MGMT_OP_SET_POWERED:
2015 			return true;
2016 		}
2017 	}
2018 
2019 	return false;
2020 }
2021 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order.  16- and 32-bit UUIDs are aliases of this
 * base differing only in bytes 12-15 (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2026 
2027 static u8 get_uuid_size(const u8 *uuid)
2028 {
2029 	u32 val;
2030 
2031 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2032 		return 128;
2033 
2034 	val = get_unaligned_le32(&uuid[12]);
2035 	if (val > 0xffff)
2036 		return 32;
2037 
2038 	return 16;
2039 }
2040 
/* Common completion for UUID/class commands: answer the pending
 * @mgmt_op command with the (possibly updated) class of device.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2059 
/* Request-complete callback for add_uuid(): resolve the pending
 * MGMT_OP_ADD_UUID command.
 */
static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2066 
/* MGMT_OP_ADD_UUID handler: record the UUID and push the resulting
 * class-of-device and EIR updates to the controller.  -ENODATA from
 * hci_req_run() means nothing needed to be sent, so the command is
 * completed immediately; otherwise it is completed from
 * add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* No HCI work required: complete with current dev class */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
					hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2124 
2125 static bool enable_service_cache(struct hci_dev *hdev)
2126 {
2127 	if (!hdev_is_powered(hdev))
2128 		return false;
2129 
2130 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2131 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2132 				   CACHE_TIMEOUT);
2133 		return true;
2134 	}
2135 
2136 	return false;
2137 }
2138 
/* Request-completion callback for the Remove UUID HCI request; forwards
 * the HCI status to the generic class/EIR completion handler for
 * MGMT_OP_REMOVE_UUID.
 */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2145 
/* Remove UUID command handler (MGMT_OP_REMOVE_UUID).
 *
 * Removes one UUID (or, for the all-zero wildcard, every UUID) from the
 * controller's list and refreshes class of device and EIR data. When the
 * wildcard clears the list and the service cache can be armed instead,
 * the reply is sent immediately and no HCI traffic is generated.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one EIR/class update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer was armed, the actual
		 * class/EIR update is deferred to the cache flush;
		 * reply right away with the current class.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry matching the requested UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	__hci_req_update_class(&req);
	__hci_req_update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands queued: answer immediately */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the reply until the HCI request completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2224 
/* Request-completion callback for the Set Device Class HCI request;
 * forwards the HCI status to the generic class/EIR completion handler
 * for MGMT_OP_SET_DEV_CLASS.
 */
static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	bt_dev_dbg(hdev, "status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2231 
/* Set Device Class command handler (MGMT_OP_SET_DEV_CLASS).
 *
 * Stores the requested major/minor class of device and, if the
 * controller is powered, issues an HCI request to program it (plus an
 * EIR refresh when a pending service-cache flush is cancelled).
 * BR/EDR-only: rejected with NOT_SUPPORTED on LE-only controllers.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low two bits of minor and high three bits of major are reserved */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off, just remember the values; they get applied
	 * on power-on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		/* cancel_delayed_work_sync() may sleep waiting for the
		 * cache-flush work, which itself takes hdev->lock, so
		 * the lock must be dropped around the cancel.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		__hci_req_update_eir(&req);
	}

	__hci_req_update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands queued: answer immediately */
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the reply until the HCI request completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2302 
/* Load Link Keys command handler (MGMT_OP_LOAD_LINK_KEYS).
 *
 * Replaces the controller's entire BR/EDR link-key store with the list
 * supplied by userspace (typically at daemon startup). All keys are
 * validated before any state is touched, then the old store is cleared
 * and the new keys added. Blocked and debug-combination keys are
 * skipped during the load.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound so that struct_size() below cannot exceed U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared key count */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* Validate every entry up front, before clearing existing keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Notify listeners if the debug-keys setting flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2391 
/* Emit a Device Unpaired management event for the given address,
 * skipping delivery to @skip_sk (normally the socket that issued the
 * unpair command, since it gets a command-complete instead).
 */
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
2403 
/* Unpair Device command handler (MGMT_OP_UNPAIR_DEVICE).
 *
 * Deletes all stored keys for a device (link key for BR/EDR; LTK/IRK
 * via SMP for LE) and, when cp->disconnect is set and a connection
 * exists, also terminates the link. The reply is immediate unless a
 * disconnection was started, in which case it is deferred via a pending
 * command completed from the disconnect path.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		/* No active connection: drop the stored connection
		 * parameters right away.
		 */
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Disconnect in progress: reply comes later from the disconnect
	 * completion via addr_cmd_complete().
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2531 
/* Disconnect command handler (MGMT_OP_DISCONNECT).
 *
 * Terminates an existing BR/EDR or LE connection to the given address.
 * The reply is deferred to the disconnect completion via a pending
 * command; only one Disconnect may be outstanding per controller.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect command may be pending at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	/* BT_OPEN/BT_CLOSED states mean there is no live link to drop */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2597 
2598 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2599 {
2600 	switch (link_type) {
2601 	case LE_LINK:
2602 		switch (addr_type) {
2603 		case ADDR_LE_DEV_PUBLIC:
2604 			return BDADDR_LE_PUBLIC;
2605 
2606 		default:
2607 			/* Fallback to LE Random address type */
2608 			return BDADDR_LE_RANDOM;
2609 		}
2610 
2611 	default:
2612 		/* Fallback to BR/EDR type */
2613 		return BDADDR_BREDR;
2614 	}
2615 }
2616 
/* Get Connections command handler (MGMT_OP_GET_CONNECTIONS).
 *
 * Returns the addresses of all connections that are visible to mgmt
 * (HCI_CONN_MGMT_CONNECTED), excluding SCO/eSCO audio links. The reply
 * buffer is sized from a first counting pass; the second pass may fill
 * fewer slots because of the SCO filter, so the final length is
 * recomputed from the actual count.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: count candidates to size the reply allocation */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in entries, skipping SCO/eSCO links (the
	 * continue before i++ means a skipped entry's slot is reused).
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2670 
/* Queue an HCI PIN Code Negative Reply for @cp->addr and register a
 * pending MGMT_OP_PIN_CODE_NEG_REPLY command whose reply is sent from
 * the HCI completion via addr_cmd_complete(). Caller holds hdev->lock.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2691 
/* PIN Code Reply command handler (MGMT_OP_PIN_CODE_REPLY).
 *
 * Forwards a user-supplied PIN to the controller for a legacy BR/EDR
 * pairing. If the connection requires a 16-digit PIN (high security)
 * and the supplied one is shorter, a negative reply is sent to the
 * controller instead and the command fails with INVALID_PARAMS.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; reject shorter ones
	 * by sending a negative reply to the controller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2753 
/* Set IO Capability command handler (MGMT_OP_SET_IO_CAPABILITY).
 *
 * Stores the IO capability used for future pairings. Purely a local
 * state change; no HCI traffic is generated, so the reply is immediate.
 */
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* SMP_IO_KEYBOARD_DISPLAY is the highest defined capability value */
	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
2776 
2777 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2778 {
2779 	struct hci_dev *hdev = conn->hdev;
2780 	struct mgmt_pending_cmd *cmd;
2781 
2782 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2783 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2784 			continue;
2785 
2786 		if (cmd->user_data != conn)
2787 			continue;
2788 
2789 		return cmd;
2790 	}
2791 
2792 	return NULL;
2793 }
2794 
/* Finish a Pair Device command: send the reply with @status, detach all
 * pairing callbacks from the connection, and release the references
 * (one hci_conn_drop for the connect, one hci_conn_put for the
 * cmd->user_data hold taken in pair_device()).
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
2823 
2824 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2825 {
2826 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2827 	struct mgmt_pending_cmd *cmd;
2828 
2829 	cmd = find_pairing(conn);
2830 	if (cmd) {
2831 		cmd->cmd_complete(cmd, status);
2832 		mgmt_pending_remove(cmd);
2833 	}
2834 }
2835 
/* BR/EDR connection callback (connect/security/disconnect): completes
 * the pending Pair Device command with the translated HCI status.
 */
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2851 
/* LE connection callback: unlike BR/EDR, a successful connection is not
 * proof of completed pairing (SMP reports that via mgmt_smp_complete),
 * so only failures terminate the pending Pair Device command here.
 */
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	/* Success is reported by the SMP layer, not this callback */
	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
2870 
2871 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2872 		       u16 len)
2873 {
2874 	struct mgmt_cp_pair_device *cp = data;
2875 	struct mgmt_rp_pair_device rp;
2876 	struct mgmt_pending_cmd *cmd;
2877 	u8 sec_level, auth_type;
2878 	struct hci_conn *conn;
2879 	int err;
2880 
2881 	bt_dev_dbg(hdev, "sock %p", sk);
2882 
2883 	memset(&rp, 0, sizeof(rp));
2884 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2885 	rp.addr.type = cp->addr.type;
2886 
2887 	if (!bdaddr_type_is_valid(cp->addr.type))
2888 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2889 					 MGMT_STATUS_INVALID_PARAMS,
2890 					 &rp, sizeof(rp));
2891 
2892 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2893 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2894 					 MGMT_STATUS_INVALID_PARAMS,
2895 					 &rp, sizeof(rp));
2896 
2897 	hci_dev_lock(hdev);
2898 
2899 	if (!hdev_is_powered(hdev)) {
2900 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2901 					MGMT_STATUS_NOT_POWERED, &rp,
2902 					sizeof(rp));
2903 		goto unlock;
2904 	}
2905 
2906 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2907 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2908 					MGMT_STATUS_ALREADY_PAIRED, &rp,
2909 					sizeof(rp));
2910 		goto unlock;
2911 	}
2912 
2913 	sec_level = BT_SECURITY_MEDIUM;
2914 	auth_type = HCI_AT_DEDICATED_BONDING;
2915 
2916 	if (cp->addr.type == BDADDR_BREDR) {
2917 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2918 				       auth_type);
2919 	} else {
2920 		u8 addr_type = le_addr_type(cp->addr.type);
2921 		struct hci_conn_params *p;
2922 
2923 		/* When pairing a new device, it is expected to remember
2924 		 * this device for future connections. Adding the connection
2925 		 * parameter information ahead of time allows tracking
2926 		 * of the slave preferred values and will speed up any
2927 		 * further connection establishment.
2928 		 *
2929 		 * If connection parameters already exist, then they
2930 		 * will be kept and this function does nothing.
2931 		 */
2932 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2933 
2934 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2935 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
2936 
2937 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2938 					   addr_type, sec_level,
2939 					   HCI_LE_CONN_TIMEOUT);
2940 	}
2941 
2942 	if (IS_ERR(conn)) {
2943 		int status;
2944 
2945 		if (PTR_ERR(conn) == -EBUSY)
2946 			status = MGMT_STATUS_BUSY;
2947 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
2948 			status = MGMT_STATUS_NOT_SUPPORTED;
2949 		else if (PTR_ERR(conn) == -ECONNREFUSED)
2950 			status = MGMT_STATUS_REJECTED;
2951 		else
2952 			status = MGMT_STATUS_CONNECT_FAILED;
2953 
2954 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2955 					status, &rp, sizeof(rp));
2956 		goto unlock;
2957 	}
2958 
2959 	if (conn->connect_cfm_cb) {
2960 		hci_conn_drop(conn);
2961 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2962 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
2963 		goto unlock;
2964 	}
2965 
2966 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2967 	if (!cmd) {
2968 		err = -ENOMEM;
2969 		hci_conn_drop(conn);
2970 		goto unlock;
2971 	}
2972 
2973 	cmd->cmd_complete = pairing_complete;
2974 
2975 	/* For LE, just connecting isn't a proof that the pairing finished */
2976 	if (cp->addr.type == BDADDR_BREDR) {
2977 		conn->connect_cfm_cb = pairing_complete_cb;
2978 		conn->security_cfm_cb = pairing_complete_cb;
2979 		conn->disconn_cfm_cb = pairing_complete_cb;
2980 	} else {
2981 		conn->connect_cfm_cb = le_pairing_complete_cb;
2982 		conn->security_cfm_cb = le_pairing_complete_cb;
2983 		conn->disconn_cfm_cb = le_pairing_complete_cb;
2984 	}
2985 
2986 	conn->io_capability = cp->io_cap;
2987 	cmd->user_data = hci_conn_get(conn);
2988 
2989 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2990 	    hci_conn_security(conn, sec_level, auth_type, true)) {
2991 		cmd->cmd_complete(cmd, 0);
2992 		mgmt_pending_remove(cmd);
2993 	}
2994 
2995 	err = 0;
2996 
2997 unlock:
2998 	hci_dev_unlock(hdev);
2999 	return err;
3000 }
3001 
/* Cancel Pair Device command handler (MGMT_OP_CANCEL_PAIR_DEVICE).
 *
 * Aborts the single pending Pair Device command, provided its target
 * address matches the one supplied; the pairing is completed with
 * MGMT_STATUS_CANCELLED and the caller gets a command-complete.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* No pairing in progress */
	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The supplied address must match the ongoing pairing's target */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3044 
/* Common helper for all user pairing responses (PIN neg reply, user
 * confirm reply/neg reply, passkey reply/neg reply).
 *
 * For LE connections the response is handed to the SMP layer directly
 * and answered immediately; for BR/EDR the corresponding HCI command
 * (@hci_op) is sent and the mgmt reply (@mgmt_op) is deferred via a
 * pending command. @passkey is only used for HCI_OP_USER_PASSKEY_REPLY.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses go to SMP, not to the controller */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3115 
/* PIN Code Negative Reply handler (MGMT_OP_PIN_CODE_NEG_REPLY); thin
 * wrapper around user_pairing_resp() (passkey unused).
 */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				MGMT_OP_PIN_CODE_NEG_REPLY,
				HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3127 
/* User Confirmation Reply handler (MGMT_OP_USER_CONFIRM_REPLY); thin
 * wrapper around user_pairing_resp(). The explicit length check exists
 * because this command carries no variable-length payload.
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3143 
/* User Confirmation Negative Reply handler
 * (MGMT_OP_USER_CONFIRM_NEG_REPLY); thin wrapper around
 * user_pairing_resp() (passkey unused).
 */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3155 
/* User Passkey Reply handler (MGMT_OP_USER_PASSKEY_REPLY); thin wrapper
 * around user_pairing_resp(), forwarding the user-entered passkey.
 */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3167 
/* User Passkey Negative Reply handler
 * (MGMT_OP_USER_PASSKEY_NEG_REPLY); thin wrapper around
 * user_pairing_resp() (passkey unused).
 */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3179 
3180 static void adv_expire(struct hci_dev *hdev, u32 flags)
3181 {
3182 	struct adv_info *adv_instance;
3183 	struct hci_request req;
3184 	int err;
3185 
3186 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3187 	if (!adv_instance)
3188 		return;
3189 
3190 	/* stop if current instance doesn't need to be changed */
3191 	if (!(adv_instance->flags & flags))
3192 		return;
3193 
3194 	cancel_adv_timeout(hdev);
3195 
3196 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3197 	if (!adv_instance)
3198 		return;
3199 
3200 	hci_req_init(&req, hdev);
3201 	err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3202 					      true);
3203 	if (err)
3204 		return;
3205 
3206 	hci_req_run(&req, NULL);
3207 }
3208 
/* HCI request completion for Set Local Name: sends the deferred mgmt
 * reply and, on success, refreshes advertising if the current instance
 * carries the local name.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_cp_set_local_name *cp;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			        mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* Advertising data with the local name must be refreshed */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3240 
/* Handle the Set Local Name mgmt command: update the short name
 * immediately, and either store the full name directly (powered off)
 * or push it to the controller via an HCI request (powered on).
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	/* The short name never needs a controller round-trip. */
	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		/* Powered off: just store the name and notify listeners;
		 * the controller is updated on the next power on.
		 */
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		__hci_req_update_name(&req);
		__hci_req_update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3310 
3311 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3312 			  u16 len)
3313 {
3314 	struct mgmt_cp_set_appearance *cp = data;
3315 	u16 appearance;
3316 	int err;
3317 
3318 	bt_dev_dbg(hdev, "sock %p", sk);
3319 
3320 	if (!lmp_le_capable(hdev))
3321 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3322 				       MGMT_STATUS_NOT_SUPPORTED);
3323 
3324 	appearance = le16_to_cpu(cp->appearance);
3325 
3326 	hci_dev_lock(hdev);
3327 
3328 	if (hdev->appearance != appearance) {
3329 		hdev->appearance = appearance;
3330 
3331 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3332 			adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3333 
3334 		ext_info_changed(hdev, sk);
3335 	}
3336 
3337 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3338 				0);
3339 
3340 	hci_dev_unlock(hdev);
3341 
3342 	return err;
3343 }
3344 
3345 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3346 				 void *data, u16 len)
3347 {
3348 	struct mgmt_rp_get_phy_confguration rp;
3349 
3350 	bt_dev_dbg(hdev, "sock %p", sk);
3351 
3352 	hci_dev_lock(hdev);
3353 
3354 	memset(&rp, 0, sizeof(rp));
3355 
3356 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3357 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3358 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3359 
3360 	hci_dev_unlock(hdev);
3361 
3362 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3363 				 &rp, sizeof(rp));
3364 }
3365 
3366 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3367 {
3368 	struct mgmt_ev_phy_configuration_changed ev;
3369 
3370 	memset(&ev, 0, sizeof(ev));
3371 
3372 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3373 
3374 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3375 			  sizeof(ev), skip);
3376 }
3377 
/* HCI completion handler for LE Set Default PHY: finish the pending
 * Set PHY Configuration mgmt command and broadcast the change on
 * success.
 */
static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode, struct sk_buff *skb)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION,
				mgmt_status(status));
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Notify everyone else; the issuer already got the
		 * command complete above.
		 */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3408 
/* Handle the Set PHY Configuration mgmt command: validate the selected
 * PHY mask, map the BR/EDR part onto the ACL packet-type mask, and if
 * the LE PHY selection changed issue HCI LE Set Default PHY.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_confguration *cp = data;
	struct hci_cp_le_set_default_phy cp_phy;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is invalid. */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	/* Non-configurable PHYs are always enabled and must remain part
	 * of the selection.
	 */
	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do if the selection is unchanged. */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR PHY selection into the packet-type mask.
	 * Note that for the EDR (2M/3M) packet types the HCI bit set
	 * means "do not use", hence the inverted logic below compared
	 * to the basic-rate multi-slot bits.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If the LE PHY selection did not change, only the BR/EDR
	 * packet types were updated and no HCI command is needed.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&cp_phy, 0, sizeof(cp_phy));

	/* all_phys tells the controller we have no TX and/or RX
	 * preference when no respective PHYs are selected.
	 */
	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);

	err = hci_req_run_skb(&req, set_default_phy_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3563 
3564 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3565 			    u16 len)
3566 {
3567 	int err = MGMT_STATUS_SUCCESS;
3568 	struct mgmt_cp_set_blocked_keys *keys = data;
3569 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3570 				   sizeof(struct mgmt_blocked_key_info));
3571 	u16 key_count, expected_len;
3572 	int i;
3573 
3574 	bt_dev_dbg(hdev, "sock %p", sk);
3575 
3576 	key_count = __le16_to_cpu(keys->key_count);
3577 	if (key_count > max_key_count) {
3578 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3579 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3580 				       MGMT_STATUS_INVALID_PARAMS);
3581 	}
3582 
3583 	expected_len = struct_size(keys, keys, key_count);
3584 	if (expected_len != len) {
3585 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3586 			   expected_len, len);
3587 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3588 				       MGMT_STATUS_INVALID_PARAMS);
3589 	}
3590 
3591 	hci_dev_lock(hdev);
3592 
3593 	hci_blocked_keys_clear(hdev);
3594 
3595 	for (i = 0; i < keys->key_count; ++i) {
3596 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3597 
3598 		if (!b) {
3599 			err = MGMT_STATUS_NO_RESOURCES;
3600 			break;
3601 		}
3602 
3603 		b->type = keys->keys[i].type;
3604 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3605 		list_add_rcu(&b->list, &hdev->blocked_keys);
3606 	}
3607 	hci_dev_unlock(hdev);
3608 
3609 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3610 				err, NULL, 0);
3611 }
3612 
/* Handle the Set Wideband Speech mgmt command: toggle the
 * HCI_WIDEBAND_SPEECH_ENABLED setting. The change is only allowed
 * while the controller is powered off (or when it is a no-op).
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Changing the setting on a powered controller is rejected;
	 * only a no-op (same value) is allowed while powered.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Only broadcast New Settings if the flag actually flipped. */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3668 
/* Handle the Read Security Information mgmt command: build an
 * EIR-style TLV list describing the controller's security
 * capabilities. The fixed buffer is large enough for the worst case
 * (flags entry plus two 16-bit entries).
 */
static int read_security_info(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	char buf[16];
	struct mgmt_rp_read_security_info *rp = (void *)buf;
	u16 sec_len = 0;
	u8 flags = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 */
	if (hdev->commands[41] & 0x08)
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	sec_len = eir_append_data(rp->sec, sec_len, 0x01, &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		sec_len = eir_append_le16(rp->sec, sec_len, 0x02,
					  hdev->max_enc_key_size);

	sec_len = eir_append_le16(rp->sec, sec_len, 0x03, SMP_MAX_ENC_KEY_SIZE);

	rp->sec_len = cpu_to_le16(sec_len);

	hci_dev_unlock(hdev);

	/* Reply with the header plus only the TLV bytes actually used. */
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_SECURITY_INFO, 0,
				 rp, sizeof(*rp) + sec_len);
}
3717 
3718 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3719 				  void *data, u16 data_len)
3720 {
3721 	char buf[42];
3722 	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3723 	u16 idx = 0;
3724 
3725 	bt_dev_dbg(hdev, "sock %p", sk);
3726 
3727 	memset(&buf, 0, sizeof(buf));
3728 
3729 	rp->feature_count = cpu_to_le16(idx);
3730 
3731 	/* After reading the experimental features information, enable
3732 	 * the events to update client on any future change.
3733 	 */
3734 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3735 
3736 	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3737 				 MGMT_OP_READ_EXP_FEATURES_INFO,
3738 				 0, rp, sizeof(*rp) + (20 * idx));
3739 }
3740 
3741 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
3742 			   void *data, u16 data_len)
3743 {
3744 	struct mgmt_cp_set_exp_feature *cp = data;
3745 	struct mgmt_rp_set_exp_feature rp;
3746 
3747 	bt_dev_dbg(hdev, "sock %p", sk);
3748 
3749 	if (!memcmp(cp->uuid, ZERO_KEY, 16)) {
3750 		memset(rp.uuid, 0, 16);
3751 		rp.flags = cpu_to_le32(0);
3752 
3753 		hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
3754 
3755 		return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3756 					 MGMT_OP_SET_EXP_FEATURE, 0,
3757 					 &rp, sizeof(rp));
3758 	}
3759 
3760 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
3761 			       MGMT_OP_SET_EXP_FEATURE,
3762 			       MGMT_STATUS_NOT_SUPPORTED);
3763 }
3764 
/* HCI completion handler for Read Local OOB (Extended) Data: translate
 * the controller reply into a mgmt reply. For the legacy (P-192 only)
 * command the reply is truncated to omit the 256-bit fields.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
				         u16 opcode, struct sk_buff *skb)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status || !skb) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				status ? mgmt_status(status) : MGMT_STATUS_FAILED);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a short controller reply before reading
		 * the fixed-size fields.
		 */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Legacy command: no 256-bit data, shrink the reply. */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	mgmt_pending_remove(cmd);
}
3823 
/* Handle the Read Local OOB Data mgmt command: queue the appropriate
 * HCI command (extended variant when BR/EDR Secure Connections is
 * enabled) and complete asynchronously via
 * read_local_oob_data_complete().
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data request may be in flight. */
	if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (bredr_sc_enabled(hdev))
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
	else
		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	err = hci_req_run_skb(&req, read_local_oob_data_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3874 
/* Handle the Add Remote OOB Data mgmt command. Two wire formats are
 * accepted, distinguished by length: the legacy P-192-only form and
 * the extended form with both P-192 and P-256 values. All-zero
 * hash/randomizer pairs disable the corresponding OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* The legacy P-192-only form is BR/EDR specific. */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3982 
3983 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3984 				  void *data, u16 len)
3985 {
3986 	struct mgmt_cp_remove_remote_oob_data *cp = data;
3987 	u8 status;
3988 	int err;
3989 
3990 	bt_dev_dbg(hdev, "sock %p", sk);
3991 
3992 	if (cp->addr.type != BDADDR_BREDR)
3993 		return mgmt_cmd_complete(sk, hdev->id,
3994 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3995 					 MGMT_STATUS_INVALID_PARAMS,
3996 					 &cp->addr, sizeof(cp->addr));
3997 
3998 	hci_dev_lock(hdev);
3999 
4000 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
4001 		hci_remote_oob_data_clear(hdev);
4002 		status = MGMT_STATUS_SUCCESS;
4003 		goto done;
4004 	}
4005 
4006 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
4007 	if (err < 0)
4008 		status = MGMT_STATUS_INVALID_PARAMS;
4009 	else
4010 		status = MGMT_STATUS_SUCCESS;
4011 
4012 done:
4013 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
4014 				status, &cp->addr, sizeof(cp->addr));
4015 
4016 	hci_dev_unlock(hdev);
4017 	return err;
4018 }
4019 
/* Called when a start-discovery HCI sequence finishes: complete
 * whichever start-discovery variant is pending and wake the suspend
 * notifier if it was waiting for discovery to be unpaused.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* Only one of the three start variants can be pending. */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}
4049 
4050 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
4051 				    uint8_t *mgmt_status)
4052 {
4053 	switch (type) {
4054 	case DISCOV_TYPE_LE:
4055 		*mgmt_status = mgmt_le_support(hdev);
4056 		if (*mgmt_status)
4057 			return false;
4058 		break;
4059 	case DISCOV_TYPE_INTERLEAVED:
4060 		*mgmt_status = mgmt_le_support(hdev);
4061 		if (*mgmt_status)
4062 			return false;
4063 		/* Intentional fall-through */
4064 	case DISCOV_TYPE_BREDR:
4065 		*mgmt_status = mgmt_bredr_support(hdev);
4066 		if (*mgmt_status)
4067 			return false;
4068 		break;
4069 	default:
4070 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
4071 		return false;
4072 	}
4073 
4074 	return true;
4075 }
4076 
/* Common implementation for the Start Discovery and Start Limited
 * Discovery mgmt commands: validate state and type, record the
 * discovery parameters and kick off the discovery work item.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Discovery must be fully stopped (and periodic inquiry off)
	 * before a new one can start.
	 */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	/* The actual HCI work runs asynchronously; completion is
	 * reported via mgmt_start_discovery_complete().
	 */
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4144 
4145 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4146 			   void *data, u16 len)
4147 {
4148 	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
4149 					data, len);
4150 }
4151 
4152 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
4153 				   void *data, u16 len)
4154 {
4155 	return start_discovery_internal(sk, hdev,
4156 					MGMT_OP_START_LIMITED_DISCOVERY,
4157 					data, len);
4158 }
4159 
4160 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
4161 					  u8 status)
4162 {
4163 	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
4164 				 cmd->param, 1);
4165 }
4166 
/* Handle the Start Service Discovery mgmt command: validate the
 * variable-length UUID filter list, store the result filter (RSSI
 * threshold plus UUIDs) and kick off the discovery work item.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound that keeps the computed length within a u16. */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command must carry exactly uuid_count 16-byte UUIDs. */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = service_discovery_cmd_complete;

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
4267 
4268 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4269 {
4270 	struct mgmt_pending_cmd *cmd;
4271 
4272 	bt_dev_dbg(hdev, "status %d", status);
4273 
4274 	hci_dev_lock(hdev);
4275 
4276 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4277 	if (cmd) {
4278 		cmd->cmd_complete(cmd, mgmt_status(status));
4279 		mgmt_pending_remove(cmd);
4280 	}
4281 
4282 	hci_dev_unlock(hdev);
4283 
4284 	/* Handle suspend notifier */
4285 	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
4286 		bt_dev_dbg(hdev, "Paused discovery");
4287 		wake_up(&hdev->suspend_wait_q);
4288 	}
4289 }
4290 
/* Handle the Stop Discovery mgmt command: verify discovery is active
 * with a matching type, then queue the stop work; completion is
 * reported via mgmt_stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the running discovery type. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = generic_cmd_complete;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
	queue_work(hdev->req_workqueue, &hdev->discov_update);
	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4332 
4333 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4334 			u16 len)
4335 {
4336 	struct mgmt_cp_confirm_name *cp = data;
4337 	struct inquiry_entry *e;
4338 	int err;
4339 
4340 	bt_dev_dbg(hdev, "sock %p", sk);
4341 
4342 	hci_dev_lock(hdev);
4343 
4344 	if (!hci_discovery_active(hdev)) {
4345 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4346 					MGMT_STATUS_FAILED, &cp->addr,
4347 					sizeof(cp->addr));
4348 		goto failed;
4349 	}
4350 
4351 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4352 	if (!e) {
4353 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4354 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4355 					sizeof(cp->addr));
4356 		goto failed;
4357 	}
4358 
4359 	if (cp->name_known) {
4360 		e->name_state = NAME_KNOWN;
4361 		list_del(&e->list);
4362 	} else {
4363 		e->name_state = NAME_NEEDED;
4364 		hci_inquiry_cache_update_resolve(hdev, e);
4365 	}
4366 
4367 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4368 				&cp->addr, sizeof(cp->addr));
4369 
4370 failed:
4371 	hci_dev_unlock(hdev);
4372 	return err;
4373 }
4374 
4375 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4376 			u16 len)
4377 {
4378 	struct mgmt_cp_block_device *cp = data;
4379 	u8 status;
4380 	int err;
4381 
4382 	bt_dev_dbg(hdev, "sock %p", sk);
4383 
4384 	if (!bdaddr_type_is_valid(cp->addr.type))
4385 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4386 					 MGMT_STATUS_INVALID_PARAMS,
4387 					 &cp->addr, sizeof(cp->addr));
4388 
4389 	hci_dev_lock(hdev);
4390 
4391 	err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4392 				  cp->addr.type);
4393 	if (err < 0) {
4394 		status = MGMT_STATUS_FAILED;
4395 		goto done;
4396 	}
4397 
4398 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4399 		   sk);
4400 	status = MGMT_STATUS_SUCCESS;
4401 
4402 done:
4403 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4404 				&cp->addr, sizeof(cp->addr));
4405 
4406 	hci_dev_unlock(hdev);
4407 
4408 	return err;
4409 }
4410 
4411 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4412 			  u16 len)
4413 {
4414 	struct mgmt_cp_unblock_device *cp = data;
4415 	u8 status;
4416 	int err;
4417 
4418 	bt_dev_dbg(hdev, "sock %p", sk);
4419 
4420 	if (!bdaddr_type_is_valid(cp->addr.type))
4421 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4422 					 MGMT_STATUS_INVALID_PARAMS,
4423 					 &cp->addr, sizeof(cp->addr));
4424 
4425 	hci_dev_lock(hdev);
4426 
4427 	err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4428 				  cp->addr.type);
4429 	if (err < 0) {
4430 		status = MGMT_STATUS_INVALID_PARAMS;
4431 		goto done;
4432 	}
4433 
4434 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4435 		   sk);
4436 	status = MGMT_STATUS_SUCCESS;
4437 
4438 done:
4439 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4440 				&cp->addr, sizeof(cp->addr));
4441 
4442 	hci_dev_unlock(hdev);
4443 
4444 	return err;
4445 }
4446 
4447 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4448 			 u16 len)
4449 {
4450 	struct mgmt_cp_set_device_id *cp = data;
4451 	struct hci_request req;
4452 	int err;
4453 	__u16 source;
4454 
4455 	bt_dev_dbg(hdev, "sock %p", sk);
4456 
4457 	source = __le16_to_cpu(cp->source);
4458 
4459 	if (source > 0x0002)
4460 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4461 				       MGMT_STATUS_INVALID_PARAMS);
4462 
4463 	hci_dev_lock(hdev);
4464 
4465 	hdev->devid_source = source;
4466 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4467 	hdev->devid_product = __le16_to_cpu(cp->product);
4468 	hdev->devid_version = __le16_to_cpu(cp->version);
4469 
4470 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4471 				NULL, 0);
4472 
4473 	hci_req_init(&req, hdev);
4474 	__hci_req_update_eir(&req);
4475 	hci_req_run(&req, NULL);
4476 
4477 	hci_dev_unlock(hdev);
4478 
4479 	return err;
4480 }
4481 
/* Completion callback for re-enabling instance advertising; the request
 * outcome is only reported for debugging purposes.
 */
static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	bt_dev_dbg(hdev, "status %d", status);
}
4487 
/* Completion callback for the HCI request issued by set_advertising().
 *
 * Responds to all pending Set Advertising commands, synchronizes the
 * HCI_ADVERTISING mgmt setting with the controller state, wakes a waiting
 * suspend/resume task if applicable, and re-enables multi-instance
 * advertising when "Set Advertising" was just turned off.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	u8 instance;
	struct adv_info *adv_instance;
	int err;

	hci_dev_lock(hdev);

	/* On failure just fail all pending commands and leave the
	 * advertising flags untouched.
	 */
	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		goto unlock;
	}

	/* Mirror the controller's LE advertising state into the mgmt
	 * HCI_ADVERTISING setting.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		goto unlock;

	/* With no instance currently selected, fall back to the first
	 * registered advertising instance.
	 */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			goto unlock;

		instance = adv_instance->instance;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, instance, true);

	if (!err)
		err = hci_req_run(&req, enable_advertising_instance);

	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising");

unlock:
	hci_dev_unlock(hdev);
}
4561 
/* Handler for the Set Advertising mgmt command.
 *
 * cp->val: 0x00 disables advertising, 0x01 enables it, 0x02 enables it
 * as connectable (sets HCI_ADVERTISING_CONNECTABLE). When no HCI traffic
 * is needed (powered off, no state change, LE connections present, or
 * active LE scanning) only the flags are toggled and a settings response
 * is sent; otherwise an HCI request is queued and completed in
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* While advertising is paused (e.g. for suspend) no changes are
	 * accepted.
	 */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one Set Advertising / Set LE operation may be in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	} else {
		__hci_req_disable_advertising(&req);
	}

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4673 
4674 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4675 			      void *data, u16 len)
4676 {
4677 	struct mgmt_cp_set_static_address *cp = data;
4678 	int err;
4679 
4680 	bt_dev_dbg(hdev, "sock %p", sk);
4681 
4682 	if (!lmp_le_capable(hdev))
4683 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4684 				       MGMT_STATUS_NOT_SUPPORTED);
4685 
4686 	if (hdev_is_powered(hdev))
4687 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4688 				       MGMT_STATUS_REJECTED);
4689 
4690 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4691 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4692 			return mgmt_cmd_status(sk, hdev->id,
4693 					       MGMT_OP_SET_STATIC_ADDRESS,
4694 					       MGMT_STATUS_INVALID_PARAMS);
4695 
4696 		/* Two most significant bits shall be set */
4697 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4698 			return mgmt_cmd_status(sk, hdev->id,
4699 					       MGMT_OP_SET_STATIC_ADDRESS,
4700 					       MGMT_STATUS_INVALID_PARAMS);
4701 	}
4702 
4703 	hci_dev_lock(hdev);
4704 
4705 	bacpy(&hdev->static_addr, &cp->bdaddr);
4706 
4707 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4708 	if (err < 0)
4709 		goto unlock;
4710 
4711 	err = new_settings(hdev, sk);
4712 
4713 unlock:
4714 	hci_dev_unlock(hdev);
4715 	return err;
4716 }
4717 
4718 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4719 			   void *data, u16 len)
4720 {
4721 	struct mgmt_cp_set_scan_params *cp = data;
4722 	__u16 interval, window;
4723 	int err;
4724 
4725 	bt_dev_dbg(hdev, "sock %p", sk);
4726 
4727 	if (!lmp_le_capable(hdev))
4728 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4729 				       MGMT_STATUS_NOT_SUPPORTED);
4730 
4731 	interval = __le16_to_cpu(cp->interval);
4732 
4733 	if (interval < 0x0004 || interval > 0x4000)
4734 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4735 				       MGMT_STATUS_INVALID_PARAMS);
4736 
4737 	window = __le16_to_cpu(cp->window);
4738 
4739 	if (window < 0x0004 || window > 0x4000)
4740 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4741 				       MGMT_STATUS_INVALID_PARAMS);
4742 
4743 	if (window > interval)
4744 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4745 				       MGMT_STATUS_INVALID_PARAMS);
4746 
4747 	hci_dev_lock(hdev);
4748 
4749 	hdev->le_scan_interval = interval;
4750 	hdev->le_scan_window = window;
4751 
4752 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4753 				NULL, 0);
4754 
4755 	/* If background scan is running, restart it so new parameters are
4756 	 * loaded.
4757 	 */
4758 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4759 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4760 		struct hci_request req;
4761 
4762 		hci_req_init(&req, hdev);
4763 
4764 		hci_req_add_le_scan_disable(&req);
4765 		hci_req_add_le_passive_scan(&req);
4766 
4767 		hci_req_run(&req, NULL);
4768 	}
4769 
4770 	hci_dev_unlock(hdev);
4771 
4772 	return err;
4773 }
4774 
/* Completion callback for the HCI request issued by set_fast_connectable().
 *
 * On success the HCI_FAST_CONNECTABLE flag is synced with the requested
 * value and a settings response plus New Settings event are sent; on
 * failure only an error status is returned to the caller.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
				      u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			        mgmt_status(status));
	} else {
		/* The requested mode was stored with the pending command */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4808 
/* Handler for the Set Fast Connectable mgmt command.
 *
 * Requires BR/EDR to be enabled and a controller of at least version 1.2.
 * When the device is powered, the page scan parameters are updated via an
 * HCI request completed in fast_connectable_complete(); when powered off,
 * only the HCI_FAST_CONNECTABLE flag is toggled.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Fast Connectable operation may be in flight */
	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No state change needed: just confirm the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	/* While powered off only the flag is toggled; the controller is
	 * configured later during power on.
	 */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4873 
/* Completion callback for the HCI request issued by set_bredr().
 *
 * set_bredr() sets HCI_BREDR_ENABLED optimistically before running the
 * request, so on failure the flag has to be cleared again to restore the
 * previous state.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4905 
/* Handler for the Set BR/EDR mgmt command (dual-mode controllers only).
 *
 * While powered off, the HCI_BREDR_ENABLED flag is simply toggled (and
 * BR/EDR-only settings are cleared when disabling). While powered on,
 * disabling is rejected and enabling is performed via an HCI request
 * completed in set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* No state change needed: just confirm the current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all settings that only
		 * make sense for BR/EDR.
		 */
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_req_init(&req, hdev);

	__hci_req_write_fast_connectable(&req, false);
	__hci_req_update_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5017 
/* Completion callback for the Write Secure Connections Support request
 * issued by set_secure_conn().
 *
 * On success the HCI_SC_ENABLED/HCI_SC_ONLY flags are synced with the
 * requested value (0x00 off, 0x01 on, 0x02 SC-only mode) and a settings
 * response plus New Settings event are sent.
 */
static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
			        mgmt_status(status));
		goto remove;
	}

	/* The requested mode was stored with the pending command */
	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
	new_settings(hdev, cmd->sk);

remove:
	mgmt_pending_remove(cmd);
unlock:
	hci_dev_unlock(hdev);
}
5062 
/* Handler for the Set Secure Connections mgmt command.
 *
 * cp->val: 0x00 disables SC, 0x01 enables it, 0x02 enables SC-only mode.
 * When the controller cannot act on the change (powered off, not SC
 * capable, or BR/EDR disabled) only the flags are toggled; otherwise a
 * Write Secure Connections Support command is issued and the flags are
 * updated in sc_enable_complete().
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* An SC capable BR/EDR controller requires SSP to be enabled
	 * before secure connections can be turned on.
	 */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Without a powered SC capable BR/EDR controller only the flags
	 * are toggled and user space is notified directly.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No state change needed: just confirm the current settings */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	err = hci_req_run(&req, sc_enable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
5150 
/* Handler for the Set Debug Keys mgmt command.
 *
 * cp->val: 0x00 discards debug keys, 0x01 keeps them (HCI_KEEP_DEBUG_KEYS),
 * 0x02 additionally makes the controller use its debug key pair
 * (HCI_USE_DEBUG_KEYS + Write SSP Debug Mode on a powered, SSP-enabled
 * controller).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Only tell the controller about a change of the debug mode when
	 * it is powered and SSP is enabled.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5197 
/* Handler for the Set Privacy mgmt command.
 *
 * cp->privacy: 0x00 disables privacy, 0x01 enables it with the supplied
 * IRK, 0x02 additionally enables limited privacy (HCI_LIMITED_PRIVACY).
 * Only allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on the next use */
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5254 
5255 static bool irk_is_valid(struct mgmt_irk_info *irk)
5256 {
5257 	switch (irk->addr.type) {
5258 	case BDADDR_LE_PUBLIC:
5259 		return true;
5260 
5261 	case BDADDR_LE_RANDOM:
5262 		/* Two most significant bits shall be set */
5263 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5264 			return false;
5265 		return true;
5266 	}
5267 
5268 	return false;
5269 }
5270 
/* Handler for the Load IRKs mgmt command.
 *
 * Validates the entry count and total payload length, rejects any entry
 * with an invalid address, then replaces the whole IRK store with the
 * supplied list (skipping keys on the blocked-keys list).
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on irk_count such that the payload fits in u16 len */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry before modifying any state */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space handling IRKs implies it can do RPA resolving */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5341 
5342 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5343 {
5344 	if (key->master != 0x00 && key->master != 0x01)
5345 		return false;
5346 
5347 	switch (key->addr.type) {
5348 	case BDADDR_LE_PUBLIC:
5349 		return true;
5350 
5351 	case BDADDR_LE_RANDOM:
5352 		/* Two most significant bits shall be set */
5353 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
5354 			return false;
5355 		return true;
5356 	}
5357 
5358 	return false;
5359 }
5360 
/* Handler for the Load Long Term Keys mgmt command.
 *
 * Validates the entry count and total payload length, rejects malformed
 * entries, then replaces the whole LTK store with the supplied list.
 * Blocked keys, P-256 debug keys and keys of unknown type are skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound on key_count such that the payload fits in u16 len */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared count must match the actual payload length */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry before modifying any state */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* Debug keys deliberately fall through into the
			 * default case and are never added to the store.
			 */
			/* fall through */
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
5456 
5457 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5458 {
5459 	struct hci_conn *conn = cmd->user_data;
5460 	struct mgmt_rp_get_conn_info rp;
5461 	int err;
5462 
5463 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5464 
5465 	if (status == MGMT_STATUS_SUCCESS) {
5466 		rp.rssi = conn->rssi;
5467 		rp.tx_power = conn->tx_power;
5468 		rp.max_tx_power = conn->max_tx_power;
5469 	} else {
5470 		rp.rssi = HCI_RSSI_INVALID;
5471 		rp.tx_power = HCI_TX_POWER_INVALID;
5472 		rp.max_tx_power = HCI_TX_POWER_INVALID;
5473 	}
5474 
5475 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5476 				status, &rp, sizeof(rp));
5477 
5478 	hci_conn_drop(conn);
5479 	hci_conn_put(conn);
5480 
5481 	return err;
5482 }
5483 
/* Request callback for the Read RSSI / Read Transmit Power Level
 * request issued by get_conn_info(). Recovers the connection handle
 * from the last sent command and completes the matching pending
 * Get Connection Info command.
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
				       u16 opcode)
{
	struct hci_cp_read_rssi *cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u16 handle;
	u8 status;

	bt_dev_dbg(hdev, "status 0x%02x", hci_status);

	hci_dev_lock(hdev);

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle.  Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = MGMT_STATUS_SUCCESS;
	} else {
		status = mgmt_status(hci_status);
	}

	if (!cp) {
		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
			   handle);
		goto unlock;
	}

	/* The pending command is keyed on the connection it was issued for */
	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	/* conn_info_cmd_complete(); also drops the connection references */
	cmd->cmd_complete(cmd, status);
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
5537 
/* Handler for the Get Connection Info management command.
 *
 * Replies immediately with cached RSSI/TX power values when they are
 * recent enough; otherwise issues Read RSSI (and, when needed, Read
 * Transmit Power Level) to the controller and defers the reply to
 * conn_info_refresh_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the requested address in every response, even errors */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Only one outstanding request per connection is allowed */
	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct mgmt_pending_cmd *cmd;

		/* Read RSSI is always the first command in the request;
		 * conn_info_refresh_complete() relies on that ordering.
		 */
		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the deferred response is
		 * sent; released in conn_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
		cmd->cmd_complete = conn_info_cmd_complete;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5658 
5659 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5660 {
5661 	struct hci_conn *conn = cmd->user_data;
5662 	struct mgmt_rp_get_clock_info rp;
5663 	struct hci_dev *hdev;
5664 	int err;
5665 
5666 	memset(&rp, 0, sizeof(rp));
5667 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5668 
5669 	if (status)
5670 		goto complete;
5671 
5672 	hdev = hci_dev_get(cmd->index);
5673 	if (hdev) {
5674 		rp.local_clock = cpu_to_le32(hdev->clock);
5675 		hci_dev_put(hdev);
5676 	}
5677 
5678 	if (conn) {
5679 		rp.piconet_clock = cpu_to_le32(conn->clock);
5680 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5681 	}
5682 
5683 complete:
5684 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5685 				sizeof(rp));
5686 
5687 	if (conn) {
5688 		hci_conn_drop(conn);
5689 		hci_conn_put(conn);
5690 	}
5691 
5692 	return err;
5693 }
5694 
5695 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5696 {
5697 	struct hci_cp_read_clock *hci_cp;
5698 	struct mgmt_pending_cmd *cmd;
5699 	struct hci_conn *conn;
5700 
5701 	bt_dev_dbg(hdev, "status %u", status);
5702 
5703 	hci_dev_lock(hdev);
5704 
5705 	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5706 	if (!hci_cp)
5707 		goto unlock;
5708 
5709 	if (hci_cp->which) {
5710 		u16 handle = __le16_to_cpu(hci_cp->handle);
5711 		conn = hci_conn_hash_lookup_handle(hdev, handle);
5712 	} else {
5713 		conn = NULL;
5714 	}
5715 
5716 	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5717 	if (!cmd)
5718 		goto unlock;
5719 
5720 	cmd->cmd_complete(cmd, mgmt_status(status));
5721 	mgmt_pending_remove(cmd);
5722 
5723 unlock:
5724 	hci_dev_unlock(hdev);
5725 }
5726 
/* Handler for the Get Clock Info management command.
 *
 * Queues a Read Clock request for the local clock and, when a peer
 * address is given and connected, a second Read Clock for the piconet
 * clock. The reply is deferred to get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Echo the requested address in every response, even errors */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY requests the local clock only; otherwise the peer
	 * must be connected so its piconet clock can be read.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = clock_info_cmd_complete;

	hci_req_init(&req, hdev);

	/* First Read Clock: local clock (handle 0, which 0) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Keep the connection alive until the response is sent;
		 * released in clock_info_cmd_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5802 
5803 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5804 {
5805 	struct hci_conn *conn;
5806 
5807 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5808 	if (!conn)
5809 		return false;
5810 
5811 	if (conn->dst_type != type)
5812 		return false;
5813 
5814 	if (conn->state != BT_CONNECTED)
5815 		return false;
5816 
5817 	return true;
5818 }
5819 
/* Set the auto-connect policy for an LE device, creating the connection
 * parameter entry if needed, and move the entry onto the matching
 * pending-connection or passive-report list.
 *
 * Returns 0 on success, -EIO if the parameter entry could not be
 * allocated.
 *
 * This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on
	 * before re-queueing it according to the new policy.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * passive reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* No need to queue a connect if the device is already
		 * connected.
		 */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
5864 
5865 static void device_added(struct sock *sk, struct hci_dev *hdev,
5866 			 bdaddr_t *bdaddr, u8 type, u8 action)
5867 {
5868 	struct mgmt_ev_device_added ev;
5869 
5870 	bacpy(&ev.addr.bdaddr, bdaddr);
5871 	ev.addr.type = type;
5872 	ev.action = action;
5873 
5874 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5875 }
5876 
/* Handler for the Add Device management command.
 *
 * For BR/EDR addresses the device is added to the connection whitelist
 * (only action 0x01, allow incoming connections, is supported). For LE
 * identity addresses the requested auto-connect policy is stored in the
 * connection parameters and background scanning is updated.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	/* 0x00 = background scan, 0x01 = allow incoming / direct connect,
	 * 0x02 = auto-connect; anything else is invalid.
	 */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		/* Whitelist changes may require page scanning updates */
		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	hci_update_background_scan(hdev);

added:
	/* Notify other management sockets about the new device */
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5964 
5965 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5966 			   bdaddr_t *bdaddr, u8 type)
5967 {
5968 	struct mgmt_ev_device_removed ev;
5969 
5970 	bacpy(&ev.addr.bdaddr, bdaddr);
5971 	ev.addr.type = type;
5972 
5973 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5974 }
5975 
/* Handler for the Remove Device management command.
 *
 * With a specific address, removes that device from the whitelist
 * (BR/EDR) or drops its LE connection parameters. With BDADDR_ANY
 * (and address type 0), removes all whitelist entries and all LE
 * connection parameters that were added via Add Device.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			/* Whitelist changes may require scanning updates */
			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries in these states were not created via Add Device,
		 * so they cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY (remove all) requires address type 0 */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with an explicit connect in progress,
			 * but downgrade their policy.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
6104 
/* Handler for the Load Connection Parameters management command.
 *
 * Clears disabled connection parameter entries and loads the supplied
 * per-device LE connection parameters. Malformed request framing fails
 * the whole command; individual invalid entries are skipped with an
 * error log.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Largest param_count for which expected_len still fits in a u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The request length must match the declared entry count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types are meaningful here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
6189 
/* Handler for the Set External Configuration management command.
 *
 * Toggles the HCI_EXT_CONFIGURED flag for controllers that declare the
 * external-configuration quirk. When the change flips the controller
 * between configured and unconfigured state, the management index is
 * re-registered accordingly.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_REJECTED);

	/* config is a boolean: 0x00 or 0x01 */
	if (cp->config != 0x00 && cp->config != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				         MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* The test-and-set/clear helpers report whether the flag value
	 * actually changed.
	 */
	if (cp->config)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the HCI_UNCONFIGURED flag no longer matches the actual
	 * configuration state, re-register the controller under the
	 * other management index.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
			hci_dev_set_flag(hdev, HCI_CONFIG);
			hci_dev_set_flag(hdev, HCI_AUTO_OFF);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6245 
/* Handler for the Set Public Address management command.
 *
 * Stores the requested public address for controllers that provide a
 * set_bdaddr driver callback. If the address changed and the controller
 * is now fully configured, it is re-registered as a configured index
 * and powered on to apply the address.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver must support programming the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	/* Setting the address may have completed the configuration; if
	 * so, move the controller to the configured index and power it
	 * on so the new address gets programmed.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6297 
/* Request callback for the Read Local OOB (Extended) Data command
 * issued by read_local_ssp_oob_req(). Builds the EIR-encoded OOB
 * response, completes the pending command and broadcasts the new OOB
 * data to interested management sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
					     u16 opcode, struct sk_buff *skb)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd;
	u16 eir_len;
	int err;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
	if (!cmd)
		return;

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
		/* Legacy command: P-192 hash and randomizer only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* 5 bytes Class of Device field plus 18 bytes per
			 * 16-octet hash/randomizer EIR field
			 */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended command: P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* In SC-only mode the P-192 values are omitted */
			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	/* On failure send the response without any EIR payload */
	if (status)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* The requester is implicitly interested in OOB data updates */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
6408 
6409 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6410 				  struct mgmt_cp_read_local_oob_ext_data *cp)
6411 {
6412 	struct mgmt_pending_cmd *cmd;
6413 	struct hci_request req;
6414 	int err;
6415 
6416 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6417 			       cp, sizeof(*cp));
6418 	if (!cmd)
6419 		return -ENOMEM;
6420 
6421 	hci_req_init(&req, hdev);
6422 
6423 	if (bredr_sc_enabled(hdev))
6424 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6425 	else
6426 		hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6427 
6428 	err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6429 	if (err < 0) {
6430 		mgmt_pending_remove(cmd);
6431 		return err;
6432 	}
6433 
6434 	return 0;
6435 }
6436 
/* Handler for the Read Local Out Of Band Extended Data mgmt command.
 *
 * Builds a reply whose EIR payload depends on the address type bitmask
 * in cp->type: BR/EDR only, or LE (public | random). For BR/EDR with
 * SSP enabled the data has to be read from the controller, so the reply
 * is produced asynchronously by read_local_oob_ext_data_complete()
 * instead. On success the caller's socket is also subscribed to local
 * OOB data update events and the new data is broadcast to other
 * subscribed sockets.
 *
 * Returns 0 or a negative errno; protocol-level failures are reported
 * via the mgmt status in the command reply.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine the worst-case EIR length for the reply
	 * allocation. The real length is recomputed while appending below.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5; /* Class of Device TLV: 2 + 3 */
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				/* addr(9) + role(3) + confirm(18) +
				 * random(18) + flags(3)
				 */
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill in the EIR data and track the actual length. */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* SSP OOB data must be read from the controller;
			 * the reply is sent from the completion callback.
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* Pick the address the peer will see: static random
		 * (addr[6] = 0x01) or public (addr[6] = 0x00).
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* 0x02 = peripheral only, 0x01 = central only */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Broadcast the refreshed OOB data to all other subscribers. */
	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
6592 
6593 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6594 {
6595 	u32 flags = 0;
6596 
6597 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
6598 	flags |= MGMT_ADV_FLAG_DISCOV;
6599 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6600 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6601 	flags |= MGMT_ADV_FLAG_APPEARANCE;
6602 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
6603 
6604 	/* In extended adv TX_POWER returned from Set Adv Param
6605 	 * will be always valid.
6606 	 */
6607 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
6608 	    ext_adv_capable(hdev))
6609 		flags |= MGMT_ADV_FLAG_TX_POWER;
6610 
6611 	if (ext_adv_capable(hdev)) {
6612 		flags |= MGMT_ADV_FLAG_SEC_1M;
6613 
6614 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
6615 			flags |= MGMT_ADV_FLAG_SEC_2M;
6616 
6617 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
6618 			flags |= MGMT_ADV_FLAG_SEC_CODED;
6619 	}
6620 
6621 	return flags;
6622 }
6623 
/* Handler for the Read Advertising Features mgmt command.
 *
 * Replies with the supported advertising flags, data size limits and
 * the list of currently configured instance identifiers. The reply is
 * sized for one byte per instance appended after the fixed header.
 *
 * Returns 0 or a negative errno.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per configured instance. */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = HCI_MAX_ADV_INSTANCES;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Fill the instance list while still holding the lock so it
	 * cannot change between sizing and copying.
	 */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
6672 
6673 static u8 calculate_name_len(struct hci_dev *hdev)
6674 {
6675 	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
6676 
6677 	return append_local_name(hdev, buf, 0);
6678 }
6679 
6680 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
6681 			   bool is_adv_data)
6682 {
6683 	u8 max_len = HCI_MAX_AD_LENGTH;
6684 
6685 	if (is_adv_data) {
6686 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6687 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
6688 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
6689 			max_len -= 3;
6690 
6691 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6692 			max_len -= 3;
6693 	} else {
6694 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6695 			max_len -= calculate_name_len(hdev);
6696 
6697 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
6698 			max_len -= 4;
6699 	}
6700 
6701 	return max_len;
6702 }
6703 
6704 static bool flags_managed(u32 adv_flags)
6705 {
6706 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6707 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
6708 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
6709 }
6710 
6711 static bool tx_power_managed(u32 adv_flags)
6712 {
6713 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
6714 }
6715 
6716 static bool name_managed(u32 adv_flags)
6717 {
6718 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
6719 }
6720 
6721 static bool appearance_managed(u32 adv_flags)
6722 {
6723 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
6724 }
6725 
6726 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6727 			      u8 len, bool is_adv_data)
6728 {
6729 	int i, cur_len;
6730 	u8 max_len;
6731 
6732 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
6733 
6734 	if (len > max_len)
6735 		return false;
6736 
6737 	/* Make sure that the data is correctly formatted. */
6738 	for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
6739 		cur_len = data[i];
6740 
6741 		if (data[i + 1] == EIR_FLAGS &&
6742 		    (!is_adv_data || flags_managed(adv_flags)))
6743 			return false;
6744 
6745 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6746 			return false;
6747 
6748 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6749 			return false;
6750 
6751 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6752 			return false;
6753 
6754 		if (data[i + 1] == EIR_APPEARANCE &&
6755 		    appearance_managed(adv_flags))
6756 			return false;
6757 
6758 		/* If the current field length would exceed the total data
6759 		 * length, then it's invalid.
6760 		 */
6761 		if (i + cur_len >= len)
6762 			return false;
6763 	}
6764 
6765 	return true;
6766 }
6767 
/* HCI request completion callback for Add Advertising.
 *
 * On success, clears the pending marker of the newly added instances.
 * On failure, removes every still-pending instance (cancelling the
 * rotation timeout if the failed one was current) and emits
 * Advertising Removed events. Finally answers the pending mgmt command,
 * if one is still queued.
 */
static void add_advertising_complete(struct hci_dev *hdev, u8 status,
				     u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_add_advertising *cp;
	struct mgmt_rp_add_advertising rp;
	struct adv_info *adv_instance, *n;
	u8 instance;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);

	/* _safe variant: entries may be removed while walking the list. */
	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		if (!adv_instance->pending)
			continue;

		if (!status) {
			adv_instance->pending = false;
			continue;
		}

		instance = adv_instance->instance;

		if (hdev->cur_adv_instance == instance)
			cancel_adv_timeout(hdev);

		hci_remove_adv_instance(hdev, instance);
		mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
	}

	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	if (status)
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
				mgmt_status(status));
	else
		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
				  mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6819 
/* Handler for the Add Advertising mgmt command.
 *
 * Validates the instance number, flags and TLV payload, stores the
 * instance, and schedules advertising when possible. The command
 * completes either synchronously (when no HCI traffic is needed) or
 * from add_advertising_complete() once the HCI request finishes.
 *
 * Returns 0 or a negative errno; protocol-level failures are reported
 * via the mgmt status.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u32 supported_flags, phy_flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Total length must exactly cover the fixed header plus the two
	 * variable TLV payloads.
	 */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	/* The current implementation only supports a subset of the specified
	 * flags. Also need to check mutual exclusiveness of sec flags.
	 */
	supported_flags = get_supported_adv_flags(hdev);
	phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
	/* (phy_flags & -phy_flags) isolates the lowest set bit, so the
	 * XOR is non-zero exactly when more than one secondary PHY flag
	 * is set at once.
	 */
	if (flags & ~supported_flags ||
	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout needs a running clock, i.e. a powered controller. */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);

	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6965 
/* HCI request completion callback for Remove Advertising.
 *
 * Answers the pending mgmt command, if any is still queued.
 */
static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
					u16 opcode)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_cp_remove_advertising *cp;
	struct mgmt_rp_remove_advertising rp;

	bt_dev_dbg(hdev, "status %d", status);

	hci_dev_lock(hdev);

	/* A failure status here only means that we failed to disable
	 * advertising. Otherwise, the advertising instance has been removed,
	 * so report success.
	 */
	cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;
	rp.instance = cp->instance;

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
			  &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
6995 
/* Handler for the Remove Advertising mgmt command.
 *
 * Removes the given advertising instance (or all instances when
 * cp->instance is 0) and disables advertising if none remain. Completes
 * synchronously when no HCI traffic is required, otherwise from
 * remove_advertising_complete().
 *
 * Returns 0 or a negative errno.
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_rp_remove_advertising rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_request req;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Instance 0 means "remove all"; any other value must exist. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Removes the instance(s) and may queue HCI commands on req. */
	hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	/* If no HCI commands have been collected so far or the HCI_ADVERTISING
	 * flag is set or the device isn't powered then we have no HCI
	 * communication to make. Simply return.
	 */
	if (skb_queue_empty(&req.cmd_q) ||
	    !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		hci_req_purge(&req);
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_REMOVE_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_req_run(&req, remove_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
7068 
7069 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
7070 			     void *data, u16 data_len)
7071 {
7072 	struct mgmt_cp_get_adv_size_info *cp = data;
7073 	struct mgmt_rp_get_adv_size_info rp;
7074 	u32 flags, supported_flags;
7075 	int err;
7076 
7077 	bt_dev_dbg(hdev, "sock %p", sk);
7078 
7079 	if (!lmp_le_capable(hdev))
7080 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7081 				       MGMT_STATUS_REJECTED);
7082 
7083 	if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
7084 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7085 				       MGMT_STATUS_INVALID_PARAMS);
7086 
7087 	flags = __le32_to_cpu(cp->flags);
7088 
7089 	/* The current implementation only supports a subset of the specified
7090 	 * flags.
7091 	 */
7092 	supported_flags = get_supported_adv_flags(hdev);
7093 	if (flags & ~supported_flags)
7094 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7095 				       MGMT_STATUS_INVALID_PARAMS);
7096 
7097 	rp.instance = cp->instance;
7098 	rp.flags = cp->flags;
7099 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
7100 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
7101 
7102 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
7103 				MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7104 
7105 	return err;
7106 }
7107 
/* Dispatch table for management commands: the array index is the mgmt
 * opcode, so entries must stay in opcode order with no gaps. Each entry
 * gives the handler, the (minimum) expected parameter size, and flags
 * such as HCI_MGMT_VAR_LEN (variable-length parameters),
 * HCI_MGMT_NO_HDEV (no controller index required) and
 * HCI_MGMT_UNTRUSTED (allowed from untrusted sockets).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_security_info,      MGMT_READ_SECURITY_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
};
7214 
/* Notify mgmt listeners that a controller index appeared.
 *
 * Emits the legacy (Unconf) Index Added event for primary controllers
 * and always an Extended Index Added event carrying the type
 * (0x00 configured primary, 0x01 unconfigured primary, 0x02 AMP) and
 * the bus. Raw devices are not exposed over mgmt at all.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7246 
/* Notify mgmt listeners that a controller index disappeared.
 *
 * For primary controllers, first fails all pending mgmt commands with
 * INVALID_INDEX, then emits the matching (Unconf) Index Removed event.
 * Always follows with an Extended Index Removed event (type codes as in
 * mgmt_index_added()). Raw devices are not exposed over mgmt.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 matches every pending command. */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
7281 
7282 /* This function requires the caller holds hdev->lock */
7283 static void restart_le_actions(struct hci_dev *hdev)
7284 {
7285 	struct hci_conn_params *p;
7286 
7287 	list_for_each_entry(p, &hdev->le_conn_params, list) {
7288 		/* Needed for AUTO_OFF case where might not "really"
7289 		 * have been powered off.
7290 		 */
7291 		list_del_init(&p->action);
7292 
7293 		switch (p->auto_connect) {
7294 		case HCI_AUTO_CONN_DIRECT:
7295 		case HCI_AUTO_CONN_ALWAYS:
7296 			list_add(&p->action, &hdev->pend_le_conns);
7297 			break;
7298 		case HCI_AUTO_CONN_REPORT:
7299 			list_add(&p->action, &hdev->pend_le_reports);
7300 			break;
7301 		default:
7302 			break;
7303 		}
7304 	}
7305 }
7306 
/* Called when a power-on attempt finishes.
 *
 * On success, restarts the LE auto-connect actions and background
 * scanning. In all cases, answers any pending Set Powered commands and
 * broadcasts the new settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		restart_le_actions(hdev);
		hci_update_background_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last matching socket. */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
7329 
/* Called when the controller has been powered off.
 *
 * Answers pending Set Powered commands, fails every other pending
 * command with the appropriate status, reports a cleared class of
 * device if one was set, and broadcasts the new settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last matching socket. */
	if (match.sk)
		sock_put(match.sk);
}
7363 
7364 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7365 {
7366 	struct mgmt_pending_cmd *cmd;
7367 	u8 status;
7368 
7369 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7370 	if (!cmd)
7371 		return;
7372 
7373 	if (err == -ERFKILL)
7374 		status = MGMT_STATUS_RFKILLED;
7375 	else
7376 		status = MGMT_STATUS_FAILED;
7377 
7378 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7379 
7380 	mgmt_pending_remove(cmd);
7381 }
7382 
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 *
 * @persistent: store hint telling userspace whether to persist the key.
 */
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
		       bool persistent)
{
	struct mgmt_ev_new_link_key ev;

	/* Zero the whole event (including padding) since it is copied
	 * out to userspace as-is.
	 */
	memset(&ev, 0, sizeof(ev));

	ev.store_hint = persistent;
	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = BDADDR_BREDR;
	ev.key.type = key->type;
	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
	ev.key.pin_len = key->pin_len;

	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
7399 
7400 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7401 {
7402 	switch (ltk->type) {
7403 	case SMP_LTK:
7404 	case SMP_LTK_SLAVE:
7405 		if (ltk->authenticated)
7406 			return MGMT_LTK_AUTHENTICATED;
7407 		return MGMT_LTK_UNAUTHENTICATED;
7408 	case SMP_LTK_P256:
7409 		if (ltk->authenticated)
7410 			return MGMT_LTK_P256_AUTH;
7411 		return MGMT_LTK_P256_UNAUTH;
7412 	case SMP_LTK_P256_DEBUG:
7413 		return MGMT_LTK_P256_DEBUG;
7414 	}
7415 
7416 	return MGMT_LTK_UNAUTHENTICATED;
7417 }
7418 
/* Emit a New Long Term Key event for a freshly distributed LTK.
 *
 * @persistent: store hint; forced to 0 for non-identity random
 * addresses since those keys cannot be re-used.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	/* Zero the whole event (including padding) since it is copied
	 * out to userspace as-is.
	 */
	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	if (key->type == SMP_LTK)
		ev.key.master = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
7461 
7462 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
7463 {
7464 	struct mgmt_ev_new_irk ev;
7465 
7466 	memset(&ev, 0, sizeof(ev));
7467 
7468 	ev.store_hint = persistent;
7469 
7470 	bacpy(&ev.rpa, &irk->rpa);
7471 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7472 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7473 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7474 
7475 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
7476 }
7477 
/* Emit a New CSRK event so userspace can decide whether to persist
 * the signature resolving key distributed during pairing.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
7507 
7508 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7509 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7510 			 u16 max_interval, u16 latency, u16 timeout)
7511 {
7512 	struct mgmt_ev_new_conn_param ev;
7513 
7514 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
7515 		return;
7516 
7517 	memset(&ev, 0, sizeof(ev));
7518 	bacpy(&ev.addr.bdaddr, bdaddr);
7519 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7520 	ev.store_hint = store_hint;
7521 	ev.min_interval = cpu_to_le16(min_interval);
7522 	ev.max_interval = cpu_to_le16(max_interval);
7523 	ev.latency = cpu_to_le16(latency);
7524 	ev.timeout = cpu_to_le16(timeout);
7525 
7526 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
7527 }
7528 
/* Emit a Device Connected event, appending EIR data built either from
 * the cached LE advertising data or, for BR/EDR, from the remote name
 * and class of device.
 */
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
			   u32 flags, u8 *name, u8 name_len)
{
	/* Event header plus variable-length EIR data share one buffer */
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, &conn->dst);
	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	ev->flags = __cpu_to_le32(flags);

	/* We must ensure that the EIR Data fields are ordered and
	 * unique. Keep it simple for now and avoid the problem by not
	 * adding any BR/EDR data to the LE adv.
	 */
	if (conn->le_adv_data_len > 0) {
		memcpy(&ev->eir[eir_len],
		       conn->le_adv_data, conn->le_adv_data_len);
		eir_len = conn->le_adv_data_len;
	} else {
		if (name_len > 0)
			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
						  name, name_len);

		/* Only append the class of device when it is non-zero */
		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
			eir_len = eir_append_data(ev->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  conn->dev_class, 3);
	}

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		    sizeof(*ev) + eir_len, NULL);
}
7565 
7566 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7567 {
7568 	struct sock **sk = data;
7569 
7570 	cmd->cmd_complete(cmd, 0);
7571 
7572 	*sk = cmd->sk;
7573 	sock_hold(*sk);
7574 
7575 	mgmt_pending_remove(cmd);
7576 }
7577 
/* mgmt_pending_foreach() callback: announce that the device targeted
 * by a pending Unpair Device command is now unpaired, then complete
 * and remove the command.
 */
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
7588 
7589 bool mgmt_powering_down(struct hci_dev *hdev)
7590 {
7591 	struct mgmt_pending_cmd *cmd;
7592 	struct mgmt_mode *cp;
7593 
7594 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7595 	if (!cmd)
7596 		return false;
7597 
7598 	cp = cmd->param;
7599 	if (!cp->val)
7600 		return true;
7601 
7602 	return false;
7603 }
7604 
/* Emit a Device Disconnected event. Also completes pending Disconnect
 * and Unpair Device commands, and during power-off queues the final
 * power-off work once the last connection drops.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only connections userspace was told about generate an event */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* Complete any pending Disconnect command; its socket (if any)
	 * is handed back in sk and passed along to mgmt_event() below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
7640 
7641 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7642 			    u8 link_type, u8 addr_type, u8 status)
7643 {
7644 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7645 	struct mgmt_cp_disconnect *cp;
7646 	struct mgmt_pending_cmd *cmd;
7647 
7648 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7649 			     hdev);
7650 
7651 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
7652 	if (!cmd)
7653 		return;
7654 
7655 	cp = cmd->param;
7656 
7657 	if (bacmp(bdaddr, &cp->addr.bdaddr))
7658 		return;
7659 
7660 	if (cp->addr.type != bdaddr_type)
7661 		return;
7662 
7663 	cmd->cmd_complete(cmd, mgmt_status(status));
7664 	mgmt_pending_remove(cmd);
7665 }
7666 
/* Emit a Connect Failed event with the translated HCI status. During
 * power-off, also queue the final power-off work when this was the
 * last remaining connection.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
7686 
7687 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7688 {
7689 	struct mgmt_ev_pin_code_request ev;
7690 
7691 	bacpy(&ev.addr.bdaddr, bdaddr);
7692 	ev.addr.type = BDADDR_BREDR;
7693 	ev.secure = secure;
7694 
7695 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
7696 }
7697 
7698 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7699 				  u8 status)
7700 {
7701 	struct mgmt_pending_cmd *cmd;
7702 
7703 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7704 	if (!cmd)
7705 		return;
7706 
7707 	cmd->cmd_complete(cmd, mgmt_status(status));
7708 	mgmt_pending_remove(cmd);
7709 }
7710 
7711 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7712 				      u8 status)
7713 {
7714 	struct mgmt_pending_cmd *cmd;
7715 
7716 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7717 	if (!cmd)
7718 		return;
7719 
7720 	cmd->cmd_complete(cmd, mgmt_status(status));
7721 	mgmt_pending_remove(cmd);
7722 }
7723 
/* Ask userspace to confirm a pairing; @value and @confirm_hint are
 * forwarded unchanged in the User Confirm Request event.
 */
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
7740 
7741 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7742 			      u8 link_type, u8 addr_type)
7743 {
7744 	struct mgmt_ev_user_passkey_request ev;
7745 
7746 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
7747 
7748 	bacpy(&ev.addr.bdaddr, bdaddr);
7749 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7750 
7751 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
7752 			  NULL);
7753 }
7754 
7755 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7756 				      u8 link_type, u8 addr_type, u8 status,
7757 				      u8 opcode)
7758 {
7759 	struct mgmt_pending_cmd *cmd;
7760 
7761 	cmd = pending_find(opcode, hdev);
7762 	if (!cmd)
7763 		return -ENOENT;
7764 
7765 	cmd->cmd_complete(cmd, mgmt_status(status));
7766 	mgmt_pending_remove(cmd);
7767 
7768 	return 0;
7769 }
7770 
/* Completion handler for the User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
7777 
/* Completion handler for the User Confirm Negative Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
7785 
/* Completion handler for the User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
7792 
/* Completion handler for the User Passkey Negative Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
7800 
7801 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7802 			     u8 link_type, u8 addr_type, u32 passkey,
7803 			     u8 entered)
7804 {
7805 	struct mgmt_ev_passkey_notify ev;
7806 
7807 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
7808 
7809 	bacpy(&ev.addr.bdaddr, bdaddr);
7810 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7811 	ev.passkey = __cpu_to_le32(passkey);
7812 	ev.entered = entered;
7813 
7814 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
7815 }
7816 
/* Report an authentication failure to userspace and complete any
 * pending pairing command for this connection with the same status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* The pairing command's socket (if any) is passed through to
	 * mgmt_event() along with the event.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
7837 
/* Called when an authentication-enable change completes: sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state and
 * respond to any pending Set Link Security commands.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		/* Fail every pending Set Link Security command */
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state into the dev flag and note
	 * whether this actually changed the setting.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
7864 
7865 static void clear_eir(struct hci_request *req)
7866 {
7867 	struct hci_dev *hdev = req->hdev;
7868 	struct hci_cp_write_eir cp;
7869 
7870 	if (!lmp_ext_inq_capable(hdev))
7871 		return;
7872 
7873 	memset(hdev->eir, 0, sizeof(hdev->eir));
7874 
7875 	memset(&cp, 0, sizeof(cp));
7876 
7877 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
7878 }
7879 
/* Called when an SSP enable/disable change completes: sync the
 * HCI_SSP_ENABLED (and dependent HCI_HS_ENABLED) flags, respond to
 * pending Set SSP commands, and update or clear the EIR data to match
 * the new state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back the optimistically-set flags on failure */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		/* Disabling SSP also disables high speed support */
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		__hci_req_update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
7932 
7933 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7934 {
7935 	struct cmd_lookup *match = data;
7936 
7937 	if (match->sk == NULL) {
7938 		match->sk = cmd->sk;
7939 		sock_hold(match->sk);
7940 	}
7941 }
7942 
/* Called when a class of device update completes: find the socket
 * that triggered the change (Set Dev Class / Add UUID / Remove UUID)
 * and, on success, broadcast the new 3-byte class to subscribers.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
7961 
/* Called when a local name change completes. Sends a Local Name
 * Changed event unless the change is part of powering on. When no
 * Set Local Name command is pending, the change originated outside
 * mgmt, so hdev->dev_name is synced from @name.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
7989 
7990 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7991 {
7992 	int i;
7993 
7994 	for (i = 0; i < uuid_count; i++) {
7995 		if (!memcmp(uuid, uuids[i], 16))
7996 			return true;
7997 	}
7998 
7999 	return false;
8000 }
8001 
/* Walk the length-prefixed EIR/advertising fields in @eir and return
 * true if any UUID16/UUID32/UUID128 field contains a UUID from the
 * @uuids filter list. 16 and 32 bit UUIDs are expanded to 128 bit
 * using the Bluetooth base UUID before comparison.
 */
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0]; /* length excludes this byte itself */
		u8 uuid[16];
		int i;

		/* A zero-length field terminates the significant part */
		if (field_len == 0)
			break;

		/* Stop on a truncated field instead of reading past it */
		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		/* Advance to the next length-prefixed field */
		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
8056 
/* Queue a delayed restart of the running LE scan so controllers with
 * strict duplicate filtering report fresh results. Skipped when not
 * scanning or when the scan window would end before the restart.
 */
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	/* Don't restart when the delayed restart would only fire after
	 * the current discovery window has already elapsed.
	 */
	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
8071 
/* Apply the active service discovery filter (RSSI threshold and UUID
 * list) to a scan result. Returns true if the result should be
 * reported to userspace.
 */
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return  false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped. Both the advertising data
		 * and the scan response are checked for a match.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
8116 
/* Emit a Device Found event for an inquiry/scan result, applying the
 * active discovery filters (service filter, limited discovery) first
 * and appending EIR, class of device and scan response data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR already carries one */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
8198 
8199 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
8200 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
8201 {
8202 	struct mgmt_ev_device_found *ev;
8203 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
8204 	u16 eir_len;
8205 
8206 	ev = (struct mgmt_ev_device_found *) buf;
8207 
8208 	memset(buf, 0, sizeof(buf));
8209 
8210 	bacpy(&ev->addr.bdaddr, bdaddr);
8211 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
8212 	ev->rssi = rssi;
8213 
8214 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
8215 				  name_len);
8216 
8217 	ev->eir_len = cpu_to_le16(eir_len);
8218 
8219 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
8220 }
8221 
8222 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8223 {
8224 	struct mgmt_ev_discovering ev;
8225 
8226 	bt_dev_dbg(hdev, "discovering %u", discovering);
8227 
8228 	memset(&ev, 0, sizeof(ev));
8229 	ev.type = hdev->discovery.type;
8230 	ev.discovering = discovering;
8231 
8232 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8233 }
8234 
/* HCI control channel descriptor wiring the mgmt command handlers
 * into the HCI socket layer.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
8241 
/* Register the management control channel with the HCI socket layer. */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
8246 
/* Unregister the management control channel. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
8251